language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
pytorch__pytorch
test/dynamo/cpython/3_13/typinganndata/ann_module.py
{ "start": 816, "end": 1120 }
class ____(metaclass = Meta): x: str = 'something' y: str = 'something else' def foo(x: int = 10): def bar(y: List[str]): x: str = 'yes' bar() def dec(func): @wraps(func) def wrapper(*args, **kwargs): return func(*args, **kwargs) return wrapper u: int | float
S
python
great-expectations__great_expectations
scripts/cleanup/cleanup_redshift.py
{ "start": 299, "end": 2172 }
class ____(BaseSettings): """Environment variables for Redshift connection. These are injected in via CI, but when running locally, you may use your own credentials. """ REDSHIFT_HOST: str REDSHIFT_PORT: int REDSHIFT_USERNAME: str REDSHIFT_PASSWORD: str REDSHIFT_DATABASE: str REDSHIFT_SSLMODE: str @property def connection_string(self) -> str: return ( f"redshift+psycopg2://{self.REDSHIFT_USERNAME}:{self.REDSHIFT_PASSWORD}@" f"{self.REDSHIFT_HOST}:{self.REDSHIFT_PORT}/{self.REDSHIFT_DATABASE}?" f"sslmode={self.REDSHIFT_SSLMODE}" ) TABLE_PATTERN = "expectation_test_table_%" def cleanup_redshift(config: RedshiftConnectionConfig) -> None: engine = create_engine(url=config.connection_string) with engine.connect() as conn, conn.begin(): results = conn.execute( TextClause( """ SELECT 'DROP TABLE IF EXISTS ' || t.schemaname || '.' || t.tablename || ';' FROM pg_tables t JOIN SVV_TABLE_INFO i ON t.schemaname = i.schema AND t.tablename = i.table WHERE t.tablename LIKE :table_pattern AND i.create_time < DATEADD(hour, -1, GETDATE()) """ ), {"table_pattern": TABLE_PATTERN}, ).fetchall() if results: to_run = TextClause("\n".join([row[0] for row in results])) conn.execute(to_run) logger.info(f"Cleaned up {len(results)} Redshift tables older than 2 hours") else: logger.info("No Redshift tables older than 2 hours to clean up!") engine.dispose() if __name__ == "__main__": config = RedshiftConnectionConfig() # type: ignore[call-arg] # pydantic populates from env vars cleanup_redshift(config)
RedshiftConnectionConfig
python
dagster-io__dagster
python_modules/libraries/dagster-dbt/dagster_dbt/cloud_v2/client.py
{ "start": 958, "end": 16674 }
class ____(DagsterModel): account_id: int = Field( ..., description="The dbt Cloud Account ID. Can be found on the Account Info page of dbt Cloud.", ) token: str = Field( ..., description="The token to access the dbt Cloud API. Can be either a personal token or a service token.", ) access_url: str = Field( ..., description="The access URL for your dbt Cloud workspace.", ) request_max_retries: int = Field( ..., description=( "The maximum number of times requests to the dbt Cloud API should be retried " "before failing." ), ) request_retry_delay: float = Field( ..., description="Time (in seconds) to wait between each request retry.", ) request_timeout: int = Field( ..., description="Time (in seconds) after which the requests to dbt Cloud are declared timed out.", ) @property @cached_method def _log(self) -> logging.Logger: return get_dagster_logger() @property def api_v2_url(self) -> str: return f"{self.access_url}/api/v2/accounts/{self.account_id}" def _get_session(self) -> requests.Session: session = requests.Session() session.headers.update( { "Accept": "application/json", "Content-Type": "application/json", "Authorization": f"Token {self.token}", } ) return session def _get_artifact_session(self) -> requests.Session: session = requests.Session() session.headers.update( { "Content-Type": "application/json", "Authorization": f"Token {self.token}", } ) return session def _make_request( self, method: str, endpoint: Optional[str], base_url: str, data: Optional[Mapping[str, Any]] = None, params: Optional[Mapping[str, Any]] = None, session_attr: str = "_get_session", ) -> Mapping[str, Any]: url = f"{base_url}/{endpoint}" if endpoint else base_url num_retries = 0 while True: try: session = getattr(self, session_attr)() response = session.request( method=method, url=url, json=data, params=params, timeout=self.request_timeout, ) response.raise_for_status() return response.json() except RequestException as e: self._log.error( f"Request to dbt Cloud API failed for url {url} 
with method {method} : {e}" ) if num_retries == self.request_max_retries: break num_retries += 1 time.sleep(self.request_retry_delay) raise Failure(f"Max retries ({self.request_max_retries}) exceeded with url: {url}.") def create_job( self, *, project_id: int, environment_id: int, job_name: str, description: Optional[str] = None, ) -> Mapping[str, Any]: """Creates a dbt cloud job in a dbt Cloud workspace for a given project and environment. Args: project_id (str): The dbt Cloud Project ID. You can retrieve this value from the URL of the "Explore" tab in the dbt Cloud UI. environment_id (str): The dbt Cloud Environment ID. You can retrieve this value from the URL of the given environment page the dbt Cloud UI. job_name (str): The name of the job to create. description (Optional[str]): The description of the job to create. Defaults to `A job that runs dbt models, sources, and tests.` Returns: Dict[str, Any]: Parsed json data from the response to this request """ if not description: description = "A job that runs dbt models, sources, and tests." return self._make_request( method="post", endpoint="jobs", base_url=self.api_v2_url, data={ "account_id": self.account_id, "environment_id": environment_id, "project_id": project_id, "name": job_name, "description": description, "job_type": "other", }, )["data"] def list_jobs( self, project_id: int, environment_id: int, ) -> Sequence[Mapping[str, Any]]: """Retrieves a list of dbt cloud jobs from a dbt Cloud workspace for a given project and environment. Args: project_id (str): The dbt Cloud Project ID. You can retrieve this value from the URL of the "Explore" tab in the dbt Cloud UI. environment_id (str): The dbt Cloud Environment ID. You can retrieve this value from the URL of the given environment page the dbt Cloud UI. 
Returns: List[Dict[str, Any]]: A List of parsed json data from the response to this request """ results = [] while jobs := self._make_request( method="get", endpoint="jobs", base_url=self.api_v2_url, params={ "account_id": self.account_id, "environment_id": environment_id, "project_id": project_id, "limit": DAGSTER_DBT_CLOUD_LIST_JOBS_INDIVIDUAL_REQUEST_LIMIT, "offset": len(results), }, )["data"]: results.extend(jobs) if len(jobs) < DAGSTER_DBT_CLOUD_LIST_JOBS_INDIVIDUAL_REQUEST_LIMIT: break return results def get_job_details(self, job_id: int) -> Mapping[str, Any]: """Retrieves the details of a given dbt Cloud job. Returns: Dict[str, Any]: Parsed json data representing the API response. """ return self._make_request( method="get", endpoint=f"jobs/{job_id}", base_url=self.api_v2_url, )["data"] def destroy_job(self, job_id: int) -> Mapping[str, Any]: """Destroys a given dbt Cloud job. Returns: Dict[str, Any]: Parsed json data representing the API response. """ return self._make_request( method="delete", endpoint=f"jobs/{job_id}", base_url=self.api_v2_url, )["data"] def trigger_job_run( self, job_id: int, steps_override: Optional[Sequence[str]] = None ) -> Mapping[str, Any]: """Triggers a run for a given dbt Cloud Job. Args: job_id (str): The dbt Cloud Job ID. You can retrieve this value from the URL of the given job in the dbt Cloud UI. steps_override (Optional[Sequence[str]]): A list of dbt commands that overrides the dbt commands of the dbt Cloud job. If no list is passed, the dbt commands of the job are not overridden. Returns: List[Dict[str, Any]]: A List of parsed json data from the response to this request. 
""" return self._make_request( method="post", endpoint=f"jobs/{job_id}/run", base_url=self.api_v2_url, data={"steps_override": steps_override, "cause": DAGSTER_ADHOC_TRIGGER_CAUSE} if steps_override else {"cause": DAGSTER_ADHOC_TRIGGER_CAUSE}, )["data"] def get_runs_batch( self, project_id: int, environment_id: int, finished_at_lower_bound: datetime.datetime, finished_at_upper_bound: datetime.datetime, offset: int = 0, ) -> tuple[Sequence[Mapping[str, Any]], int]: """Retrieves a batch of dbt Cloud runs from a dbt Cloud workspace for a given project and environment. Args: project_id (str): The dbt Cloud Project ID. You can retrieve this value from the URL of the "Explore" tab in the dbt Cloud UI. environment_id (str): The dbt Cloud Environment ID. You can retrieve this value from the URL of the given environment page the dbt Cloud UI. finished_at_lower_bound (datetime.datetime): The first run in this batch will have finished at a time that is equal to or after this value. finished_at_upper_bound (datetime.datetime): The last run in this batch will have finished at a time that is equal to or before this value. offset (str): The pagination offset for this request. Returns: tuple[List[Dict[str, Any]], int]: A tuple containing: - a list of run details as parsed json data from the response to this request; - the total number of runs for the given parameters. 
""" resp = self._make_request( method="get", endpoint="runs", base_url=self.api_v2_url, params={ "account_id": self.account_id, "environment_id": environment_id, "project_id": project_id, "limit": DAGSTER_DBT_CLOUD_BATCH_RUNS_REQUEST_LIMIT, "offset": offset, "finished_at__range": f"""["{finished_at_lower_bound.isoformat()}", "{finished_at_upper_bound.isoformat()}"]""", "order_by": "finished_at", }, ) data = cast("Sequence[Mapping[str, Any]]", resp["data"]) total_count = resp["extra"]["pagination"]["total_count"] return data, total_count def get_run_details(self, run_id: int) -> Mapping[str, Any]: """Retrieves the details of a given dbt Cloud Run. Args: run_id (str): The dbt Cloud Run ID. You can retrieve this value from the URL of the given run in the dbt Cloud UI. Returns: Dict[str, Any]: Parsed json data representing the API response. """ return self._make_request( method="get", endpoint=f"runs/{run_id}", base_url=self.api_v2_url, )["data"] def poll_run( self, run_id: int, poll_interval: Optional[float] = None, poll_timeout: Optional[float] = None, ) -> Mapping[str, Any]: """Given a dbt Cloud run, poll until the run completes. Args: run_id (str): The dbt Cloud Run ID. You can retrieve this value from the URL of the given run in the dbt Cloud UI. poll_interval (float): The time (in seconds) that will be waited between successive polls. By default, the interval is set to 1 second. poll_timeout (float): The maximum time that will waited before this operation is timed out. By default, this will time out after 60 seconds. Returns: Dict[str, Any]: Parsed json data representing the API response. 
""" if not poll_interval: poll_interval = DAGSTER_DBT_CLOUD_POLL_INTERVAL if not poll_timeout: poll_timeout = DAGSTER_DBT_CLOUD_POLL_TIMEOUT start_time = time.time() while time.time() - start_time < poll_timeout: run_details = self.get_run_details(run_id) run = DbtCloudRun.from_run_details(run_details=run_details) if run.status in { DbtCloudJobRunStatusType.SUCCESS, DbtCloudJobRunStatusType.ERROR, DbtCloudJobRunStatusType.CANCELLED, }: return run_details # Sleep for the configured time interval before polling again. time.sleep(poll_interval) raise Exception(f"Run {run.id} did not complete within {poll_timeout} seconds.") # pyright: ignore[reportPossiblyUnboundVariable] def list_run_artifacts( self, run_id: int, ) -> Sequence[str]: """Retrieves a list of artifact names for a given dbt Cloud Run. Returns: List[str]: a list of artifact names taken from the response to this request. """ return cast( "Sequence[str]", self._make_request( method="get", endpoint=f"runs/{run_id}/artifacts", base_url=self.api_v2_url, session_attr="_get_artifact_session", )["data"], ) def get_run_artifact(self, run_id: int, path: str) -> Mapping[str, Any]: """Retrieves an artifact at the given path for a given dbt Cloud Run. Returns: Dict[str, Any]: Parsed json data representing the API response. """ return self._make_request( method="get", endpoint=f"runs/{run_id}/artifacts/{path}", base_url=self.api_v2_url, session_attr="_get_artifact_session", ) def get_run_results_json(self, run_id: int) -> Mapping[str, Any]: """Retrieves the run_results.json artifact of a given dbt Cloud Run. Returns: Dict[str, Any]: Parsed json data representing the API response. """ return self.get_run_artifact(run_id=run_id, path="run_results.json") def get_run_manifest_json(self, run_id: int) -> Mapping[str, Any]: """Retrieves the manifest.json artifact of a given dbt Cloud Run. Returns: Dict[str, Any]: Parsed json data representing the API response. 
""" return self.get_run_artifact(run_id=run_id, path="manifest.json") def get_project_details(self, project_id: int) -> Mapping[str, Any]: """Retrieves the details of a given dbt Cloud Project. Args: project_id (str): The dbt Cloud Project ID. You can retrieve this value from the URL of the "Explore" tab in the dbt Cloud UI. Returns: Dict[str, Any]: Parsed json data representing the API response. """ return self._make_request( method="get", endpoint=f"projects/{project_id}", base_url=self.api_v2_url, )["data"] def get_environment_details(self, environment_id: int) -> Mapping[str, Any]: """Retrieves the details of a given dbt Cloud Environment. Args: environment_id (str): The dbt Cloud Environment ID. You can retrieve this value from the URL of the given environment page the dbt Cloud UI. Returns: Dict[str, Any]: Parsed json data representing the API response. """ return self._make_request( method="get", endpoint=f"environments/{environment_id}", base_url=self.api_v2_url, )["data"] def get_account_details(self) -> Mapping[str, Any]: """Retrieves the details of the account associated to the dbt Cloud workspace. Returns: Dict[str, Any]: Parsed json data representing the API response. """ return self._make_request( method="get", endpoint=None, base_url=self.api_v2_url, )["data"] def verify_connection(self) -> None: """Verifies the connection to the dbt Cloud REST API.""" try: self.get_account_details() except Exception as e: raise Exception( f"Failed to verify connection to dbt Cloud REST API with the workspace client. Exception: {e}" )
DbtCloudWorkspaceClient
python
spack__spack
lib/spack/spack/test/llnl/util/file_list.py
{ "start": 4759, "end": 10787 }
class ____: def test_repr(self, header_list): x = eval(repr(header_list)) assert header_list == x def test_joined_and_str(self, header_list): s1 = header_list.joined() expected = " ".join( [ "/dir1/Python.h", "/dir2/date.time.h", "/dir1/pyconfig.hpp", "/dir3/core.hh", "pymem.cuh", ] ) assert s1 == expected s2 = str(header_list) assert s1 == s2 s3 = header_list.joined(";") expected = ";".join( [ "/dir1/Python.h", "/dir2/date.time.h", "/dir1/pyconfig.hpp", "/dir3/core.hh", "pymem.cuh", ] ) assert s3 == expected def test_flags(self, header_list): include_flags = header_list.include_flags assert "-I/dir1" in include_flags assert "-I/dir2" in include_flags assert "-I/dir3" in include_flags assert isinstance(include_flags, str) assert include_flags == "-I/dir1 -I/dir2 -I/dir3" macros = header_list.macro_definitions assert "-DBOOST_LIB_NAME=boost_regex" in macros assert "-DBOOST_DYN_LINK" in macros assert isinstance(macros, str) assert macros == "-DBOOST_LIB_NAME=boost_regex -DBOOST_DYN_LINK" cpp_flags = header_list.cpp_flags assert isinstance(cpp_flags, str) assert cpp_flags == include_flags + " " + macros def test_paths_manipulation(self, header_list): names = header_list.names assert names == ["Python", "date.time", "pyconfig", "core", "pymem"] directories = header_list.directories assert directories == ["/dir1", "/dir2", "/dir3"] def test_get_item(self, header_list): a = header_list[0] assert a == "/dir1/Python.h" b = header_list[:] assert type(b) is type(header_list) assert header_list == b assert header_list is not b def test_add(self, header_list): pylist = [ "/dir1/Python.h", # removed from the final list "/dir2/pyconfig.hpp", "/dir4/date.time.h", ] another = HeaderList(pylist) h = header_list + another assert len(h) == 7 # Invariant : l == l + l assert h == h + h # Always produce an instance of HeaderList assert type(header_list + pylist) is type(header_list) assert type(pylist + header_list) is type(header_list) #: Directory where the data for the test below is 
stored search_dir = os.path.join(spack.paths.test_path, "data", "directory_search") @pytest.mark.parametrize( "lib_list,kwargs", [ (["liba"], {"shared": True, "recursive": True}), (["liba"], {"shared": False, "recursive": True}), (["libc", "liba"], {"shared": True, "recursive": True}), (["liba", "libc"], {"shared": False, "recursive": True}), (["libc", "libb", "liba"], {"shared": True, "recursive": True}), (["liba", "libb", "libc"], {"shared": False, "recursive": True}), ], ) def test_library_type_search(lib_list, kwargs): results = find_libraries(lib_list, search_dir, **kwargs) assert len(results) != 0 for result in results: lib_type_ext = plat_shared_ext if not kwargs["shared"]: lib_type_ext = plat_static_ext assert result.endswith(lib_type_ext) or ( kwargs["shared"] and result.endswith(plat_apple_shared_ext) ) @pytest.mark.parametrize( "search_fn,search_list,root,kwargs", [ (find_libraries, "liba", search_dir, {"recursive": True}), (find_libraries, ["liba"], search_dir, {"recursive": True}), (find_libraries, "libb", search_dir, {"recursive": True}), (find_libraries, ["libc"], search_dir, {"recursive": True}), (find_libraries, ["libc", "liba"], search_dir, {"recursive": True}), (find_libraries, ["liba", "libc"], search_dir, {"recursive": True}), (find_libraries, ["libc", "libb", "liba"], search_dir, {"recursive": True}), (find_libraries, ["liba", "libc"], search_dir, {"recursive": True}), ( find_libraries, ["libc", "libb", "liba"], search_dir, {"recursive": True, "shared": False}, ), (find_headers, "a", search_dir, {"recursive": True}), (find_headers, ["a"], search_dir, {"recursive": True}), (find_headers, "b", search_dir, {"recursive": True}), (find_headers, ["c"], search_dir, {"recursive": True}), (find_headers, ["c", "a"], search_dir, {"recursive": True}), (find_headers, ["a", "c"], search_dir, {"recursive": True}), (find_headers, ["c", "b", "a"], search_dir, {"recursive": True}), (find_headers, ["a", "c"], search_dir, {"recursive": True}), (find_libraries, 
["liba", "libd"], os.path.join(search_dir, "b"), {"recursive": False}), (find_headers, ["b", "d"], os.path.join(search_dir, "b"), {"recursive": False}), ], ) def test_searching_order(search_fn, search_list, root, kwargs): # Test search result = search_fn(search_list, root, **kwargs) # The tests are set-up so that something is always found assert len(result) != 0 # Now reverse the result and start discarding things # as soon as you have matches. In the end the list should # be emptied. rlist = list(reversed(result)) # At this point make sure the search list is a sequence if isinstance(search_list, str): search_list = [search_list] # Discard entries in the order they appear in search list for x in search_list: try: while fnmatch.fnmatch(rlist[-1], x) or x in rlist[-1]: rlist.pop() except IndexError: # List is empty pass # List should be empty here assert len(rlist) == 0
TestHeaderList
python
walkccc__LeetCode
solutions/2111. Minimum Operations to Make the Array K-Increasing/2111.py
{ "start": 0, "end": 370 }
class ____: def kIncreasing(self, arr: list[int], k: int) -> int: def numReplaced(arr: list[int]) -> int: tails = [] for a in arr: if not tails or tails[-1] <= a: tails.append(a) else: tails[bisect_right(tails, a)] = a return len(arr) - len(tails) return sum(numReplaced(arr[i::k]) for i in range(k))
Solution
python
django__django
tests/select_related_regress/models.py
{ "start": 240, "end": 461 }
class ____(models.Model): device = models.ForeignKey("Device", models.CASCADE) port_number = models.CharField(max_length=10) def __str__(self): return "%s/%s" % (self.device.name, self.port_number)
Port
python
kubernetes-client__python
kubernetes/client/models/v1_volume_resource_requirements.py
{ "start": 383, "end": 5285 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'limits': 'dict(str, str)', 'requests': 'dict(str, str)' } attribute_map = { 'limits': 'limits', 'requests': 'requests' } def __init__(self, limits=None, requests=None, local_vars_configuration=None): # noqa: E501 """V1VolumeResourceRequirements - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._limits = None self._requests = None self.discriminator = None if limits is not None: self.limits = limits if requests is not None: self.requests = requests @property def limits(self): """Gets the limits of this V1VolumeResourceRequirements. # noqa: E501 Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ # noqa: E501 :return: The limits of this V1VolumeResourceRequirements. # noqa: E501 :rtype: dict(str, str) """ return self._limits @limits.setter def limits(self, limits): """Sets the limits of this V1VolumeResourceRequirements. Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ # noqa: E501 :param limits: The limits of this V1VolumeResourceRequirements. # noqa: E501 :type: dict(str, str) """ self._limits = limits @property def requests(self): """Gets the requests of this V1VolumeResourceRequirements. # noqa: E501 Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ # noqa: E501 :return: The requests of this V1VolumeResourceRequirements. # noqa: E501 :rtype: dict(str, str) """ return self._requests @requests.setter def requests(self, requests): """Sets the requests of this V1VolumeResourceRequirements. Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ # noqa: E501 :param requests: The requests of this V1VolumeResourceRequirements. # noqa: E501 :type: dict(str, str) """ self._requests = requests def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1VolumeResourceRequirements): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, 
V1VolumeResourceRequirements): return True return self.to_dict() != other.to_dict()
V1VolumeResourceRequirements
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/cloud_build.py
{ "start": 26286, "end": 29962 }
class ____(GoogleCloudBaseOperator): """ Lists existing BuildTriggers. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:CloudBuildListBuildTriggersOperator` :param location: The location of the project. :param project_id: Optional, Google Cloud Project project_id where the function belongs. If set to None or missing, the default project_id from the GCP connection is used. :param page_size: Optional, number of results to return in the list. :param page_token: Optional, token to provide to skip to a particular spot in the list. :param retry: Optional, a retry object used to retry requests. If `None` is specified, requests will not be retried. :param timeout: Optional, the amount of time, in seconds, to wait for the request to complete. Note that if `retry` is specified, the timeout applies to each individual attempt. :param metadata: Optional, additional metadata that is provided to the method. :param gcp_conn_id: Optional, the connection ID used to connect to Google Cloud Platform. :param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). 
""" template_fields: Sequence[str] = ("location", "project_id", "gcp_conn_id") operator_extra_links = (CloudBuildTriggersListLink(),) def __init__( self, *, location: str = "global", project_id: str = PROVIDE_PROJECT_ID, page_size: int | None = None, page_token: str | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), gcp_conn_id: str = "google_cloud_default", impersonation_chain: str | Sequence[str] | None = None, **kwargs, ) -> None: super().__init__(**kwargs) self.location = location self.project_id = project_id self.page_size = page_size self.page_token = page_token self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain @property def extra_links_params(self) -> dict[str, Any]: return { "region": self.location, } def execute(self, context: Context): hook = CloudBuildHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain) results = hook.list_build_triggers( project_id=self.project_id, location=self.location, page_size=self.page_size, page_token=self.page_token, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) project_id = self.project_id or hook.project_id if project_id: CloudBuildTriggersListLink.persist( context=context, project_id=project_id, ) return [BuildTrigger.to_dict(result) for result in results]
CloudBuildListBuildTriggersOperator
python
doocs__leetcode
solution/3200-3299/3234.Count the Number of Substrings With Dominant Ones/Solution.py
{ "start": 0, "end": 615 }
class ____: def numberOfSubstrings(self, s: str) -> int: n = len(s) nxt = [n] * (n + 1) for i in range(n - 1, -1, -1): nxt[i] = nxt[i + 1] if s[i] == "0": nxt[i] = i ans = 0 for i in range(n): cnt0 = int(s[i] == "0") j = i while j < n and cnt0 * cnt0 <= n: cnt1 = (nxt[j + 1] - i) - cnt0 if cnt1 >= cnt0 * cnt0: ans += min(nxt[j + 1] - j, cnt1 - cnt0 * cnt0 + 1) j = nxt[j + 1] cnt0 += 1 return ans
Solution
python
ray-project__ray
python/ray/data/tests/test_state_export.py
{ "start": 2217, "end": 18415 }
class ____(LogicalOperator): """A dummy logical operator for testing _get_logical_args with various data types.""" def __init__(self, input_op=None): super().__init__("DummyOperator", []) # Test various data types that might be returned by _get_logical_args self._string_value = "test_string" self._int_value = 42 self._float_value = 3.14 self._bool_value = True self._none_value = None self._list_value = [1, 2, 3, "string", None] self._dict_value = {"key1": "value1", "key2": 123, "key3": None} self._nested_dict = { "level1": { "level2": { "level3": "deep_value", "numbers": [1, 2, 3], "mixed": {"a": 1, "b": "string", "c": None}, } } } self._tuple_value = (1, "string", None, 3.14) self._set_value = {1} self._bytes_value = b"binary_data" self._complex_dict = { "string_keys": {"a": 1, "b": 2}, "int_keys": {1: "one", 2: "two"}, # This should cause issues if not handled "mixed_keys": {"str": "value", 1: "int_key", None: "none_key"}, } self._empty_containers = { "empty_list": [], "empty_dict": {}, "empty_tuple": (), "empty_set": set(), } self._special_values = { "zero": 0, "negative": -1, "large_int": 999999999999999999, "small_float": 0.0000001, "inf": float("inf"), "neg_inf": float("-inf"), "nan": float("nan"), } self._data_class = TestDataclass() @pytest.fixture def dummy_dataset_topology(): """Create a dummy Topology.""" dummy_operator = DummyLogicalOperator() dummy_topology = Topology( operators=[ Operator( name="Input", id="Input_0", uuid="uuid_0", input_dependencies=[], sub_stages=[], execution_start_time=1.0, execution_end_time=1.0, state="FINISHED", args=sanitize_for_struct(dummy_operator._get_args()), ), Operator( name="ReadRange->Map(<lambda>)->Filter(<lambda>)", id="ReadRange->Map(<lambda>)->Filter(<lambda>)_1", uuid="uuid_1", input_dependencies=["Input_0"], sub_stages=[], execution_start_time=0.0, execution_end_time=0.0, state="RUNNING", args=sanitize_for_struct(dummy_operator._get_args()), ), ], ) return dummy_topology @pytest.fixture def 
dummy_dataset_topology_expected_output(): return { "operators": [ { "name": "Input", "id": "Input_0", "uuid": "uuid_0", "args": { "_num_outputs": "None", "_int_value": "42", "_special_values": { "negative": "-1", "inf": "inf", "zero": "0", "large_int": "999999999999999999", "small_float": "1e-07", "neg_inf": "-inf", "nan": "nan", }, "_none_value": "None", "_name": "DummyOperator", "_output_dependencies": [], "_float_value": "3.14", "_list_value": ["1", "2", "3", "string", "None"], "_dict_value": {"key1": "value1", "key3": "None", "key2": "123"}, "_set_value": ["1"], "_tuple_value": ["1", "string", "None", "3.14"], "_bytes_value": [ "98", "105", "110", "97", "114", "121", "95", "100", "97", "116", "97", ], "_input_dependencies": [], "_empty_containers": { "empty_set": [], "empty_tuple": [], "empty_dict": {}, "empty_list": [], }, "_bool_value": "True", "_nested_dict": { "level1": { "level2": { "mixed": {"a": "1", "b": "string", "c": "None"}, "numbers": ["1", "2", "3"], "level3": "deep_value", } } }, "_string_value": "test_string", "_complex_dict": { "string_keys": {"a": "1", "b": "2"}, "mixed_keys": { "None": "none_key", "str": "value", "1": "int_key", }, "int_keys": {"1": "one", "2": "two"}, }, "_data_class": { "list_field": ["1", "2", "3"], "dict_field": {"3": "4", "1": "2"}, "tuple_field": ["1", "2", "3"], "set_field": ["1", "2", "3"], "int_field": "1", "none_field": "None", "bool_field": "True", "string_field": "test", "float_field": "1.0", }, }, "input_dependencies": [], "sub_stages": [], "execution_start_time": 1.0, "execution_end_time": 1.0, "state": "FINISHED", }, { "name": "ReadRange->Map(<lambda>)->Filter(<lambda>)", "id": "ReadRange->Map(<lambda>)->Filter(<lambda>)_1", "uuid": "uuid_1", "input_dependencies": ["Input_0"], "args": { "_num_outputs": "None", "_int_value": "42", "_special_values": { "negative": "-1", "inf": "inf", "zero": "0", "large_int": "999999999999999999", "small_float": "1e-07", "neg_inf": "-inf", "nan": "nan", }, "_none_value": "None", 
"_name": "DummyOperator", "_output_dependencies": [], "_float_value": "3.14", "_list_value": ["1", "2", "3", "string", "None"], "_dict_value": {"key1": "value1", "key3": "None", "key2": "123"}, "_set_value": ["1"], "_tuple_value": ["1", "string", "None", "3.14"], "_bytes_value": [ "98", "105", "110", "97", "114", "121", "95", "100", "97", "116", "97", ], "_input_dependencies": [], "_empty_containers": { "empty_set": [], "empty_tuple": [], "empty_dict": {}, "empty_list": [], }, "_bool_value": "True", "_nested_dict": { "level1": { "level2": { "mixed": {"a": "1", "b": "string", "c": "None"}, "numbers": ["1", "2", "3"], "level3": "deep_value", } } }, "_string_value": "test_string", "_complex_dict": { "string_keys": {"a": "1", "b": "2"}, "mixed_keys": { "None": "none_key", "str": "value", "1": "int_key", }, "int_keys": {"1": "one", "2": "two"}, }, "_data_class": { "list_field": ["1", "2", "3"], "dict_field": {"3": "4", "1": "2"}, "tuple_field": ["1", "2", "3"], "set_field": ["1", "2", "3"], "int_field": "1", "none_field": "None", "bool_field": "True", "string_field": "test", "float_field": "1.0", }, }, "sub_stages": [], "execution_start_time": 0.0, "execution_end_time": 0.0, "state": "RUNNING", }, ] } def test_export_disabled(ray_start_regular, dummy_dataset_topology): """Test that no export files are created when export API is disabled.""" stats_actor = get_or_create_stats_actor() # Create or update train run ray.get( stats_actor.register_dataset.remote( dataset_tag="test_dataset", operator_tags=["ReadRange->Map(<lambda>)->Filter(<lambda>)"], topology=dummy_dataset_topology, job_id=STUB_JOB_ID, data_context=DataContext.get_current(), ) ) # Check that no export files were created assert not os.path.exists(_get_export_file_path()) def _test_dataset_metadata_export(topology, dummy_dataset_topology_expected_output): """Test that dataset metadata export events are written when export API is enabled.""" stats_actor = get_or_create_stats_actor() # Simulate a dataset 
registration ray.get( stats_actor.register_dataset.remote( dataset_tag=STUB_DATASET_ID, operator_tags=["ReadRange->Map(<lambda>)->Filter(<lambda>)"], topology=topology, job_id=STUB_JOB_ID, data_context=DataContext.get_current(), ) ) # Check that export files were created data = _get_exported_data() assert len(data) == 1 assert data[0]["source_type"] == "EXPORT_DATASET_METADATA" assert data[0]["event_data"]["topology"] == dummy_dataset_topology_expected_output assert data[0]["event_data"]["dataset_id"] == STUB_DATASET_ID assert data[0]["event_data"]["job_id"] == STUB_JOB_ID assert data[0]["event_data"]["start_time"] is not None def test_export_dataset_metadata_enabled_by_config( ray_start_cluster_with_export_api_config, dummy_dataset_topology, dummy_dataset_topology_expected_output, ): _test_dataset_metadata_export( dummy_dataset_topology, dummy_dataset_topology_expected_output ) def test_export_dataset_metadata( ray_start_cluster_with_export_api_write, dummy_dataset_topology, dummy_dataset_topology_expected_output, ): _test_dataset_metadata_export( dummy_dataset_topology, dummy_dataset_topology_expected_output ) @pytest.mark.parametrize( "expected_logical_op_args", [ { "fn_args": [1], "fn_constructor_kwargs": [2], "fn_kwargs": {"a": 3}, "fn_constructor_args": {"b": 4}, "compute": ray.data.ActorPoolStrategy(max_tasks_in_flight_per_actor=2), }, ], ) def test_logical_op_args( ray_start_cluster_with_export_api_write, expected_logical_op_args ): class Udf: def __init__(self, a, b): self.a = a self.b = b def __call__(self, x): return x ds = ray.data.range(1).map_batches( Udf, **expected_logical_op_args, ) dag = ds._plan._logical_plan.dag args = dag._get_args() assert len(args) > 0, "Export args should not be empty" for k, v in expected_logical_op_args.items(): k = f"_{k}" assert k in args, f"Export args should contain key '{k}'" assert ( args[k] == v ), f"Export args for key '{k}' should match expected value {v}, found {args[k]}" def test_export_multiple_datasets( 
ray_start_cluster_with_export_api_write, dummy_dataset_topology, dummy_dataset_topology_expected_output, ): """Test that multiple datasets can be exported when export API is enabled.""" stats_actor = get_or_create_stats_actor() # Create a second dataset structure that's different from the dummy one second_topology = Topology( operators=[ Operator( name="Input", id="Input_0", uuid="second_uuid_0", input_dependencies=[], sub_stages=[], execution_start_time=1.0, execution_end_time=1.0, state="FINISHED", ), Operator( name="ReadRange->Map(<lambda>)", id="ReadRange->Map(<lambda>)_1", uuid="second_uuid_1", input_dependencies=["Input_0"], sub_stages=[], execution_start_time=2.0, execution_end_time=0.0, state="RUNNING", ), ], ) # Dataset IDs for each dataset first_dataset_id = "first_dataset" second_dataset_id = "second_dataset" # Register the first dataset ray.get( stats_actor.register_dataset.remote( dataset_tag=first_dataset_id, operator_tags=["ReadRange->Map(<lambda>)->Filter(<lambda>)"], topology=dummy_dataset_topology, job_id=STUB_JOB_ID, data_context=DataContext.get_current(), ) ) # Register the second dataset ray.get( stats_actor.register_dataset.remote( dataset_tag=second_dataset_id, operator_tags=["ReadRange->Map(<lambda>)"], topology=second_topology, job_id=STUB_JOB_ID, data_context=DataContext.get_current(), ) ) # Check that export files were created with both datasets data = _get_exported_data() assert len(data) == 2, f"Expected 2 exported datasets, got {len(data)}" # Create a map of dataset IDs to their exported data for easier verification datasets_by_id = {entry["event_data"]["dataset_id"]: entry for entry in data} # Verify first dataset assert ( first_dataset_id in datasets_by_id ), f"First dataset {first_dataset_id} not found in exported data" first_entry = datasets_by_id[first_dataset_id] assert first_entry["source_type"] == "EXPORT_DATASET_METADATA" assert ( first_entry["event_data"]["topology"] == dummy_dataset_topology_expected_output ) assert 
first_entry["event_data"]["job_id"] == STUB_JOB_ID assert first_entry["event_data"]["start_time"] is not None # Verify second dataset assert ( second_dataset_id in datasets_by_id ), f"Second dataset {second_dataset_id} not found in exported data" second_entry = datasets_by_id[second_dataset_id] assert second_entry["source_type"] == "EXPORT_DATASET_METADATA" assert second_entry["event_data"]["topology"] == asdict(second_topology) assert second_entry["event_data"]["job_id"] == STUB_JOB_ID assert second_entry["event_data"]["start_time"] is not None
DummyLogicalOperator
python
openai__gym
gym/envs/toy_text/cliffwalking.py
{ "start": 298, "end": 10940 }
class ____(Env):
    """
    This is a simple implementation of the Gridworld Cliff
    reinforcement learning task.

    Adapted from Example 6.6 (page 106) from [Reinforcement Learning: An Introduction
    by Sutton and Barto](http://incompleteideas.net/book/bookdraft2018jan1.pdf).

    With inspiration from:
    [https://github.com/dennybritz/reinforcement-learning/blob/master/lib/envs/cliff_walking.py]
    (https://github.com/dennybritz/reinforcement-learning/blob/master/lib/envs/cliff_walking.py)

    ### Description
    The board is a 4x12 matrix, with (using NumPy matrix indexing):
    - [3, 0] as the start at bottom-left
    - [3, 11] as the goal at bottom-right
    - [3, 1..10] as the cliff at bottom-center

    If the agent steps on the cliff, it returns to the start.
    An episode terminates when the agent reaches the goal.

    ### Actions
    There are 4 discrete deterministic actions:
    - 0: move up
    - 1: move right
    - 2: move down
    - 3: move left

    ### Observations
    There are 3x12 + 1 possible states. In fact, the agent cannot be at the cliff, nor at
    the goal (as this results in the end of the episode). It remains all the positions
    of the first 3 rows plus the bottom-left cell.
    The observation is simply the current position encoded as
    [flattened index](https://numpy.org/doc/stable/reference/generated/numpy.unravel_index.html).

    ### Reward
    Each time step incurs -1 reward, and stepping into the cliff incurs -100 reward.

    ### Arguments

    ```
    gym.make('CliffWalking-v0')
    ```

    ### Version History
    - v0: Initial version release
    """

    metadata = {
        "render_modes": ["human", "rgb_array", "ansi"],
        "render_fps": 4,
    }

    def __init__(self, render_mode: Optional[str] = None):
        self.shape = (4, 12)
        self.start_state_index = np.ravel_multi_index((3, 0), self.shape)

        self.nS = np.prod(self.shape)
        self.nA = 4

        # Cliff Location
        self._cliff = np.zeros(self.shape, dtype=bool)
        self._cliff[3, 1:-1] = True

        # Calculate transition probabilities and rewards
        self.P = {}
        for s in range(self.nS):
            position = np.unravel_index(s, self.shape)
            self.P[s] = {a: [] for a in range(self.nA)}
            self.P[s][UP] = self._calculate_transition_prob(position, [-1, 0])
            self.P[s][RIGHT] = self._calculate_transition_prob(position, [0, 1])
            self.P[s][DOWN] = self._calculate_transition_prob(position, [1, 0])
            self.P[s][LEFT] = self._calculate_transition_prob(position, [0, -1])

        # Calculate initial state distribution
        # We always start in state (3, 0)
        self.initial_state_distrib = np.zeros(self.nS)
        self.initial_state_distrib[self.start_state_index] = 1.0

        self.observation_space = spaces.Discrete(self.nS)
        self.action_space = spaces.Discrete(self.nA)

        self.render_mode = render_mode

        # pygame utils -- surfaces and images are created lazily in _render_gui
        # so that headless (ansi/no-render) usage never imports pygame.
        self.cell_size = (60, 60)
        self.window_size = (
            self.shape[1] * self.cell_size[1],
            self.shape[0] * self.cell_size[0],
        )
        self.window_surface = None
        self.clock = None
        self.elf_images = None
        self.start_img = None
        self.goal_img = None
        self.cliff_img = None
        self.mountain_bg_img = None
        self.near_cliff_img = None
        self.tree_img = None

    def _limit_coordinates(self, coord: np.ndarray) -> np.ndarray:
        """Prevent the agent from falling out of the grid world."""
        coord[0] = min(coord[0], self.shape[0] - 1)
        coord[0] = max(coord[0], 0)
        coord[1] = min(coord[1], self.shape[1] - 1)
        coord[1] = max(coord[1], 0)
        return coord

    def _calculate_transition_prob(self, current, delta):
        """Determine the outcome for an action. Transition Prob is always 1.0.

        Args:
            current: Current position on the grid as (row, col)
            delta: Change in position for transition

        Returns:
            Tuple of ``(1.0, new_state, reward, terminated)``
        """
        new_position = np.array(current) + np.array(delta)
        new_position = self._limit_coordinates(new_position).astype(int)
        new_state = np.ravel_multi_index(tuple(new_position), self.shape)
        if self._cliff[tuple(new_position)]:
            # Falling off the cliff teleports back to the start with -100 reward.
            return [(1.0, self.start_state_index, -100, False)]

        terminal_state = (self.shape[0] - 1, self.shape[1] - 1)
        is_terminated = tuple(new_position) == terminal_state
        return [(1.0, new_state, -1, is_terminated)]

    def step(self, a):
        """Take action ``a`` and return ``(obs, reward, terminated, truncated, info)``."""
        transitions = self.P[self.s][a]
        i = categorical_sample([t[0] for t in transitions], self.np_random)
        p, s, r, t = transitions[i]
        self.s = s
        self.lastaction = a

        if self.render_mode == "human":
            self.render()
        # Truncation is always False: the time limit is handled by a wrapper.
        return (int(s), r, t, False, {"prob": p})

    def reset(self, *, seed: Optional[int] = None, options: Optional[dict] = None):
        """Reset the environment to the start state and return ``(obs, info)``."""
        super().reset(seed=seed)
        self.s = categorical_sample(self.initial_state_distrib, self.np_random)
        self.lastaction = None

        if self.render_mode == "human":
            self.render()
        return int(self.s), {"prob": 1}

    def render(self):
        """Render according to ``self.render_mode`` ('human', 'rgb_array' or 'ansi')."""
        if self.render_mode is None:
            logger.warn(
                "You are calling render method without specifying any render mode. "
                "You can specify the render_mode at initialization, "
                # Bug fix: the hint previously read `gym("{id}", ...)`;
                # environments are created with `gym.make`.
                f'e.g. gym.make("{self.spec.id}", render_mode="rgb_array")'
            )
        elif self.render_mode == "ansi":
            return self._render_text()
        else:
            return self._render_gui(self.render_mode)

    def _render_gui(self, mode):
        """Draw the board with pygame; returns an RGB array for mode 'rgb_array'."""
        try:
            import pygame
        except ImportError:
            raise DependencyNotInstalled(
                "pygame is not installed, run `pip install gym[toy_text]`"
            )
        if self.window_surface is None:
            pygame.init()

            if mode == "human":
                pygame.display.init()
                pygame.display.set_caption("CliffWalking")
                self.window_surface = pygame.display.set_mode(self.window_size)
            else:  # rgb_array
                self.window_surface = pygame.Surface(self.window_size)
        if self.clock is None:
            self.clock = pygame.time.Clock()
        if self.elf_images is None:
            hikers = [
                path.join(path.dirname(__file__), "img/elf_up.png"),
                path.join(path.dirname(__file__), "img/elf_right.png"),
                path.join(path.dirname(__file__), "img/elf_down.png"),
                path.join(path.dirname(__file__), "img/elf_left.png"),
            ]
            self.elf_images = [
                pygame.transform.scale(pygame.image.load(f_name), self.cell_size)
                for f_name in hikers
            ]
        if self.start_img is None:
            file_name = path.join(path.dirname(__file__), "img/stool.png")
            self.start_img = pygame.transform.scale(
                pygame.image.load(file_name), self.cell_size
            )
        if self.goal_img is None:
            file_name = path.join(path.dirname(__file__), "img/cookie.png")
            self.goal_img = pygame.transform.scale(
                pygame.image.load(file_name), self.cell_size
            )
        if self.mountain_bg_img is None:
            bg_imgs = [
                path.join(path.dirname(__file__), "img/mountain_bg1.png"),
                path.join(path.dirname(__file__), "img/mountain_bg2.png"),
            ]
            self.mountain_bg_img = [
                pygame.transform.scale(pygame.image.load(f_name), self.cell_size)
                for f_name in bg_imgs
            ]
        if self.near_cliff_img is None:
            near_cliff_imgs = [
                path.join(path.dirname(__file__), "img/mountain_near-cliff1.png"),
                path.join(path.dirname(__file__), "img/mountain_near-cliff2.png"),
            ]
            self.near_cliff_img = [
                pygame.transform.scale(pygame.image.load(f_name), self.cell_size)
                for f_name in near_cliff_imgs
            ]
        if self.cliff_img is None:
            file_name = path.join(path.dirname(__file__), "img/mountain_cliff.png")
            self.cliff_img = pygame.transform.scale(
                pygame.image.load(file_name), self.cell_size
            )

        for s in range(self.nS):
            row, col = np.unravel_index(s, self.shape)
            pos = (col * self.cell_size[0], row * self.cell_size[1])
            # Alternate background tiles in a checkerboard pattern.
            check_board_mask = row % 2 ^ col % 2
            self.window_surface.blit(self.mountain_bg_img[check_board_mask], pos)

            if self._cliff[row, col]:
                self.window_surface.blit(self.cliff_img, pos)
            if row < self.shape[0] - 1 and self._cliff[row + 1, col]:
                self.window_surface.blit(self.near_cliff_img[check_board_mask], pos)
            if s == self.start_state_index:
                self.window_surface.blit(self.start_img, pos)
            if s == self.nS - 1:
                self.window_surface.blit(self.goal_img, pos)
            if s == self.s:
                elf_pos = (pos[0], pos[1] - 0.1 * self.cell_size[1])
                last_action = self.lastaction if self.lastaction is not None else 2
                self.window_surface.blit(self.elf_images[last_action], elf_pos)

        if mode == "human":
            pygame.event.pump()
            pygame.display.update()
            self.clock.tick(self.metadata["render_fps"])
        else:  # rgb_array
            return np.transpose(
                np.array(pygame.surfarray.pixels3d(self.window_surface)), axes=(1, 0, 2)
            )

    def _render_text(self):
        """Render the grid as text: x = agent, T = goal, C = cliff, o = floor."""
        outfile = StringIO()

        for s in range(self.nS):
            position = np.unravel_index(s, self.shape)
            if self.s == s:
                output = " x "
            # Print terminal state
            elif position == (3, 11):
                output = " T "
            elif self._cliff[position]:
                output = " C "
            else:
                output = " o "

            if position[1] == 0:
                output = output.lstrip()
            if position[1] == self.shape[1] - 1:
                output = output.rstrip()
                output += "\n"

            outfile.write(output)
        outfile.write("\n")

        with closing(outfile):
            return outfile.getvalue()
CliffWalkingEnv
python
jazzband__django-model-utils
tests/test_fields/test_field_tracker.py
{ "start": 36286, "end": 38286 }
class ____(TestCase):
    """Exercise the tracker's context-manager and decorator entry points."""

    def setUp(self) -> None:
        self.instance = Tracked.objects.create(number=1)
        self.tracker = self.instance.tracker

    def assertChanged(self, *fields: str) -> None:
        # Every listed field must be reported as modified.
        for field_name in fields:
            self.assertTrue(self.tracker.has_changed(field_name))

    def assertNotChanged(self, *fields: str) -> None:
        # Every listed field must be reported as pristine.
        for field_name in fields:
            self.assertFalse(self.tracker.has_changed(field_name))

    def test_context_manager(self) -> None:
        """Nested tracker contexts each reset state on exit."""
        with self.tracker:
            with self.tracker:
                self.instance.name = 'new'
                self.assertChanged('name')
            self.assertChanged('name')
        self.assertNotChanged('name')

    def test_context_manager_fields(self) -> None:
        """A context tracking a subset of fields only resets that subset."""
        with self.tracker('number'):
            with self.tracker('number', 'name'):
                self.instance.name = 'new'
                self.instance.number += 1
                self.assertChanged('name', 'number')
            self.assertChanged('number')
            self.assertNotChanged('name')
        self.assertNotChanged('number', 'name')

    def test_tracker_decorator(self) -> None:
        """The decorator behaves like wrapping the call in a tracker context."""
        @Tracked.tracker
        def mutate(tracked_obj: Tracked) -> None:
            tracked_obj.name = 'new'
            self.assertChanged('name')

        mutate(self.instance)
        self.assertNotChanged('name')

    def test_tracker_decorator_fields(self) -> None:
        """The decorator's ``fields`` argument limits which fields are reset."""
        @Tracked.tracker(fields=['name'])
        def mutate(tracked_obj: Tracked) -> None:
            tracked_obj.name = 'new'
            tracked_obj.number += 1
            self.assertChanged('name', 'number')

        mutate(self.instance)
        self.assertChanged('number')
        self.assertNotChanged('name')

    def test_tracker_context_with_save(self) -> None:
        """Saving inside a context does not clear changes until the context exits."""
        with self.tracker:
            self.instance.name = 'new'
            self.instance.save()
            self.assertChanged('name')
        self.assertNotChanged('name')
TrackerContextDecoratorTests
python
catalyst-team__catalyst
catalyst/callbacks/optimizer.py
{ "start": 348, "end": 3655 }
class ____(IOptimizerCallback):
    """Optimizer callback, abstraction over optimizer step.

    On each training batch this callback increments an accumulation counter
    and, once ``accumulation_steps`` batches have been seen, calls
    ``optimizer.step()`` followed by ``optimizer.zero_grad()``. Current
    learning rate and momentum are written into the runner's batch metrics
    (during training) and loader metrics.

    Args:
        metric_key: a key to get loss from ``runner.batch_metrics``
        optimizer_key: a key to select a optimizer from ``runner.optimizer``
            in case there are several of them and they are in a dictionary
            format.
        accumulation_steps: number of steps before ``optimizer.step()``
        grad_clip_fn: callable gradient clipping function or its registry name
        grad_clip_params: key-value parameters for grad_clip_fn

    .. note::
        Please follow the `minimal examples`_ sections for more use cases.

        .. _`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples  # noqa: E501, W505
    """

    def __init__(
        self,
        metric_key: str,
        optimizer_key: str = None,
        accumulation_steps: int = 1,
        grad_clip_fn: Union[str, Callable] = None,
        grad_clip_params: Dict = None,
    ):
        """Init."""
        super().__init__()
        self.metric_key = metric_key
        self.optimizer_key = optimizer_key
        self.optimizer = None
        # NOTE(review): `criterion` is assigned but never used in this class —
        # confirm whether it is consumed by a subclass or can be removed.
        self.criterion = None
        # A string name is resolved through the global registry; a callable is
        # used as-is.
        if isinstance(grad_clip_fn, str):
            self.grad_clip_fn = REGISTRY.get(grad_clip_fn)
        else:
            self.grad_clip_fn = grad_clip_fn
        if grad_clip_params is not None:
            self.grad_clip_fn = partial(self.grad_clip_fn, **grad_clip_params)
        # NOTE(review): `grad_clip_fn` is resolved here but never invoked in
        # `on_batch_end` — confirm clipping happens elsewhere (e.g. the engine)
        # or wire it in before `optimizer.step()`.

        self.accumulation_steps: int = accumulation_steps
        self._accumulation_counter: int = 0

        # Metric-name prefixes; namespaced per optimizer when several are used.
        if self.optimizer_key is not None:
            self._prefix = f"{self.optimizer_key}"
            self._prefix_lr = f"lr/{self._prefix}"
            self._prefix_momentum = f"momentum/{self._prefix}"
            self._prefix_gradient = f"gradient/{self._prefix}"
        else:
            self._prefix_lr = "lr"
            self._prefix_momentum = "momentum"
            self._prefix_gradient = "gradient"

    def _get_lr_momentum_stats(self) -> Dict:
        """Return current lr/momentum of the first param group as a metric dict."""
        lr_list = [param_group["lr"] for param_group in self.optimizer.param_groups]
        momentum_list = get_optimizer_momentum_list(self.optimizer)
        # Only the first param group's values are reported.
        stats = {self._prefix_lr: lr_list[0], self._prefix_momentum: momentum_list[0]}
        return stats

    def on_experiment_start(self, runner: "IRunner") -> None:
        """Event handler."""
        self.optimizer = get_attr(runner, key="optimizer", inner_key=self.optimizer_key)
        assert self.optimizer is not None

    def on_batch_end(self, runner: "IRunner"):
        """Event handler."""
        if runner.is_train_loader:
            self._accumulation_counter += 1
            need_gradient_step = (
                self._accumulation_counter % self.accumulation_steps == 0
            )

            if need_gradient_step:
                self.optimizer.step()
                self.optimizer.zero_grad()

            runner.batch_metrics.update(self._get_lr_momentum_stats())

    def on_loader_end(self, runner: "IRunner") -> None:
        """Event handler."""
        runner.loader_metrics.update(self._get_lr_momentum_stats())


__all__ = ["OptimizerCallback"]
OptimizerCallback
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/util/queue.py
{ "start": 1192, "end": 1287 }
class ____(Exception):
    """Exception raised by Queue.get(block=0)/get_nowait()."""
Empty
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/hooks/test_base_aws.py
{ "start": 6160, "end": 15534 }
class ____:
    """Unit tests for ``resolve_session_factory`` and ``BaseSessionFactory``.

    Covers: session-factory class resolution from config, connection-wrapper
    handling, region/botocore-config precedence (explicit argument wins over
    connection extras), sync and async (aiobotocore) session creation, and
    assume-role credential flows.
    """

    @conf_vars({("aws", "session_factory"): "unit.amazon.aws.hooks.test_base_aws.CustomSessionFactory"})
    def test_resolve_session_factory_class(self):
        cls = resolve_session_factory()
        assert issubclass(cls, CustomSessionFactory)

    @conf_vars({("aws", "session_factory"): ""})
    def test_resolve_session_factory_class_fallback_to_base_session_factory(self):
        cls = resolve_session_factory()
        assert issubclass(cls, BaseSessionFactory)

    def test_resolve_session_factory_class_fallback_to_base_session_factory_no_config(self):
        cls = resolve_session_factory()
        assert issubclass(cls, BaseSessionFactory)

    @pytest.mark.parametrize("mock_conn", ["unwrapped", "wrapped"], indirect=True)
    def test_conn_property(self, mock_conn):
        sf = BaseSessionFactory(conn=mock_conn, region_name=None, config=None)
        session_factory_conn = sf.conn
        assert isinstance(session_factory_conn, AwsConnectionWrapper)
        assert session_factory_conn.conn_id == MOCK_AWS_CONN_ID
        assert session_factory_conn.conn_type == MOCK_CONN_TYPE
        # The wrapper must be cached: repeated access returns the same object.
        assert sf.conn is session_factory_conn

    def test_empty_conn_property(self):
        sf = BaseSessionFactory(conn=None, region_name=None, config=None)
        assert isinstance(sf.conn, AwsConnectionWrapper)

    @pytest.mark.parametrize(
        ("region_name", "conn_region_name"),
        [
            ("eu-west-1", "cn-north-1"),
            ("eu-west-1", None),
            (None, "cn-north-1"),
            (None, None),
        ],
    )
    def test_resolve_region_name(self, region_name, conn_region_name):
        conn = AwsConnectionWrapper(
            conn=Connection(conn_type=MOCK_CONN_TYPE, conn_id=MOCK_AWS_CONN_ID),
            region_name=conn_region_name,
        )
        sf = BaseSessionFactory(conn=conn, region_name=region_name, config=None)
        # Explicit argument takes precedence over the connection's region.
        expected = region_name or conn_region_name
        assert sf.region_name == expected

    @pytest.mark.parametrize(
        ("botocore_config", "conn_botocore_config"),
        [
            (Config(s3={"us_east_1_regional_endpoint": "regional"}), None),
            (Config(s3={"us_east_1_regional_endpoint": "regional"}), Config(region_name="ap-southeast-1")),
            (None, Config(region_name="ap-southeast-1")),
            (None, None),
        ],
    )
    def test_resolve_botocore_config(self, botocore_config, conn_botocore_config):
        conn = AwsConnectionWrapper(
            conn=Connection(conn_type=MOCK_CONN_TYPE, conn_id=MOCK_AWS_CONN_ID),
            botocore_config=conn_botocore_config,
        )
        sf = BaseSessionFactory(conn=conn, config=botocore_config)
        # Explicit argument takes precedence over the connection's config.
        expected = botocore_config or conn_botocore_config
        assert sf.config == expected

    @pytest.mark.parametrize("region_name", ["eu-central-1", None])
    @mock.patch("boto3.session.Session", new_callable=mock.PropertyMock, return_value=MOCK_BOTO3_SESSION)
    def test_create_session_boto3_credential_strategy(self, mock_boto3_session, region_name):
        sf = BaseSessionFactory(conn=AwsConnectionWrapper(conn=None), region_name=region_name, config=None)
        session = sf.create_session()
        mock_boto3_session.assert_called_once_with(region_name=region_name)
        assert session == MOCK_BOTO3_SESSION

    @pytest.mark.parametrize("region_name", ["eu-central-1", None])
    @pytest.mark.parametrize("profile_name", ["default", None])
    @mock.patch("boto3.session.Session", new_callable=mock.PropertyMock, return_value=MOCK_BOTO3_SESSION)
    def test_create_session_from_credentials(self, mock_boto3_session, region_name, profile_name):
        mock_conn = Connection(
            conn_type=MOCK_CONN_TYPE, conn_id=MOCK_AWS_CONN_ID, extra={"profile_name": profile_name}
        )
        mock_conn_config = AwsConnectionWrapper(conn=mock_conn)
        sf = BaseSessionFactory(conn=mock_conn_config, region_name=region_name, config=None)
        session = sf.create_session()
        expected_arguments = mock_conn_config.session_kwargs
        if region_name:
            expected_arguments["region_name"] = region_name
        mock_boto3_session.assert_called_once_with(**expected_arguments)
        assert session == MOCK_BOTO3_SESSION

    @pytest.mark.parametrize("region_name", ["eu-central-1", None])
    @pytest.mark.parametrize("profile_name", ["default", None])
    def test_async_create_session_from_credentials(self, region_name, profile_name):
        mock_conn = Connection(
            conn_type=MOCK_CONN_TYPE, conn_id=MOCK_AWS_CONN_ID, extra={"profile_name": profile_name}
        )
        mock_conn_config = AwsConnectionWrapper(conn=mock_conn)
        sf = BaseSessionFactory(conn=mock_conn_config, region_name=region_name, config=None)
        async_session = sf.create_session(deferrable=True)
        if region_name:
            session_region = async_session.get_config_variable("region")
            assert session_region == region_name
        session_profile = async_session.get_config_variable("profile")
        import aiobotocore.session

        assert session_profile == profile_name
        assert isinstance(async_session, aiobotocore.session.AioSession)

    @pytest.mark.asyncio
    async def test_async_create_a_session_from_credentials_without_token(self):
        mock_conn = Connection(
            conn_type=MOCK_CONN_TYPE,
            conn_id=MOCK_AWS_CONN_ID,
            extra={
                "aws_access_key_id": "test_aws_access_key_id",
                "aws_secret_access_key": "test_aws_secret_access_key",
                "region_name": "eu-central-1",
            },
        )
        mock_conn_config = AwsConnectionWrapper(conn=mock_conn)
        sf = BaseSessionFactory(conn=mock_conn_config, config=None)
        async_session = sf.create_session(deferrable=True)
        cred = await async_session.get_credentials()
        import aiobotocore.session

        assert cred.access_key == "test_aws_access_key_id"
        assert cred.secret_key == "test_aws_secret_access_key"
        assert cred.token is None
        assert isinstance(async_session, aiobotocore.session.AioSession)

    # Shared parametrization: assume-role both with and without initial
    # static credentials in the connection extras.
    config_for_credentials_test = [
        (
            "assume-with-initial-creds",
            {
                "aws_access_key_id": "mock_aws_access_key_id",
                "aws_secret_access_key": "mock_aws_access_key_id",
                "aws_session_token": "mock_aws_session_token",
            },
        ),
        ("assume-without-initial-creds", {}),
    ]

    @mock_aws
    @pytest.mark.parametrize(
        ("conn_id", "conn_extra"),
        config_for_credentials_test,
    )
    @pytest.mark.parametrize("region_name", ["ap-southeast-2", "sa-east-1"])
    def test_get_credentials_from_role_arn(self, conn_id, conn_extra, region_name):
        """Test creation session which set role_arn extra in connection."""
        extra = {
            **conn_extra,
            "role_arn": "arn:aws:iam::123456:role/role_arn",
            "region_name": region_name,
        }
        conn = AwsConnectionWrapper.from_connection_metadata(conn_id=conn_id, extra=extra)
        sf = BaseSessionFactory(conn=conn)
        session = sf.create_session()
        assert session.region_name == region_name
        # Validate method of botocore credentials provider.
        # It shouldn't be 'explicit' which refers in this case to initial credentials.
        assert session.get_credentials().method == "sts-assume-role"
        assert isinstance(session, boto3.session.Session)

    @pytest.mark.asyncio
    @pytest.mark.parametrize(
        ("conn_id", "conn_extra"),
        config_for_credentials_test,
    )
    @pytest.mark.parametrize("region_name", ["ap-southeast-2", "sa-east-1"])
    async def test_async_get_credentials_from_role_arn(self, conn_id, conn_extra, region_name):
        """Test RefreshableCredentials with assume_role for async_conn."""
        with mock.patch(
            "airflow.providers.amazon.aws.hooks.base_aws.BaseSessionFactory._refresh_credentials"
        ) as mock_refresh:

            def side_effect():
                return {
                    "access_key": "mock-AccessKeyId",
                    "secret_key": "mock-SecretAccessKey",
                    "token": "mock-SessionToken",
                    "expiry_time": datetime.now(timezone.utc).isoformat(),
                }

            mock_refresh.side_effect = side_effect
            extra = {
                **conn_extra,
                "role_arn": "arn:aws:iam::123456:role/role_arn",
                "region_name": region_name,
            }
            conn = AwsConnectionWrapper.from_connection_metadata(conn_id=conn_id, extra=extra)
            sf = BaseSessionFactory(conn=conn)
            session = sf.create_session(deferrable=True)
            assert session.get_config_variable("region") == region_name
            # Validate method of botocore credentials provider.
            # It shouldn't be 'explicit' which refers in this case to initial credentials.
            credentials = await session.get_credentials()
            import aiobotocore.session

            assert inspect.iscoroutinefunction(credentials.get_frozen_credentials)
            assert credentials.method == "sts-assume-role"
            assert isinstance(session, aiobotocore.session.AioSession)
TestSessionFactory
python
psf__black
src/black/brackets.py
{ "start": 1216, "end": 12410 }
class ____: """Keeps track of brackets on a line.""" depth: int = 0 bracket_match: dict[tuple[Depth, NodeType], Leaf] = field(default_factory=dict) delimiters: dict[LeafID, Priority] = field(default_factory=dict) previous: Leaf | None = None _for_loop_depths: list[int] = field(default_factory=list) _lambda_argument_depths: list[int] = field(default_factory=list) invisible: list[Leaf] = field(default_factory=list) def mark(self, leaf: Leaf) -> None: """Mark `leaf` with bracket-related metadata. Keep track of delimiters. All leaves receive an int `bracket_depth` field that stores how deep within brackets a given leaf is. 0 means there are no enclosing brackets that started on this line. If a leaf is itself a closing bracket and there is a matching opening bracket earlier, it receives an `opening_bracket` field with which it forms a pair. This is a one-directional link to avoid reference cycles. Closing bracket without opening happens on lines continued from previous breaks, e.g. `) -> "ReturnType":` as part of a funcdef where we place the return type annotation on its own line of the previous closing RPAR. If a leaf is a delimiter (a token on which Black can split the line if needed) and it's on depth 0, its `id()` is stored in the tracker's `delimiters` field. 
""" if leaf.type == token.COMMENT: return if ( self.depth == 0 and leaf.type in CLOSING_BRACKETS and (self.depth, leaf.type) not in self.bracket_match ): return self.maybe_decrement_after_for_loop_variable(leaf) self.maybe_decrement_after_lambda_arguments(leaf) if leaf.type in CLOSING_BRACKETS: self.depth -= 1 try: opening_bracket = self.bracket_match.pop((self.depth, leaf.type)) except KeyError as e: raise BracketMatchError( "Unable to match a closing bracket to the following opening" f" bracket: {leaf}" ) from e leaf.opening_bracket = opening_bracket if not leaf.value: self.invisible.append(leaf) leaf.bracket_depth = self.depth if self.depth == 0: delim = is_split_before_delimiter(leaf, self.previous) if delim and self.previous is not None: self.delimiters[id(self.previous)] = delim else: delim = is_split_after_delimiter(leaf) if delim: self.delimiters[id(leaf)] = delim if leaf.type in OPENING_BRACKETS: self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf self.depth += 1 if not leaf.value: self.invisible.append(leaf) self.previous = leaf self.maybe_increment_lambda_arguments(leaf) self.maybe_increment_for_loop_variable(leaf) def any_open_for_or_lambda(self) -> bool: """Return True if there is an open for or lambda expression on the line. See maybe_increment_for_loop_variable and maybe_increment_lambda_arguments for details.""" return bool(self._for_loop_depths or self._lambda_argument_depths) def any_open_brackets(self) -> bool: """Return True if there is an yet unmatched open bracket on the line.""" return bool(self.bracket_match) def max_delimiter_priority(self, exclude: Iterable[LeafID] = ()) -> Priority: """Return the highest priority of a delimiter found on the line. Values are consistent with what `is_split_*_delimiter()` return. Raises ValueError on no delimiters. 
""" return max(v for k, v in self.delimiters.items() if k not in exclude) def delimiter_count_with_priority(self, priority: Priority = 0) -> int: """Return the number of delimiters with the given `priority`. If no `priority` is passed, defaults to max priority on the line. """ if not self.delimiters: return 0 priority = priority or self.max_delimiter_priority() return sum(1 for p in self.delimiters.values() if p == priority) def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool: """In a for loop, or comprehension, the variables are often unpacks. To avoid splitting on the comma in this situation, increase the depth of tokens between `for` and `in`. """ if leaf.type == token.NAME and leaf.value == "for": self.depth += 1 self._for_loop_depths.append(self.depth) return True return False def maybe_decrement_after_for_loop_variable(self, leaf: Leaf) -> bool: """See `maybe_increment_for_loop_variable` above for explanation.""" if ( self._for_loop_depths and self._for_loop_depths[-1] == self.depth and leaf.type == token.NAME and leaf.value == "in" ): self.depth -= 1 self._for_loop_depths.pop() return True return False def maybe_increment_lambda_arguments(self, leaf: Leaf) -> bool: """In a lambda expression, there might be more than one argument. To avoid splitting on the comma in this situation, increase the depth of tokens between `lambda` and `:`. 
""" if leaf.type == token.NAME and leaf.value == "lambda": self.depth += 1 self._lambda_argument_depths.append(self.depth) return True return False def maybe_decrement_after_lambda_arguments(self, leaf: Leaf) -> bool: """See `maybe_increment_lambda_arguments` above for explanation.""" if ( self._lambda_argument_depths and self._lambda_argument_depths[-1] == self.depth and leaf.type == token.COLON ): self.depth -= 1 self._lambda_argument_depths.pop() return True return False def get_open_lsqb(self) -> Leaf | None: """Return the most recent opening square bracket (if any).""" return self.bracket_match.get((self.depth - 1, token.RSQB)) def is_split_after_delimiter(leaf: Leaf) -> Priority: """Return the priority of the `leaf` delimiter, given a line break after it. The delimiter priorities returned here are from those delimiters that would cause a line break after themselves. Higher numbers are higher priority. """ if leaf.type == token.COMMA: return COMMA_PRIORITY return 0 def is_split_before_delimiter(leaf: Leaf, previous: Leaf | None = None) -> Priority: """Return the priority of the `leaf` delimiter, given a line break before it. The delimiter priorities returned here are from those delimiters that would cause a line break before themselves. Higher numbers are higher priority. """ if is_vararg(leaf, within=VARARGS_PARENTS | UNPACKING_PARENTS): # * and ** might also be MATH_OPERATORS but in this case they are not. # Don't treat them as a delimiter. 
return 0 if ( leaf.type == token.DOT and leaf.parent and leaf.parent.type not in {syms.import_from, syms.dotted_name} and (previous is None or previous.type in CLOSING_BRACKETS) ): return DOT_PRIORITY if ( leaf.type in MATH_OPERATORS and leaf.parent and leaf.parent.type not in {syms.factor, syms.star_expr} ): return MATH_PRIORITIES[leaf.type] if leaf.type in COMPARATORS: return COMPARATOR_PRIORITY if ( leaf.type == token.STRING and previous is not None and previous.type == token.STRING ): return STRING_PRIORITY if leaf.type not in {token.NAME, token.ASYNC}: return 0 if ( leaf.value == "for" and leaf.parent and leaf.parent.type in {syms.comp_for, syms.old_comp_for} or leaf.type == token.ASYNC ): if ( not isinstance(leaf.prev_sibling, Leaf) or leaf.prev_sibling.value != "async" ): return COMPREHENSION_PRIORITY if ( leaf.value == "if" and leaf.parent and leaf.parent.type in {syms.comp_if, syms.old_comp_if} ): return COMPREHENSION_PRIORITY if leaf.value in {"if", "else"} and leaf.parent and leaf.parent.type == syms.test: return TERNARY_PRIORITY if leaf.value == "is": return COMPARATOR_PRIORITY if ( leaf.value == "in" and leaf.parent and leaf.parent.type in {syms.comp_op, syms.comparison} and not ( previous is not None and previous.type == token.NAME and previous.value == "not" ) ): return COMPARATOR_PRIORITY if ( leaf.value == "not" and leaf.parent and leaf.parent.type == syms.comp_op and not ( previous is not None and previous.type == token.NAME and previous.value == "is" ) ): return COMPARATOR_PRIORITY if leaf.value in LOGIC_OPERATORS and leaf.parent: return LOGIC_PRIORITY return 0 def max_delimiter_priority_in_atom(node: LN) -> Priority: """Return maximum delimiter priority inside `node`. This is specific to atoms with contents contained in a pair of parentheses. If `node` isn't an atom or there are no enclosing parentheses, returns 0. 
""" if node.type != syms.atom: return 0 first = node.children[0] last = node.children[-1] if not (first.type == token.LPAR and last.type == token.RPAR): return 0 bt = BracketTracker() for c in node.children[1:-1]: if isinstance(c, Leaf): bt.mark(c) else: for leaf in c.leaves(): bt.mark(leaf) try: return bt.max_delimiter_priority() except ValueError: return 0 def get_leaves_inside_matching_brackets(leaves: Sequence[Leaf]) -> set[LeafID]: """Return leaves that are inside matching brackets. The input `leaves` can have non-matching brackets at the head or tail parts. Matching brackets are included. """ try: # Start with the first opening bracket and ignore closing brackets before. start_index = next( i for i, l in enumerate(leaves) if l.type in OPENING_BRACKETS ) except StopIteration: return set() bracket_stack = [] ids = set() for i in range(start_index, len(leaves)): leaf = leaves[i] if leaf.type in OPENING_BRACKETS: bracket_stack.append((BRACKET[leaf.type], i)) if leaf.type in CLOSING_BRACKETS: if bracket_stack and leaf.type == bracket_stack[-1][0]: _, start = bracket_stack.pop() for j in range(start, i + 1): ids.add(id(leaves[j])) else: break return ids
BracketTracker
python
getsentry__sentry
tests/sentry/issues/test_status_change_consumer.py
{ "start": 1823, "end": 9262 }
class ____(IssueOccurrenceTestBase): @django_db_all def setUp(self) -> None: super().setUp() message = get_test_message(self.project.id) with self.feature("organizations:profile-file-io-main-thread-ingest"): result = _process_message(message) assert result is not None self.occurrence = result[0] assert self.occurrence is not None self.group = Group.objects.get(grouphash__hash=self.occurrence.fingerprint[0]) self.fingerprint = ["touch-id"] def _assert_statuses_set( self, status: int, substatus: int | None, group_history_status: int, activity_type: ActivityType, priority: int | None = None, group_inbox_reason: GroupInboxReason | None = None, ) -> None: self.group.refresh_from_db() assert self.group.status == status assert self.group.substatus == substatus assert GroupHistory.objects.filter( group_id=self.group.id, status=group_history_status ).exists() assert Activity.objects.filter(group_id=self.group.id, type=activity_type.value).exists() if priority: assert self.group.priority == priority assert Activity.objects.filter( group_id=self.group.id, type=ActivityType.SET_PRIORITY.value ).exists() if group_inbox_reason: assert GroupInbox.objects.filter( group=self.group, reason=group_inbox_reason.value ).exists() else: assert not GroupInbox.objects.filter(group=self.group).exists() @django_db_all @patch("sentry.issues.status_change_consumer.kick_off_status_syncs") def test_valid_payload_resolved(self, mock_kick_off_status_syncs: MagicMock) -> None: message = get_test_message_status_change(self.project.id, fingerprint=["touch-id"]) result = _process_message(message) assert result is not None group_info = result[1] assert group_info is not None group = group_info.group group.refresh_from_db() self._assert_statuses_set( GroupStatus.RESOLVED, None, GroupHistoryStatus.RESOLVED, ActivityType.SET_RESOLVED, group_inbox_reason=None, ) mock_kick_off_status_syncs.apply_async.assert_called_once_with( kwargs={"project_id": self.project.id, "group_id": self.group.id} ) 
@patch("sentry.issues.status_change_consumer.kick_off_status_syncs") @patch( "sentry.workflow_engine.models.incident_groupopenperiod.update_incident_based_on_open_period_status_change" ) # rollout code that is independently tested def test_valid_payload_resolved_open_period_activity( self, mock_update_igop: MagicMock, mock_kick_off_status_syncs: MagicMock ) -> None: self.group.type = MetricIssue.type_id self.group.save() message = get_test_message_status_change( self.project.id, fingerprint=["touch-id"], detector_id=1 ) result = _process_message(message) assert result is not None group_info = result[1] assert group_info is not None group = group_info.group group.refresh_from_db() self._assert_statuses_set( GroupStatus.RESOLVED, None, GroupHistoryStatus.RESOLVED, ActivityType.SET_RESOLVED, group_inbox_reason=None, ) mock_kick_off_status_syncs.apply_async.assert_called_once_with( kwargs={"project_id": self.project.id, "group_id": self.group.id} ) open_period = get_latest_open_period(self.group) assert open_period is not None assert open_period.date_ended is not None open_period_closed_activity = GroupOpenPeriodActivity.objects.get( group_open_period=open_period, type=OpenPeriodActivityType.CLOSED ) assert open_period_closed_activity @patch("sentry.issues.status_change_consumer.kick_off_status_syncs") def test_valid_payload_archived_forever(self, mock_kick_off_status_syncs: MagicMock) -> None: message = get_test_message_status_change( self.project.id, fingerprint=self.fingerprint, new_status=GroupStatus.IGNORED, new_substatus=GroupSubStatus.FOREVER, ) result = _process_message(message) assert result is not None group_info = result[1] assert group_info is not None group = group_info.group group.refresh_from_db() self._assert_statuses_set( GroupStatus.IGNORED, GroupSubStatus.FOREVER, GroupHistoryStatus.ARCHIVED_FOREVER, ActivityType.SET_IGNORED, group_inbox_reason=None, ) mock_kick_off_status_syncs.apply_async.assert_called_once_with( kwargs={"project_id": 
self.project.id, "group_id": self.group.id} ) @patch("sentry.integrations.tasks.kick_off_status_syncs.kick_off_status_syncs") def test_valid_payload_unresolved_escalating( self, mock_kick_off_status_syncs: MagicMock ) -> None: self.group.update( status=GroupStatus.IGNORED, substatus=GroupSubStatus.UNTIL_ESCALATING, priority=PriorityLevel.MEDIUM, ) message = get_test_message_status_change( self.project.id, fingerprint=self.fingerprint, new_status=GroupStatus.UNRESOLVED, new_substatus=GroupSubStatus.ESCALATING, ) result = _process_message(message) assert result is not None group_info = result[1] assert group_info is not None group = group_info.group group.refresh_from_db() self._assert_statuses_set( GroupStatus.UNRESOLVED, GroupSubStatus.ESCALATING, GroupHistoryStatus.ESCALATING, ActivityType.SET_ESCALATING, PriorityLevel.HIGH, group_inbox_reason=GroupInboxReason.ESCALATING, ) mock_kick_off_status_syncs.apply_async.assert_called_once_with( kwargs={"project_id": self.project.id, "group_id": self.group.id} ) def test_valid_payload_auto_ongoing(self) -> None: self.group.update( status=GroupStatus.UNRESOLVED, substatus=GroupSubStatus.ESCALATING, priority=PriorityLevel.HIGH, ) self.group.data.get("metadata", {}).update({"initial_priority": PriorityLevel.MEDIUM}) self.group.save() message = get_test_message_status_change( self.project.id, fingerprint=self.fingerprint, new_status=GroupStatus.UNRESOLVED, new_substatus=GroupSubStatus.ONGOING, ) result = _process_message(message) assert result is not None group_info = result[1] assert group_info is not None group = group_info.group group.refresh_from_db() self._assert_statuses_set( GroupStatus.UNRESOLVED, GroupSubStatus.ONGOING, GroupHistoryStatus.ONGOING, ActivityType.AUTO_SET_ONGOING, PriorityLevel.MEDIUM, group_inbox_reason=GroupInboxReason.ONGOING, )
StatusChangeProcessMessageTest
python
Netflix__metaflow
metaflow/_vendor/v3_7/typeguard/_transformer.py
{ "start": 15935, "end": 43918 }
class ____(NodeTransformer): def __init__( self, target_path: Sequence[str] | None = None, target_lineno: int | None = None ) -> None: self._target_path = tuple(target_path) if target_path else None self._memo = self._module_memo = TransformMemo(None, None, ()) self.names_used_in_annotations: set[str] = set() self.target_node: FunctionDef | AsyncFunctionDef | None = None self.target_lineno = target_lineno @contextmanager def _use_memo( self, node: ClassDef | FunctionDef | AsyncFunctionDef ) -> Generator[None, Any, None]: new_memo = TransformMemo(node, self._memo, self._memo.path + (node.name,)) if isinstance(node, (FunctionDef, AsyncFunctionDef)): new_memo.should_instrument = ( self._target_path is None or new_memo.path == self._target_path ) if new_memo.should_instrument: # Check if the function is a generator function detector = GeneratorDetector() detector.visit(node) # Extract yield, send and return types where possible from a subscripted # annotation like Generator[int, str, bool] return_annotation = deepcopy(node.returns) if detector.contains_yields and new_memo.name_matches( return_annotation, *generator_names ): if isinstance(return_annotation, Subscript): annotation_slice = return_annotation.slice # Python < 3.9 if isinstance(annotation_slice, Index): annotation_slice = ( annotation_slice.value # type: ignore[attr-defined] ) if isinstance(annotation_slice, Tuple): items = annotation_slice.elts else: items = [annotation_slice] if len(items) > 0: new_memo.yield_annotation = self._convert_annotation( items[0] ) if len(items) > 1: new_memo.send_annotation = self._convert_annotation( items[1] ) if len(items) > 2: new_memo.return_annotation = self._convert_annotation( items[2] ) else: new_memo.return_annotation = self._convert_annotation( return_annotation ) if isinstance(node, AsyncFunctionDef): new_memo.is_async = True old_memo = self._memo self._memo = new_memo yield self._memo = old_memo def _get_import(self, module: str, name: str) -> Name: memo = 
self._memo if self._target_path else self._module_memo return memo.get_import(module, name) @overload def _convert_annotation(self, annotation: None) -> None: ... @overload def _convert_annotation(self, annotation: expr) -> expr: ... def _convert_annotation(self, annotation: expr | None) -> expr | None: if annotation is None: return None # Convert PEP 604 unions (x | y) and generic built-in collections where # necessary, and undo forward references new_annotation = cast(expr, AnnotationTransformer(self).visit(annotation)) if isinstance(new_annotation, expr): new_annotation = ast.copy_location(new_annotation, annotation) # Store names used in the annotation names = {node.id for node in walk(new_annotation) if isinstance(node, Name)} self.names_used_in_annotations.update(names) return new_annotation def visit_Name(self, node: Name) -> Name: self._memo.local_names.add(node.id) return node def visit_Module(self, node: Module) -> Module: self.generic_visit(node) self._memo.insert_imports(node) fix_missing_locations(node) return node def visit_Import(self, node: Import) -> Import: for name in node.names: self._memo.local_names.add(name.asname or name.name) self._memo.imported_names[name.asname or name.name] = name.name return node def visit_ImportFrom(self, node: ImportFrom) -> ImportFrom: for name in node.names: if name.name != "*": alias = name.asname or name.name self._memo.local_names.add(alias) self._memo.imported_names[alias] = f"{node.module}.{name.name}" return node def visit_ClassDef(self, node: ClassDef) -> ClassDef | None: self._memo.local_names.add(node.name) # Eliminate top level classes not belonging to the target path if ( self._target_path is not None and not self._memo.path and node.name != self._target_path[0] ): return None with self._use_memo(node): for decorator in node.decorator_list.copy(): if self._memo.name_matches(decorator, "typeguard.typechecked"): # Remove the decorator to prevent duplicate instrumentation 
node.decorator_list.remove(decorator) # Store any configuration overrides if isinstance(decorator, Call) and decorator.keywords: self._memo.configuration_overrides.update( {kw.arg: kw.value for kw in decorator.keywords if kw.arg} ) self.generic_visit(node) return node def visit_FunctionDef( self, node: FunctionDef | AsyncFunctionDef ) -> FunctionDef | AsyncFunctionDef | None: """ Injects type checks for function arguments, and for a return of None if the function is annotated to return something else than Any or None, and the body ends without an explicit "return". """ self._memo.local_names.add(node.name) # Eliminate top level functions not belonging to the target path if ( self._target_path is not None and not self._memo.path and node.name != self._target_path[0] ): return None # Skip instrumentation if we're instrumenting the whole module and the function # contains either @no_type_check or @typeguard_ignore if self._target_path is None: for decorator in node.decorator_list: if self._memo.name_matches(decorator, *ignore_decorators): return node with self._use_memo(node): arg_annotations: dict[str, Any] = {} if self._target_path is None or self._memo.path == self._target_path: # Find line number we're supposed to match against if node.decorator_list: first_lineno = node.decorator_list[0].lineno else: first_lineno = node.lineno for decorator in node.decorator_list.copy(): if self._memo.name_matches(decorator, "typing.overload"): # Remove overloads entirely return None elif self._memo.name_matches(decorator, "typeguard.typechecked"): # Remove the decorator to prevent duplicate instrumentation node.decorator_list.remove(decorator) # Store any configuration overrides if isinstance(decorator, Call) and decorator.keywords: self._memo.configuration_overrides = { kw.arg: kw.value for kw in decorator.keywords if kw.arg } if self.target_lineno == first_lineno: assert self.target_node is None self.target_node = node if node.decorator_list and sys.version_info >= (3, 8): 
self.target_lineno = node.decorator_list[0].lineno else: self.target_lineno = node.lineno all_args = node.args.args + node.args.kwonlyargs if sys.version_info >= (3, 8): all_args.extend(node.args.posonlyargs) # Ensure that any type shadowed by the positional or keyword-only # argument names are ignored in this function for arg in all_args: self._memo.ignored_names.add(arg.arg) # Ensure that any type shadowed by the variable positional argument name # (e.g. "args" in *args) is ignored this function if node.args.vararg: self._memo.ignored_names.add(node.args.vararg.arg) # Ensure that any type shadowed by the variable keywrod argument name # (e.g. "kwargs" in *kwargs) is ignored this function if node.args.kwarg: self._memo.ignored_names.add(node.args.kwarg.arg) for arg in all_args: annotation = self._convert_annotation(deepcopy(arg.annotation)) if annotation: arg_annotations[arg.arg] = annotation if node.args.vararg: annotation_ = self._convert_annotation(node.args.vararg.annotation) if annotation_: if sys.version_info >= (3, 9): container = Name("tuple", ctx=Load()) else: container = self._get_import("typing", "Tuple") subscript_slice: Tuple | Index = Tuple( [ annotation_, Constant(Ellipsis), ], ctx=Load(), ) if sys.version_info < (3, 9): subscript_slice = Index(subscript_slice, ctx=Load()) arg_annotations[node.args.vararg.arg] = Subscript( container, subscript_slice, ctx=Load() ) if node.args.kwarg: annotation_ = self._convert_annotation(node.args.kwarg.annotation) if annotation_: if sys.version_info >= (3, 9): container = Name("dict", ctx=Load()) else: container = self._get_import("typing", "Dict") subscript_slice = Tuple( [ Name("str", ctx=Load()), annotation_, ], ctx=Load(), ) if sys.version_info < (3, 9): subscript_slice = Index(subscript_slice, ctx=Load()) arg_annotations[node.args.kwarg.arg] = Subscript( container, subscript_slice, ctx=Load() ) if arg_annotations: self._memo.variable_annotations.update(arg_annotations) self.generic_visit(node) if 
arg_annotations: annotations_dict = Dict( keys=[Constant(key) for key in arg_annotations.keys()], values=[ Tuple([Name(key, ctx=Load()), annotation], ctx=Load()) for key, annotation in arg_annotations.items() ], ) func_name = self._get_import( "typeguard._functions", "check_argument_types" ) args = [ self._memo.joined_path, annotations_dict, self._memo.get_memo_name(), ] node.body.insert( self._memo.code_inject_index, Expr(Call(func_name, args, [])) ) # Add a checked "return None" to the end if there's no explicit return # Skip if the return annotation is None or Any if ( self._memo.return_annotation and (not self._memo.is_async or not self._memo.has_yield_expressions) and not isinstance(node.body[-1], Return) and ( not isinstance(self._memo.return_annotation, Constant) or self._memo.return_annotation.value is not None ) ): func_name = self._get_import( "typeguard._functions", "check_return_type" ) return_node = Return( Call( func_name, [ self._memo.joined_path, Constant(None), self._memo.return_annotation, self._memo.get_memo_name(), ], [], ) ) # Replace a placeholder "pass" at the end if isinstance(node.body[-1], Pass): copy_location(return_node, node.body[-1]) del node.body[-1] node.body.append(return_node) # Insert code to create the call memo, if it was ever needed for this # function if self._memo.memo_var_name: memo_kwargs: dict[str, Any] = {} if self._memo.parent and isinstance(self._memo.parent.node, ClassDef): for decorator in node.decorator_list: if ( isinstance(decorator, Name) and decorator.id == "staticmethod" ): break elif ( isinstance(decorator, Name) and decorator.id == "classmethod" ): memo_kwargs["self_type"] = Name( id=node.args.args[0].arg, ctx=Load() ) break else: if node.args.args: if node.name == "__new__": memo_kwargs["self_type"] = Name( id=node.args.args[0].arg, ctx=Load() ) else: memo_kwargs["self_type"] = Attribute( Name(id=node.args.args[0].arg, ctx=Load()), "__class__", ctx=Load(), ) # Construct the function reference # Nested 
functions get special treatment: the function name is added # to free variables (and the closure of the resulting function) names: list[str] = [node.name] memo = self._memo.parent while memo: if isinstance(memo.node, (FunctionDef, AsyncFunctionDef)): # This is a nested function. Use the function name as-is. del names[:-1] break elif not isinstance(memo.node, ClassDef): break names.insert(0, memo.node.name) memo = memo.parent config_keywords = self._memo.get_config_keywords() if config_keywords: memo_kwargs["config"] = Call( self._get_import("dataclasses", "replace"), [self._get_import("typeguard._config", "global_config")], config_keywords, ) self._memo.memo_var_name.id = self._memo.get_unused_name("memo") memo_store_name = Name(id=self._memo.memo_var_name.id, ctx=Store()) globals_call = Call(Name(id="globals", ctx=Load()), [], []) locals_call = Call(Name(id="locals", ctx=Load()), [], []) memo_expr = Call( self._get_import("typeguard", "TypeCheckMemo"), [globals_call, locals_call], [keyword(key, value) for key, value in memo_kwargs.items()], ) node.body.insert( self._memo.code_inject_index, Assign([memo_store_name], memo_expr), ) self._memo.insert_imports(node) # Rmove any placeholder "pass" at the end if isinstance(node.body[-1], Pass): del node.body[-1] return node def visit_AsyncFunctionDef( self, node: AsyncFunctionDef ) -> FunctionDef | AsyncFunctionDef | None: return self.visit_FunctionDef(node) def visit_Return(self, node: Return) -> Return: """This injects type checks into "return" statements.""" self.generic_visit(node) if ( self._memo.return_annotation and self._memo.should_instrument and not self._memo.is_ignored_name(self._memo.return_annotation) ): func_name = self._get_import("typeguard._functions", "check_return_type") old_node = node retval = old_node.value or Constant(None) node = Return( Call( func_name, [ self._memo.joined_path, retval, self._memo.return_annotation, self._memo.get_memo_name(), ], [], ) ) copy_location(node, old_node) return node 
def visit_Yield(self, node: Yield) -> Yield | Call: """ This injects type checks into "yield" expressions, checking both the yielded value and the value sent back to the generator, when appropriate. """ self._memo.has_yield_expressions = True self.generic_visit(node) if ( self._memo.yield_annotation and self._memo.should_instrument and not self._memo.is_ignored_name(self._memo.yield_annotation) ): func_name = self._get_import("typeguard._functions", "check_yield_type") yieldval = node.value or Constant(None) node.value = Call( func_name, [ self._memo.joined_path, yieldval, self._memo.yield_annotation, self._memo.get_memo_name(), ], [], ) if ( self._memo.send_annotation and self._memo.should_instrument and not self._memo.is_ignored_name(self._memo.send_annotation) ): func_name = self._get_import("typeguard._functions", "check_send_type") old_node = node call_node = Call( func_name, [ self._memo.joined_path, old_node, self._memo.send_annotation, self._memo.get_memo_name(), ], [], ) copy_location(call_node, old_node) return call_node return node def visit_AnnAssign(self, node: AnnAssign) -> Any: """ This injects a type check into a local variable annotation-assignment within a function body. """ self.generic_visit(node) if ( isinstance(self._memo.node, (FunctionDef, AsyncFunctionDef)) and node.annotation and isinstance(node.target, Name) ): self._memo.ignored_names.add(node.target.id) annotation = self._convert_annotation(deepcopy(node.annotation)) if annotation: self._memo.variable_annotations[node.target.id] = annotation if node.value: func_name = self._get_import( "typeguard._functions", "check_variable_assignment" ) node.value = Call( func_name, [ node.value, Constant(node.target.id), annotation, self._memo.get_memo_name(), ], [], ) return node def visit_Assign(self, node: Assign) -> Any: """ This injects a type check into a local variable assignment within a function body. The variable must have been annotated earlier in the function body. 
""" self.generic_visit(node) # Only instrument function-local assignments if isinstance(self._memo.node, (FunctionDef, AsyncFunctionDef)): targets: list[dict[Constant, expr | None]] = [] check_required = False for target in node.targets: elts: Sequence[expr] if isinstance(target, Name): elts = [target] elif isinstance(target, Tuple): elts = target.elts else: continue annotations_: dict[Constant, expr | None] = {} for exp in elts: prefix = "" if isinstance(exp, Starred): exp = exp.value prefix = "*" if isinstance(exp, Name): self._memo.ignored_names.add(exp.id) name = prefix + exp.id annotation = self._memo.variable_annotations.get(exp.id) if annotation: annotations_[Constant(name)] = annotation check_required = True else: annotations_[Constant(name)] = None targets.append(annotations_) if check_required: # Replace missing annotations with typing.Any for item in targets: for key, expression in item.items(): if expression is None: item[key] = self._get_import("typing", "Any") if len(targets) == 1 and len(targets[0]) == 1: func_name = self._get_import( "typeguard._functions", "check_variable_assignment" ) target_varname = next(iter(targets[0])) node.value = Call( func_name, [ node.value, target_varname, targets[0][target_varname], self._memo.get_memo_name(), ], [], ) elif targets: func_name = self._get_import( "typeguard._functions", "check_multi_variable_assignment" ) targets_arg = List( [ Dict(keys=list(target), values=list(target.values())) for target in targets ], ctx=Load(), ) node.value = Call( func_name, [node.value, targets_arg, self._memo.get_memo_name()], [], ) return node def visit_NamedExpr(self, node: NamedExpr) -> Any: """This injects a type check into an assignment expression (a := foo()).""" self.generic_visit(node) # Only instrument function-local assignments if isinstance(self._memo.node, (FunctionDef, AsyncFunctionDef)) and isinstance( node.target, Name ): self._memo.ignored_names.add(node.target.id) # Bail out if no matching annotation is found 
annotation = self._memo.variable_annotations.get(node.target.id) if annotation is None: return node func_name = self._get_import( "typeguard._functions", "check_variable_assignment" ) node.value = Call( func_name, [ node.value, Constant(node.target.id), annotation, self._memo.get_memo_name(), ], [], ) return node def visit_AugAssign(self, node: AugAssign) -> Any: """ This injects a type check into an augmented assignment expression (a += 1). """ self.generic_visit(node) # Only instrument function-local assignments if isinstance(self._memo.node, (FunctionDef, AsyncFunctionDef)) and isinstance( node.target, Name ): # Bail out if no matching annotation is found annotation = self._memo.variable_annotations.get(node.target.id) if annotation is None: return node # Bail out if the operator is not found (newer Python version?) try: operator_func_name = aug_assign_functions[node.op.__class__] except KeyError: return node operator_func = self._get_import("operator", operator_func_name) operator_call = Call( operator_func, [Name(node.target.id, ctx=Load()), node.value], [] ) check_call = Call( self._get_import("typeguard._functions", "check_variable_assignment"), [ operator_call, Constant(node.target.id), annotation, self._memo.get_memo_name(), ], [], ) return Assign(targets=[node.target], value=check_call) return node def visit_If(self, node: If) -> Any: """ This blocks names from being collected from a module-level "if typing.TYPE_CHECKING:" block, so that they won't be type checked. """ self.generic_visit(node) # Fix empty node body (caused by removal of classes/functions not on the target # path) if not node.body: node.body.append(Pass()) if ( self._memo is self._module_memo and isinstance(node.test, Name) and self._memo.name_matches(node.test, "typing.TYPE_CHECKING") ): collector = NameCollector() collector.visit(node) self._memo.ignored_names.update(collector.names) return node
TypeguardTransformer
python
openai__openai-python
src/openai/types/responses/response_computer_tool_call_output_screenshot.py
{ "start": 245, "end": 662 }
class ____(BaseModel): type: Literal["computer_screenshot"] """Specifies the event type. For a computer screenshot, this property is always set to `computer_screenshot`. """ file_id: Optional[str] = None """The identifier of an uploaded file that contains the screenshot.""" image_url: Optional[str] = None """The URL of the screenshot image."""
ResponseComputerToolCallOutputScreenshot
python
allegroai__clearml
clearml/backend_api/services/v2_13/queues.py
{ "start": 70118, "end": 71419 }
class ____(Response): """ Response of queues.move_task_to_back endpoint. :param position: The new position of the task entry in the queue (index, -1 represents bottom of queue) :type position: int """ _service = "queues" _action = "move_task_to_back" _version = "2.13" _schema = { "definitions": {}, "properties": { "position": { "description": "The new position of the task entry in the queue (index, -1 represents bottom of queue)", "type": ["integer", "null"], } }, "type": "object", } def __init__(self, position: Optional[int] = None, **kwargs: Any) -> None: super(MoveTaskToBackResponse, self).__init__(**kwargs) self.position = position @schema_property("position") def position(self) -> Optional[int]: return self._property_position @position.setter def position(self, value: Optional[int]) -> None: if value is None: self._property_position = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "position", six.integer_types) self._property_position = value
MoveTaskToBackResponse
python
pydantic__pydantic
pydantic/warnings.py
{ "start": 2269, "end": 2587 }
class ____(PydanticDeprecationWarning): """A specific `PydanticDeprecationWarning` subclass defining functionality deprecated since Pydantic 2.6.""" def __init__(self, message: str, *args: object) -> None: super().__init__(message, *args, since=(2, 6), expected_removal=(3, 0))
PydanticDeprecatedSince26
python
scrapy__scrapy
tests/test_robotstxt_interface.py
{ "start": 6374, "end": 6623 }
class ____(BaseRobotParserTest): def setup_method(self): super()._setUp(ProtegoRobotParser) def test_order_based_precedence(self): pytest.skip("Protego does not support order based directives precedence.")
TestProtegoRobotParser
python
chroma-core__chroma
chromadb/telemetry/product/events.py
{ "start": 343, "end": 606 }
class ____(ProductTelemetryEvent): is_cli: bool def __init__(self) -> None: super().__init__() self.is_cli = os.environ.get("CHROMA_CLI", "False") == "True" # TODO: Re-enable embedding function tracking in create_collection
ServerStartEvent
python
matplotlib__matplotlib
galleries/examples/units/evans_test.py
{ "start": 651, "end": 2221 }
class ____(units.ConversionInterface): @staticmethod def axisinfo(unit, axis): """Return the Foo AxisInfo.""" if unit == 1.0 or unit == 2.0: return units.AxisInfo( majloc=ticker.IndexLocator(8, 0), majfmt=ticker.FormatStrFormatter("VAL: %s"), label='foo', ) else: return None @staticmethod def convert(obj, unit, axis): """ Convert *obj* using *unit*. If *obj* is a sequence, return the converted sequence. """ if np.iterable(obj): return [o.value(unit) for o in obj] else: return obj.value(unit) @staticmethod def default_units(x, axis): """Return the default unit for *x* or None.""" if np.iterable(x): for thisx in x: return thisx.unit else: return x.unit units.registry[Foo] = FooConverter() # create some Foos x = [Foo(val, 1.0) for val in range(0, 50, 2)] # and some arbitrary y data y = [i for i in range(len(x))] fig, (ax1, ax2) = plt.subplots(1, 2) fig.suptitle("Custom units") fig.subplots_adjust(bottom=0.2) # plot specifying units ax2.plot(x, y, 'o', xunits=2.0) ax2.set_title("xunits = 2.0") ax2.tick_params(axis='x', rotation=30, rotation_mode='xtick') # plot without specifying units; will use the None branch for axisinfo ax1.plot(x, y) # uses default units ax1.set_title('default units') ax1.tick_params(axis='x', rotation=30, rotation_mode='xtick') plt.show()
FooConverter
python
pypa__pipenv
pipenv/vendor/click/testing.py
{ "start": 1489, "end": 2379 }
class ____(io.TextIOWrapper): def __init__( self, buffer: t.BinaryIO, name: str, mode: str, **kwargs: t.Any ) -> None: super().__init__(buffer, **kwargs) self._name = name self._mode = mode @property def name(self) -> str: return self._name @property def mode(self) -> str: return self._mode def make_input_stream( input: t.Optional[t.Union[str, bytes, t.IO[t.Any]]], charset: str ) -> t.BinaryIO: # Is already an input stream. if hasattr(input, "read"): rv = _find_binary_reader(t.cast(t.IO[t.Any], input)) if rv is not None: return rv raise TypeError("Could not find binary reader for input stream.") if input is None: input = b"" elif isinstance(input, str): input = input.encode(charset) return io.BytesIO(input)
_NamedTextIOWrapper
python
facelessuser__pymdown-extensions
tests/test_extensions/test_blocks/test_general_blocks.py
{ "start": 160, "end": 5047 }
class ____(unittest.TestCase): """Validate various type functions.""" def test_type_any(self): """Test `type_any`.""" self.assertEqual(3, block.type_any(3)) self.assertEqual({}, block.type_any({})) self.assertEqual('string', block.type_any('string')) def test_type_number(self): """Test `type_number`.""" self.assertEqual(3, block.type_number(3)) self.assertEqual(3.0, block.type_number(3.0)) with self.assertRaises(ValueError): block.type_number('string') def test_type_integer(self): """Test `type_integer`.""" self.assertEqual(3, block.type_integer(3)) self.assertEqual(3, block.type_integer(3.0)) with self.assertRaises(ValueError): block.type_integer(3.3) def test_type_ranged_number(self): """Test `type_ranged_number`.""" self.assertEqual(4.7, block.type_ranged_number(3, 8)(4.7)) with self.assertRaises(ValueError): block.type_ranged_number(3, 8)(2.7) with self.assertRaises(ValueError): block.type_ranged_number(3, 8)(9.2) self.assertEqual(-4.7, block.type_ranged_number(None, 8)(-4.7)) with self.assertRaises(ValueError): block.type_ranged_number(None, 8)(9.2) self.assertEqual(1004.7, block.type_ranged_number(3, None)(1004.7)) with self.assertRaises(ValueError): block.type_ranged_number(3, None)(2.3) with self.assertRaises(ValueError): block.type_ranged_number(3, 8)('string') def test_type_ranged_integer(self): """Test `type_ranged_integer`.""" self.assertEqual(4, block.type_ranged_integer(3, 8)(4)) self.assertEqual(4, block.type_ranged_integer(3, 8)(4.0)) with self.assertRaises(ValueError): block.type_ranged_integer(3, 8)(4.3) with self.assertRaises(ValueError): block.type_ranged_integer(3, 8)(2) with self.assertRaises(ValueError): block.type_ranged_integer(3, 8)(9) def test_type_html_identifier(self): """Test `type_html_tag`.""" self.assertEqual('div', block.type_html_identifier('div')) with self.assertRaises(ValueError): block.type_html_identifier('3bad') def test_type_boolean(self): """Test `type_boolean`.""" self.assertEqual(True, block.type_boolean(True)) 
self.assertEqual(False, block.type_boolean(False)) with self.assertRaises(ValueError): block.type_boolean(None) def test_type_ternary(self): """Test `type_ternary`.""" self.assertEqual(True, block.type_ternary(True)) self.assertEqual(False, block.type_ternary(False)) self.assertEqual(None, block.type_ternary(None)) with self.assertRaises(ValueError): block.type_ternary(3) def test_type_string(self): """Test `type_string`.""" self.assertEqual('string', block.type_string('string')) with self.assertRaises(ValueError): block.type_string(3) def test_type_string_insensitive(self): """Test `type_string_insensitive`.""" self.assertEqual('string', block.type_string_insensitive('STRING')) with self.assertRaises(ValueError): block.type_string_insensitive(3) def test_type_string_in(self): """Test `type_string_in`.""" self.assertEqual('this', block.type_string_in(['this', 'that'])('this')) self.assertEqual('this', block.type_string_in(['this', 'that'])('This')) self.assertEqual('this', block.type_string_in(['this', 'that'], insensitive=False)('this')) with self.assertRaises(ValueError): block.type_string_in(['this', 'that'], insensitive=False)('This') with self.assertRaises(ValueError): block.type_string_in(['this', 'that'])('bad') def test_type_string_delimiter(self): """Test `type_string_delimiter`.""" self.assertEqual(['this', 'that'], block.type_string_delimiter(';')('this; that')) self.assertEqual(['this', 'that'], block.type_string_delimiter(' ')('this that')) def test_type_html_classes(self): """Test `type_html_classes`.""" self.assertEqual(['this', 'that'], block.type_html_classes('this that')) def test_type_multi(self): """Test `type_multi`.""" t = block.type_multi(block.type_ternary, block.type_string_in(['this', 'that'])) self.assertEqual(True, t(True)) self.assertEqual(False, t(False)) self.assertEqual(None, t(None)) self.assertEqual('this', t('this')) with self.assertRaises(ValueError): t(3) with self.assertRaises(ValueError): t('other') with 
self.assertRaises(ValueError): block.type_multi()(True)
TestTypeFunctions
python
ansible__ansible
lib/ansible/cli/arguments/option_helpers.py
{ "start": 5635, "end": 5870 }
class ____(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): ansible_version = to_native(version(getattr(parser, 'prog'))) print(ansible_version) parser.exit()
AnsibleVersion
python
spack__spack
var/spack/test_repos/spack_repo/builtin_mock/build_systems/autotools.py
{ "start": 1444, "end": 3952 }
class ____(BuilderWithDefaults): #: Phases of a GNU Autotools package phases = ("autoreconf", "configure", "build", "install") #: Names associated with package methods in the old build-system format package_methods = ("configure_args", "check", "installcheck") #: Names associated with package attributes in the old build-system format package_attributes = ( "archive_files", "build_time_test_callbacks", "install_time_test_callbacks", "configure_directory", "configure_abs_path", "build_directory", ) #: Callback names for build-time test build_time_test_callbacks = ["check"] #: Callback names for install-time test install_time_test_callbacks = ["installcheck"] @property def archive_files(self) -> List[str]: return [os.path.join(self.build_directory, "config.log")] @property def configure_directory(self) -> str: """Return the directory where 'configure' resides.""" return self.pkg.stage.source_path @property def configure_abs_path(self) -> str: # Absolute path to configure configure_abs_path = os.path.join(os.path.abspath(self.configure_directory), "configure") return configure_abs_path @property def build_directory(self) -> str: """Override to provide another place to build the package""" # Handle the case where the configure directory is set to a non-absolute path # Non-absolute paths are always relative to the staging source path build_dir = self.configure_directory if not os.path.isabs(build_dir): build_dir = os.path.join(self.pkg.stage.source_path, build_dir) return build_dir def configure_args(self) -> List[str]: """Return the list of all the arguments that must be passed to configure, except ``--prefix`` which will be pre-pended to the list. 
""" return [] def autoreconf(self, pkg: AutotoolsPackage, spec: Spec, prefix: Prefix) -> None: pass def configure(self, pkg: AutotoolsPackage, spec: Spec, prefix: Prefix) -> None: pass def build(self, pkg: AutotoolsPackage, spec: Spec, prefix: Prefix) -> None: pass def install(self, pkg: AutotoolsPackage, spec: Spec, prefix: Prefix) -> None: pass def check(self) -> None: pass run_after("build")(execute_build_time_tests) run_after("install")(execute_install_time_tests)
AutotoolsBuilder
python
huggingface__transformers
src/transformers/models/qwen3_moe/modeling_qwen3_moe.py
{ "start": 20699, "end": 27492 }
class ____(Qwen3MoePreTrainedModel): def __init__(self, config: Qwen3MoeConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList( [Qwen3MoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.norm = Qwen3MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.rotary_emb = Qwen3MoeRotaryEmbedding(config=config) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() @check_model_inputs() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> MoeModelOutputWithPast: if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if use_cache and past_key_values is None: past_key_values = DynamicCache(config=self.config) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if cache_position is None: past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0 cache_position = torch.arange( past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device ) if position_ids is None: position_ids = cache_position.unsqueeze(0) mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask causal_mask = mask_function( config=self.config, input_embeds=inputs_embeds, attention_mask=attention_mask, cache_position=cache_position, past_key_values=past_key_values, position_ids=position_ids, ) hidden_states = 
inputs_embeds position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids) for decoder_layer in self.layers[: self.config.num_hidden_layers]: hidden_states = decoder_layer( hidden_states, attention_mask=causal_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=use_cache, cache_position=cache_position, position_embeddings=position_embeddings, **kwargs, ) hidden_states = self.norm(hidden_states) return MoeModelOutputWithPast( # only diff with Mistral is the output type, we need MoE last_hidden_state=hidden_states, past_key_values=past_key_values, ) def load_balancing_loss_func( gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None], num_experts: Optional[int] = None, top_k=2, attention_mask: Optional[torch.Tensor] = None, ) -> Union[torch.Tensor, int]: r""" Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch. See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between experts is too unbalanced. Args: gate_logits: Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of shape [batch_size X sequence_length, num_experts]. num_experts: Number of experts top_k: The number of experts to route per-token, can be also interpreted as the `top-k` routing parameter. attention_mask (`torch.Tensor`, *optional*): The attention_mask used in forward function shape [batch_size X sequence_length] if not None. Returns: The auxiliary loss. 
""" if gate_logits is None or not isinstance(gate_logits, tuple): return 0 if isinstance(gate_logits, tuple): compute_device = gate_logits[0].device concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0) routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1) _, selected_experts = torch.topk(routing_weights, top_k, dim=-1) expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts) if attention_mask is None: # Compute the percentage of tokens routed to each experts tokens_per_expert = torch.mean(expert_mask.float(), dim=0) # Compute the average probability of routing to these experts router_prob_per_expert = torch.mean(routing_weights, dim=0) else: batch_size, sequence_length = attention_mask.shape num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length) # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask expert_attention_mask = ( attention_mask[None, :, :, None, None] .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts)) .reshape(-1, top_k, num_experts) .to(compute_device) ) # Compute the percentage of tokens routed to each experts tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum( expert_attention_mask, dim=0 ) # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert router_per_expert_attention_mask = ( attention_mask[None, :, :, None] .expand((num_hidden_layers, batch_size, sequence_length, num_experts)) .reshape(-1, num_experts) .to(compute_device) ) # Compute the average probability of routing to these experts router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum( router_per_expert_attention_mask, dim=0 ) overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0)) return overall_loss * num_experts @auto_docstring
Qwen3MoeModel
python
doocs__leetcode
lcof/面试题35. 复杂链表的复制/Solution2.py
{ "start": 203, "end": 794 }
class ____: def copyRandomList(self, head: "Node") -> "Node": if head is None: return None cur = head while cur: node = Node(cur.val, cur.next) cur.next = node cur = node.next cur = head while cur: if cur.random: cur.next.random = cur.random.next cur = cur.next.next ans = head.next cur = head while cur: nxt = cur.next if nxt: cur.next = nxt.next cur = nxt return ans
Solution
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/selectable.py
{ "start": 158795, "end": 174991 }
class ____(util.MemoizedSlots, CompileState): __slots__ = ( "from_clauses", "froms", "columns_plus_names", "_label_resolve_dict", ) if TYPE_CHECKING: default_select_compile_options: CacheableOptions else: class default_select_compile_options(CacheableOptions): _cache_key_traversal = [] if TYPE_CHECKING: @classmethod def get_plugin_class( cls, statement: Executable ) -> Type[SelectState]: ... def __init__( self, statement: Select[Unpack[TupleAny]], compiler: SQLCompiler, **kw: Any, ): self.statement = statement self.from_clauses = statement._from_obj for memoized_entities in statement._memoized_select_entities: self._setup_joins( memoized_entities._setup_joins, memoized_entities._raw_columns ) if statement._setup_joins: self._setup_joins(statement._setup_joins, statement._raw_columns) self.froms = self._get_froms(statement) self.columns_plus_names = statement._generate_columns_plus_names(True) @classmethod def _plugin_not_implemented(cls) -> NoReturn: raise NotImplementedError( "The default SELECT construct without plugins does not " "implement this method." 
) @classmethod def get_column_descriptions( cls, statement: Select[Unpack[TupleAny]] ) -> List[Dict[str, Any]]: return [ { "name": name, "type": element.type, "expr": element, } for _, name, _, element, _ in ( statement._generate_columns_plus_names(False) ) ] @classmethod def from_statement( cls, statement: Select[Unpack[TupleAny]], from_statement: roles.ReturnsRowsRole, ) -> ExecutableReturnsRows: cls._plugin_not_implemented() @classmethod def get_columns_clause_froms( cls, statement: Select[Unpack[TupleAny]] ) -> List[FromClause]: return cls._normalize_froms( itertools.chain.from_iterable( element._from_objects for element in statement._raw_columns ) ) @classmethod def _column_naming_convention( cls, label_style: SelectLabelStyle ) -> _LabelConventionCallable: table_qualified = label_style is LABEL_STYLE_TABLENAME_PLUS_COL dedupe = label_style is not LABEL_STYLE_NONE pa = prefix_anon_map() names = set() def go( c: Union[ColumnElement[Any], TextClause], col_name: Optional[str] = None, ) -> Optional[str]: if is_text_clause(c): return None elif TYPE_CHECKING: assert is_column_element(c) if not dedupe: name = c._proxy_key if name is None: name = "_no_label" return name name = c._tq_key_label if table_qualified else c._proxy_key if name is None: name = "_no_label" if name in names: return c._anon_label(name) % pa else: names.add(name) return name elif name in names: return ( c._anon_tq_key_label % pa if table_qualified else c._anon_key_label % pa ) else: names.add(name) return name return go def _get_froms( self, statement: Select[Unpack[TupleAny]] ) -> List[FromClause]: ambiguous_table_name_map: _AmbiguousTableNameMap self._ambiguous_table_name_map = ambiguous_table_name_map = {} return self._normalize_froms( itertools.chain( self.from_clauses, itertools.chain.from_iterable( [ element._from_objects for element in statement._raw_columns ] ), itertools.chain.from_iterable( [ element._from_objects for element in statement._where_criteria ] ), ), 
check_statement=statement, ambiguous_table_name_map=ambiguous_table_name_map, ) @classmethod def _normalize_froms( cls, iterable_of_froms: Iterable[FromClause], check_statement: Optional[Select[Unpack[TupleAny]]] = None, ambiguous_table_name_map: Optional[_AmbiguousTableNameMap] = None, ) -> List[FromClause]: """given an iterable of things to select FROM, reduce them to what would actually render in the FROM clause of a SELECT. This does the job of checking for JOINs, tables, etc. that are in fact overlapping due to cloning, adaption, present in overlapping joins, etc. """ seen: Set[FromClause] = set() froms: List[FromClause] = [] for item in iterable_of_froms: if is_subquery(item) and item.element is check_statement: raise exc.InvalidRequestError( "select() construct refers to itself as a FROM" ) if not seen.intersection(item._cloned_set): froms.append(item) seen.update(item._cloned_set) if froms: toremove = set( itertools.chain.from_iterable( [_expand_cloned(f._hide_froms) for f in froms] ) ) if toremove: # filter out to FROM clauses not in the list, # using a list to maintain ordering froms = [f for f in froms if f not in toremove] if ambiguous_table_name_map is not None: ambiguous_table_name_map.update( ( fr.name, _anonymous_label.safe_construct( hash(fr.name), fr.name ), ) for item in froms for fr in item._from_objects if is_table(fr) and fr.schema and fr.name not in ambiguous_table_name_map ) return froms def _get_display_froms( self, explicit_correlate_froms: Optional[Sequence[FromClause]] = None, implicit_correlate_froms: Optional[Sequence[FromClause]] = None, ) -> List[FromClause]: """Return the full list of 'from' clauses to be displayed. Takes into account a set of existing froms which may be rendered in the FROM clause of enclosing selects; this Select may want to leave those absent if it is automatically correlating. 
""" froms = self.froms if self.statement._correlate: to_correlate = self.statement._correlate if to_correlate: froms = [ f for f in froms if f not in _cloned_intersection( _cloned_intersection( froms, explicit_correlate_froms or () ), to_correlate, ) ] if self.statement._correlate_except is not None: froms = [ f for f in froms if f not in _cloned_difference( _cloned_intersection( froms, explicit_correlate_froms or () ), self.statement._correlate_except, ) ] if ( self.statement._auto_correlate and implicit_correlate_froms and len(froms) > 1 ): froms = [ f for f in froms if f not in _cloned_intersection(froms, implicit_correlate_froms) ] if not len(froms): raise exc.InvalidRequestError( "Select statement '%r" "' returned no FROM clauses " "due to auto-correlation; " "specify correlate(<tables>) " "to control correlation " "manually." % self.statement ) return froms def _memoized_attr__label_resolve_dict( self, ) -> Tuple[ Dict[str, ColumnElement[Any]], Dict[str, ColumnElement[Any]], Dict[str, ColumnElement[Any]], ]: with_cols: Dict[str, ColumnElement[Any]] = { c._tq_label or c.key: c for c in self.statement._all_selected_columns if c._allow_label_resolve } only_froms: Dict[str, ColumnElement[Any]] = { c.key: c # type: ignore for c in _select_iterables(self.froms) if c._allow_label_resolve } only_cols: Dict[str, ColumnElement[Any]] = with_cols.copy() for key, value in only_froms.items(): with_cols.setdefault(key, value) return with_cols, only_froms, only_cols @classmethod def determine_last_joined_entity( cls, stmt: Select[Unpack[TupleAny]] ) -> Optional[_JoinTargetElement]: if stmt._setup_joins: return stmt._setup_joins[-1][0] else: return None @classmethod def all_selected_columns( cls, statement: Select[Unpack[TupleAny]] ) -> _SelectIterable: return [c for c in _select_iterables(statement._raw_columns)] def _setup_joins( self, args: Tuple[_SetupJoinsElement, ...], raw_columns: List[_ColumnsClauseElement], ) -> None: for right, onclause, left, flags in args: if 
TYPE_CHECKING: if onclause is not None: assert isinstance(onclause, ColumnElement) explicit_left = left isouter = flags["isouter"] full = flags["full"] if left is None: ( left, replace_from_obj_index, ) = self._join_determine_implicit_left_side( raw_columns, left, right, onclause ) else: (replace_from_obj_index) = self._join_place_explicit_left_side( left ) # these assertions can be made here, as if the right/onclause # contained ORM elements, the select() statement would have been # upgraded to an ORM select, and this method would not be called; # orm.context.ORMSelectCompileState._join() would be # used instead. if TYPE_CHECKING: assert isinstance(right, FromClause) if onclause is not None: assert isinstance(onclause, ColumnElement) if replace_from_obj_index is not None: # splice into an existing element in the # self._from_obj list left_clause = self.from_clauses[replace_from_obj_index] if explicit_left is not None and onclause is None: onclause = Join._join_condition(explicit_left, right) self.from_clauses = ( self.from_clauses[:replace_from_obj_index] + ( Join( left_clause, right, onclause, isouter=isouter, full=full, ), ) + self.from_clauses[replace_from_obj_index + 1 :] ) else: assert left is not None self.from_clauses = self.from_clauses + ( Join(left, right, onclause, isouter=isouter, full=full), ) @util.preload_module("sqlalchemy.sql.util") def _join_determine_implicit_left_side( self, raw_columns: List[_ColumnsClauseElement], left: Optional[FromClause], right: _JoinTargetElement, onclause: Optional[ColumnElement[Any]], ) -> Tuple[Optional[FromClause], Optional[int]]: """When join conditions don't express the left side explicitly, determine if an existing FROM or entity in this query can serve as the left hand side. 
""" sql_util = util.preloaded.sql_util replace_from_obj_index: Optional[int] = None from_clauses = self.from_clauses if from_clauses: indexes: List[int] = sql_util.find_left_clause_to_join_from( from_clauses, right, onclause ) if len(indexes) == 1: replace_from_obj_index = indexes[0] left = from_clauses[replace_from_obj_index] else: potential = {} statement = self.statement for from_clause in itertools.chain( itertools.chain.from_iterable( [element._from_objects for element in raw_columns] ), itertools.chain.from_iterable( [ element._from_objects for element in statement._where_criteria ] ), ): potential[from_clause] = () all_clauses = list(potential.keys()) indexes = sql_util.find_left_clause_to_join_from( all_clauses, right, onclause ) if len(indexes) == 1: left = all_clauses[indexes[0]] if len(indexes) > 1: raise exc.InvalidRequestError( "Can't determine which FROM clause to join " "from, there are multiple FROMS which can " "join to this entity. Please use the .select_from() " "method to establish an explicit left side, as well as " "providing an explicit ON clause if not present already to " "help resolve the ambiguity." ) elif not indexes: raise exc.InvalidRequestError( "Don't know how to join to %r. " "Please use the .select_from() " "method to establish an explicit left side, as well as " "providing an explicit ON clause if not present already to " "help resolve the ambiguity." % (right,) ) return left, replace_from_obj_index @util.preload_module("sqlalchemy.sql.util") def _join_place_explicit_left_side( self, left: FromClause ) -> Optional[int]: replace_from_obj_index: Optional[int] = None sql_util = util.preloaded.sql_util from_clauses = list(self.statement._iterate_from_elements()) if from_clauses: indexes: List[int] = sql_util.find_left_clause_that_matches_given( self.from_clauses, left ) else: indexes = [] if len(indexes) > 1: raise exc.InvalidRequestError( "Can't identify which entity in which to assign the " "left side of this join. 
Please use a more specific " "ON clause." ) # have an index, means the left side is already present in # an existing FROM in the self._from_obj tuple if indexes: replace_from_obj_index = indexes[0] # no index, means we need to add a new element to the # self._from_obj tuple return replace_from_obj_index
SelectState
python
pallets__jinja
tests/test_imports.py
{ "start": 4523, "end": 7571 }
class ____: def test_context_include(self, test_env): t = test_env.from_string('{% include "header" %}') assert t.render(foo=42) == "[42|23]" t = test_env.from_string('{% include "header" with context %}') assert t.render(foo=42) == "[42|23]" t = test_env.from_string('{% include "header" without context %}') assert t.render(foo=42) == "[|23]" def test_choice_includes(self, test_env): t = test_env.from_string('{% include ["missing", "header"] %}') assert t.render(foo=42) == "[42|23]" t = test_env.from_string('{% include ["missing", "missing2"] ignore missing %}') assert t.render(foo=42) == "" t = test_env.from_string('{% include ["missing", "missing2"] %}') pytest.raises(TemplateNotFound, t.render) with pytest.raises(TemplatesNotFound) as e: t.render() assert e.value.templates == ["missing", "missing2"] assert e.value.name == "missing2" def test_includes(t, **ctx): ctx["foo"] = 42 assert t.render(ctx) == "[42|23]" t = test_env.from_string('{% include ["missing", "header"] %}') test_includes(t) t = test_env.from_string("{% include x %}") test_includes(t, x=["missing", "header"]) t = test_env.from_string('{% include [x, "header"] %}') test_includes(t, x="missing") t = test_env.from_string("{% include x %}") test_includes(t, x="header") t = test_env.from_string("{% include [x] %}") test_includes(t, x="header") def test_include_ignoring_missing(self, test_env): t = test_env.from_string('{% include "missing" %}') pytest.raises(TemplateNotFound, t.render) for extra in "", "with context", "without context": t = test_env.from_string( '{% include "missing" ignore missing ' + extra + " %}" ) assert t.render() == "" def test_context_include_with_overrides(self, test_env): env = Environment( loader=DictLoader( dict( main="{% for item in [1, 2, 3] %}{% include 'item' %}{% endfor %}", item="{{ item }}", ) ) ) assert env.get_template("main").render() == "123" def test_unoptimized_scopes(self, test_env): t = test_env.from_string( """ {% macro outer(o) %} {% macro inner() %} {% 
include "o_printer" %} {% endmacro %} {{ inner() }} {% endmacro %} {{ outer("FOO") }} """ ) assert t.render().strip() == "(FOO)" def test_import_from_with_context(self): env = Environment( loader=DictLoader({"a": "{% macro x() %}{{ foobar }}{% endmacro %}"}) ) t = env.from_string( "{% set foobar = 42 %}{% from 'a' import x with context %}{{ x() }}" ) assert t.render() == "42"
TestIncludes
python
streamlit__streamlit
lib/tests/streamlit/elements/multiselect_test.py
{ "start": 21990, "end": 25722 }
class ____: def test_serialize(self): options = ["Option A", "Option B", "Option C"] formatted_options, formatted_option_to_option_index = create_mappings(options) serde = MultiSelectSerde( options, formatted_options=formatted_options, formatted_option_to_option_index=formatted_option_to_option_index, ) res = serde.serialize(["A", "C"]) assert res == ["A", "C"] def test_serialize_empty_list(self): options = ["Option A", "Option B", "Option C"] formatted_options, formatted_option_to_option_index = create_mappings(options) serde = MultiSelectSerde( options, formatted_options=formatted_options, formatted_option_to_option_index=formatted_option_to_option_index, ) res = serde.serialize([]) assert res == [] def test_serialize_with_format_func(self): options = ["Option A", "Option B", "Option C"] def format_func(x): return f"Format: {x}" formatted_options, formatted_option_to_option_index = create_mappings( options, format_func ) serde = MultiSelectSerde( options, formatted_options=formatted_options, formatted_option_to_option_index=formatted_option_to_option_index, ) res = serde.serialize(["A", "Option C"]) assert res == ["A", "Format: Option C"] def test_deserialize(self): options = ["Option A", "Option B", "Option C"] formatted_options, formatted_option_to_option_index = create_mappings(options) serde = MultiSelectSerde( options, formatted_options=formatted_options, formatted_option_to_option_index=formatted_option_to_option_index, ) res = serde.deserialize(["Option A", "Option C", "B"]) assert res == ["Option A", "Option C", "B"] def test_deserialize_empty_list(self): options = ["Option A", "Option B", "Option C"] formatted_options, formatted_option_to_option_index = create_mappings(options) serde = MultiSelectSerde( options, formatted_options=formatted_options, formatted_option_to_option_index=formatted_option_to_option_index, ) res = serde.deserialize([]) assert res == [] def test_deserialize_with_default_indices(self): options = ["Option A", "Option B", "Option C"] 
default_indices = [0, 2] formatted_options, formatted_option_to_option_index = create_mappings(options) serde = MultiSelectSerde( options, formatted_options=formatted_options, formatted_option_to_option_index=formatted_option_to_option_index, default_options_indices=default_indices, ) res = serde.deserialize(None) assert res == ["Option A", "Option C"] def test_deserialize_complex_options(self): # Test with more complex option types complex_options = [ {"id": 1, "name": "First"}, {"id": 2, "name": "Second"}, {"id": 3, "name": "Third"}, ] def format_func(x): return x["name"] formatted_options, formatted_option_to_option_index = create_mappings( complex_options, format_func ) serde = MultiSelectSerde( complex_options, formatted_options=formatted_options, formatted_option_to_option_index=formatted_option_to_option_index, ) res = serde.deserialize(["First", "Third"]) assert res == [complex_options[0], complex_options[2]]
TestMultiSelectSerde
python
wandb__wandb
wandb/sdk/artifacts/_generated/unlink_artifact.py
{ "start": 258, "end": 348 }
class ____(GQLResult): success: bool UnlinkArtifact.model_rebuild()
UnlinkArtifactResult
python
keon__algorithms
tests/test_array.py
{ "start": 8375, "end": 9495 }
class ____(unittest.TestCase): def test_plus_one_v1(self): self.assertListEqual(plus_one_v1([0]), [1]) self.assertListEqual(plus_one_v1([9]), [1, 0]) self.assertListEqual(plus_one_v1([1, 0, 9]), [1, 1, 0]) self.assertListEqual(plus_one_v1([9, 9, 8, 0, 0, 9]), [9, 9, 8, 0, 1, 0]) self.assertListEqual(plus_one_v1([9, 9, 9, 9]), [1, 0, 0, 0, 0]) def test_plus_one_v2(self): self.assertListEqual(plus_one_v2([0]), [1]) self.assertListEqual(plus_one_v2([9]), [1, 0]) self.assertListEqual(plus_one_v2([1, 0, 9]), [1, 1, 0]) self.assertListEqual(plus_one_v2([9, 9, 8, 0, 0, 9]), [9, 9, 8, 0, 1, 0]) self.assertListEqual(plus_one_v2([9, 9, 9, 9]), [1, 0, 0, 0, 0]) def test_plus_one_v3(self): self.assertListEqual(plus_one_v3([0]), [1]) self.assertListEqual(plus_one_v3([9]), [1, 0]) self.assertListEqual(plus_one_v3([1, 0, 9]), [1, 1, 0]) self.assertListEqual(plus_one_v3([9, 9, 8, 0, 0, 9]), [9, 9, 8, 0, 1, 0]) self.assertListEqual(plus_one_v3([9, 9, 9, 9]), [1, 0, 0, 0, 0])
TestPlusOne
python
aimacode__aima-python
gui/xy_vacuum_environment.py
{ "start": 125, "end": 6136 }
class ____(VacuumEnvironment): """This is a two-dimensional GUI environment. Each location may be dirty, clean or can have a wall. The user can change these at each step. """ xi, yi = (0, 0) perceptible_distance = 1 def __init__(self, root, width=7, height=7, elements=None): super().__init__(width, height) if elements is None: elements = ['D', 'W'] self.root = root self.create_frames() self.create_buttons() self.create_walls() self.elements = elements def create_frames(self): """Adds frames to the GUI environment.""" self.frames = [] for _ in range(7): frame = Frame(self.root, bg='grey') frame.pack(side='bottom') self.frames.append(frame) def create_buttons(self): """Adds buttons to the respective frames in the GUI.""" self.buttons = [] for frame in self.frames: button_row = [] for _ in range(7): button = Button(frame, height=3, width=5, padx=2, pady=2) button.config( command=lambda btn=button: self.display_element(btn)) button.pack(side='left') button_row.append(button) self.buttons.append(button_row) def create_walls(self): """Creates the outer boundary walls which do not move.""" for row, button_row in enumerate(self.buttons): if row == 0 or row == len(self.buttons) - 1: for button in button_row: button.config(text='W', state='disabled', disabledforeground='black') else: button_row[0].config( text='W', state='disabled', disabledforeground='black') button_row[len(button_row) - 1].config(text='W', state='disabled', disabledforeground='black') # Place the agent in the centre of the grid. 
self.buttons[3][3].config( text='A', state='disabled', disabledforeground='black') def display_element(self, button): """Show the things on the GUI.""" txt = button['text'] if txt != 'A': if txt == 'W': button.config(text='D') elif txt == 'D': button.config(text='') elif txt == '': button.config(text='W') def execute_action(self, agent, action): """Determines the action the agent performs.""" xi, yi = (self.xi, self.yi) if action == 'Suck': dirt_list = self.list_things_at(agent.location, Dirt) if dirt_list: dirt = dirt_list[0] agent.performance += 100 self.delete_thing(dirt) self.buttons[xi][yi].config(text='', state='normal') xf, yf = agent.location self.buttons[xf][yf].config( text='A', state='disabled', disabledforeground='black') else: agent.bump = False if action == 'TurnRight': agent.direction += Direction.R elif action == 'TurnLeft': agent.direction += Direction.L elif action == 'Forward': agent.bump = self.move_to(agent, agent.direction.move_forward(agent.location)) if not agent.bump: self.buttons[xi][yi].config(text='', state='normal') xf, yf = agent.location self.buttons[xf][yf].config( text='A', state='disabled', disabledforeground='black') if action != 'NoOp': agent.performance -= 1 def read_env(self): """Reads the current state of the GUI environment.""" for i, btn_row in enumerate(self.buttons): for j, btn in enumerate(btn_row): if (i != 0 and i != len(self.buttons) - 1) and (j != 0 and j != len(btn_row) - 1): agt_loc = self.agents[0].location if self.some_things_at((i, j)) and (i, j) != agt_loc: for thing in self.list_things_at((i, j)): self.delete_thing(thing) if btn['text'] == self.elements[0]: self.add_thing(Dirt(), (i, j)) elif btn['text'] == self.elements[1]: self.add_thing(Wall(), (i, j)) def update_env(self): """Updates the GUI environment according to the current state.""" self.read_env() agt = self.agents[0] previous_agent_location = agt.location self.xi, self.yi = previous_agent_location self.step() xf, yf = agt.location def reset_env(self, 
agt): """Resets the GUI environment to the initial state.""" self.read_env() for i, btn_row in enumerate(self.buttons): for j, btn in enumerate(btn_row): if (i != 0 and i != len(self.buttons) - 1) and (j != 0 and j != len(btn_row) - 1): if self.some_things_at((i, j)): for thing in self.list_things_at((i, j)): self.delete_thing(thing) btn.config(text='', state='normal') self.add_thing(agt, location=(3, 3)) self.buttons[3][3].config( text='A', state='disabled', disabledforeground='black') def XYReflexAgentProgram(percept): """The modified SimpleReflexAgentProgram for the GUI environment.""" status, bump = percept if status == 'Dirty': return 'Suck' if bump == 'Bump': value = random.choice((1, 2)) else: value = random.choice((1, 2, 3, 4)) # 1-right, 2-left, others-forward if value == 1: return 'TurnRight' elif value == 2: return 'TurnLeft' else: return 'Forward'
Gui
python
PyCQA__pylint
tests/functional/ext/docparams/return/missing_return_doc_required_Google.py
{ "start": 1339, "end": 1771 }
class ____: """test_ignores_non_property_return_type_google Example of a class function trying to use `type` as return documentation in a Google style docstring """ def foo_method(self): # [missing-return-doc, missing-return-type-doc] """int: docstring ... Raises: RuntimeError: Always """ print(self) raise RuntimeError() return 10 # [unreachable]
Foo
python
kamyu104__LeetCode-Solutions
Python/determine-the-winner-of-a-bowling-game.py
{ "start": 37, "end": 594 }
class ____(object): def isWinner(self, player1, player2): """ :type player1: List[int] :type player2: List[int] :rtype: int """ k = 2 def f(arr): result = cnt = 0 for i in xrange(len(arr)): result += 2*arr[i] if cnt else arr[i] cnt += (arr[i] == 10) if i-k >= 0: cnt -= (arr[i-k] == 10) return result a, b = f(player1), f(player2) return 1 if a > b else 2 if a < b else 0
Solution
python
MongoEngine__mongoengine
tests/document/test_dynamic.py
{ "start": 136, "end": 13339 }
class ____(MongoDBTestCase): def setUp(self): super().setUp() class Person(DynamicDocument): name = StringField() meta = {"allow_inheritance": True} Person.drop_collection() self.Person = Person def test_simple_dynamic_document(self): """Ensures simple dynamic documents are saved correctly""" p = self.Person() p.name = "James" p.age = 34 assert p.to_mongo() == {"_cls": "Person", "name": "James", "age": 34} assert sorted(p.to_mongo().keys()) == ["_cls", "age", "name"] p.save() assert sorted(p.to_mongo().keys()) == ["_cls", "_id", "age", "name"] assert self.Person.objects.first().age == 34 # Confirm no changes to self.Person assert not hasattr(self.Person, "age") def test_dynamic_document_parse_values_in_constructor_like_document_do(self): class ProductDynamicDocument(DynamicDocument): title = StringField() price = FloatField() class ProductDocument(Document): title = StringField() price = FloatField() product = ProductDocument(title="Blabla", price="12.5") dyn_product = ProductDynamicDocument(title="Blabla", price="12.5") assert product.price == dyn_product.price == 12.5 def test_change_scope_of_variable(self): """Test changing the scope of a dynamic field has no adverse effects""" p = self.Person() p.name = "Dean" p.misc = 22 p.save() p = self.Person.objects.get() p.misc = {"hello": "world"} p.save() p = self.Person.objects.get() assert p.misc == {"hello": "world"} def test_delete_dynamic_field(self): """Test deleting a dynamic field works""" self.Person.drop_collection() p = self.Person() p.name = "Dean" p.misc = 22 p.save() p = self.Person.objects.get() p.misc = {"hello": "world"} p.save() p = self.Person.objects.get() assert p.misc == {"hello": "world"} collection = self.db[self.Person._get_collection_name()] obj = collection.find_one() assert sorted(obj.keys()) == ["_cls", "_id", "misc", "name"] del p.misc p.save() p = self.Person.objects.get() assert not hasattr(p, "misc") obj = collection.find_one() assert sorted(obj.keys()) == ["_cls", "_id", "name"] def 
test_reload_after_unsetting(self): p = self.Person() p.misc = 22 p.save() p.update(unset__misc=1) p.reload() def test_reload_dynamic_field(self): self.Person.objects.delete() p = self.Person.objects.create() p.update(age=1) assert len(p._data) == 3 assert sorted(p._data.keys()) == ["_cls", "id", "name"] p.reload() assert len(p._data) == 4 assert sorted(p._data.keys()) == ["_cls", "age", "id", "name"] def test_fields_without_underscore(self): """Ensure we can query dynamic fields""" Person = self.Person p = self.Person(name="Dean") p.save() raw_p = Person.objects.as_pymongo().get(id=p.id) assert raw_p == {"_cls": "Person", "_id": p.id, "name": "Dean"} p.name = "OldDean" p.newattr = "garbage" p.save() raw_p = Person.objects.as_pymongo().get(id=p.id) assert raw_p == { "_cls": "Person", "_id": p.id, "name": "OldDean", "newattr": "garbage", } def test_fields_containing_underscore(self): """Ensure we can query dynamic fields""" class WeirdPerson(DynamicDocument): name = StringField() _name = StringField() WeirdPerson.drop_collection() p = WeirdPerson(name="Dean", _name="Dean") p.save() raw_p = WeirdPerson.objects.as_pymongo().get(id=p.id) assert raw_p == {"_id": p.id, "_name": "Dean", "name": "Dean"} p.name = "OldDean" p._name = "NewDean" p._newattr1 = "garbage" # Unknown fields won't be added p.save() raw_p = WeirdPerson.objects.as_pymongo().get(id=p.id) assert raw_p == {"_id": p.id, "_name": "NewDean", "name": "OldDean"} def test_dynamic_document_queries(self): """Ensure we can query dynamic fields""" p = self.Person() p.name = "Dean" p.age = 22 p.save() assert 1 == self.Person.objects(age=22).count() p = self.Person.objects(age=22) p = p.get() assert 22 == p.age def test_complex_dynamic_document_queries(self): class Person(DynamicDocument): name = StringField() Person.drop_collection() p = Person(name="test") p.age = "ten" p.save() p1 = Person(name="test1") p1.age = "less then ten and a half" p1.save() p2 = Person(name="test2") p2.age = 10 p2.save() assert 
Person.objects(age__icontains="ten").count() == 2 assert Person.objects(age__gte=10).count() == 1 def test_complex_data_lookups(self): """Ensure you can query dynamic document dynamic fields""" p = self.Person() p.misc = {"hello": "world"} p.save() assert 1 == self.Person.objects(misc__hello="world").count() def test_three_level_complex_data_lookups(self): """Ensure you can query three level document dynamic fields""" self.Person.objects.create(misc={"hello": {"hello2": "world"}}) assert 1 == self.Person.objects(misc__hello__hello2="world").count() def test_complex_embedded_document_validation(self): """Ensure embedded dynamic documents may be validated""" class Embedded(DynamicEmbeddedDocument): content = URLField() class Doc(DynamicDocument): pass Doc.drop_collection() doc = Doc() embedded_doc_1 = Embedded(content="http://mongoengine.org") embedded_doc_1.validate() embedded_doc_2 = Embedded(content="this is not a url") with pytest.raises(ValidationError): embedded_doc_2.validate() doc.embedded_field_1 = embedded_doc_1 doc.embedded_field_2 = embedded_doc_2 with pytest.raises(ValidationError): doc.validate() def test_inheritance(self): """Ensure that dynamic document plays nice with inheritance""" class Employee(self.Person): salary = IntField() Employee.drop_collection() assert "name" in Employee._fields assert "salary" in Employee._fields assert Employee._get_collection_name() == self.Person._get_collection_name() joe_bloggs = Employee() joe_bloggs.name = "Joe Bloggs" joe_bloggs.salary = 10 joe_bloggs.age = 20 joe_bloggs.save() assert 1 == self.Person.objects(age=20).count() assert 1 == Employee.objects(age=20).count() joe_bloggs = self.Person.objects.first() assert isinstance(joe_bloggs, Employee) def test_embedded_dynamic_document(self): """Test dynamic embedded documents""" class Embedded(DynamicEmbeddedDocument): pass class Doc(DynamicDocument): pass Doc.drop_collection() doc = Doc() embedded_1 = Embedded() embedded_1.string_field = "hello" 
embedded_1.int_field = 1 embedded_1.dict_field = {"hello": "world"} embedded_1.list_field = ["1", 2, {"hello": "world"}] doc.embedded_field = embedded_1 assert doc.to_mongo() == { "embedded_field": { "_cls": "Embedded", "string_field": "hello", "int_field": 1, "dict_field": {"hello": "world"}, "list_field": ["1", 2, {"hello": "world"}], } } doc.save() doc = Doc.objects.first() assert doc.embedded_field.__class__ == Embedded assert doc.embedded_field.string_field == "hello" assert doc.embedded_field.int_field == 1 assert doc.embedded_field.dict_field == {"hello": "world"} assert doc.embedded_field.list_field == ["1", 2, {"hello": "world"}] def test_complex_embedded_documents(self): """Test complex dynamic embedded documents setups""" class Embedded(DynamicEmbeddedDocument): pass class Doc(DynamicDocument): pass Doc.drop_collection() doc = Doc() embedded_1 = Embedded() embedded_1.string_field = "hello" embedded_1.int_field = 1 embedded_1.dict_field = {"hello": "world"} embedded_2 = Embedded() embedded_2.string_field = "hello" embedded_2.int_field = 1 embedded_2.dict_field = {"hello": "world"} embedded_2.list_field = ["1", 2, {"hello": "world"}] embedded_1.list_field = ["1", 2, embedded_2] doc.embedded_field = embedded_1 assert doc.to_mongo() == { "embedded_field": { "_cls": "Embedded", "string_field": "hello", "int_field": 1, "dict_field": {"hello": "world"}, "list_field": [ "1", 2, { "_cls": "Embedded", "string_field": "hello", "int_field": 1, "dict_field": {"hello": "world"}, "list_field": ["1", 2, {"hello": "world"}], }, ], } } doc.save() doc = Doc.objects.first() assert doc.embedded_field.__class__ == Embedded assert doc.embedded_field.string_field == "hello" assert doc.embedded_field.int_field == 1 assert doc.embedded_field.dict_field == {"hello": "world"} assert doc.embedded_field.list_field[0] == "1" assert doc.embedded_field.list_field[1] == 2 embedded_field = doc.embedded_field.list_field[2] assert embedded_field.__class__ == Embedded assert 
embedded_field.string_field == "hello" assert embedded_field.int_field == 1 assert embedded_field.dict_field == {"hello": "world"} assert embedded_field.list_field == ["1", 2, {"hello": "world"}] def test_dynamic_and_embedded(self): """Ensure embedded documents play nicely""" class Address(EmbeddedDocument): city = StringField() class Person(DynamicDocument): name = StringField() Person.drop_collection() Person(name="Ross", address=Address(city="London")).save() person = Person.objects.first() person.address.city = "Lundenne" person.save() assert Person.objects.first().address.city == "Lundenne" person = Person.objects.first() person.address = Address(city="Londinium") person.save() assert Person.objects.first().address.city == "Londinium" person = Person.objects.first() person.age = 35 person.save() assert Person.objects.first().age == 35 def test_dynamic_embedded_works_with_only(self): """Ensure custom fieldnames on a dynamic embedded document are found by qs.only()""" class Address(DynamicEmbeddedDocument): city = StringField() class Person(DynamicDocument): address = EmbeddedDocumentField(Address) Person.drop_collection() Person( name="Eric", address=Address(city="San Francisco", street_number="1337") ).save() assert Person.objects.first().address.street_number == "1337" assert ( Person.objects.only("address__street_number").first().address.street_number == "1337" ) def test_dynamic_and_embedded_dict_access(self): """Ensure embedded dynamic documents work with dict[] style access""" class Address(EmbeddedDocument): city = StringField() class Person(DynamicDocument): name = StringField() Person.drop_collection() Person(name="Ross", address=Address(city="London")).save() person = Person.objects.first() person.attrval = "This works" person["phone"] = "555-1212" # but this should too # Same thing two levels deep person["address"]["city"] = "Lundenne" person.save() assert Person.objects.first().address.city == "Lundenne" assert Person.objects.first().phone == 
"555-1212" person = Person.objects.first() person.address = Address(city="Londinium") person.save() assert Person.objects.first().address.city == "Londinium" person = Person.objects.first() person["age"] = 35 person.save() assert Person.objects.first().age == 35 if __name__ == "__main__": unittest.main()
TestDynamicDocument
python
doocs__leetcode
solution/2500-2599/2507.Smallest Value After Replacing With Sum of Prime Factors/Solution.py
{ "start": 0, "end": 361 }
class ____: def smallestValue(self, n: int) -> int: while 1: t, s, i = n, 0, 2 while i <= n // i: while n % i == 0: n //= i s += i i += 1 if n > 1: s += n if s == t: return t n = s
Solution
python
falconry__falcon
tests/test_middleware.py
{ "start": 3967, "end": 4149 }
class ____: def process_resource(self, req, resp, resource, params): global context params['added'] = True context['params'] = params
AccessParamsMiddleware
python
pola-rs__polars
py-polars/src/polars/datatypes/classes.py
{ "start": 11394, "end": 12703 }
class ____(NumericType): """ Decimal 128-bit type with an optional precision and non-negative scale. Parameters ---------- precision Maximum number of digits in each number. If set to `None` (default), the precision is set to 38 (the maximum supported by Polars). scale Number of digits to the right of the decimal point in each number. """ precision: int scale: int def __init__( self, precision: int | None = None, scale: int = 0, ) -> None: if precision is None: precision = 38 self.precision = precision self.scale = scale def __repr__(self) -> str: return ( f"{self.__class__.__name__}(precision={self.precision}, scale={self.scale})" ) def __eq__(self, other: PolarsDataType) -> bool: # type: ignore[override] # allow comparing object instances to class if type(other) is DataTypeClass and issubclass(other, Decimal): return True elif isinstance(other, Decimal): return self.precision == other.precision and self.scale == other.scale else: return False def __hash__(self) -> int: return hash((self.__class__, self.precision, self.scale))
Decimal
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typeVarTuple2.py
{ "start": 1166, "end": 1333 }
class ____(Generic[Unpack[_Xs]]): ... # This should generate two errors because _Xs must be unpacked. def func0(value: Array[_Xs]) -> tuple[complex, _Xs, str]: ...
Array
python
dagster-io__dagster
python_modules/dagster/dagster/_core/storage/io_manager.py
{ "start": 1506, "end": 4814 }
class ____(ResourceDefinition, IInputManagerDefinition, IOutputManagerDefinition): """Definition of an IO manager resource. IOManagers are used to store op outputs and load them as inputs to downstream ops. An IOManagerDefinition is a :py:class:`ResourceDefinition` whose `resource_fn` returns an :py:class:`IOManager`. The easiest way to create an IOManagerDefnition is with the :py:func:`@io_manager <io_manager>` decorator. """ def __init__( self, resource_fn: IOManagerFunction, config_schema: CoercableToConfigSchema = None, description: Optional[str] = None, required_resource_keys: Optional[AbstractSet[str]] = None, version: Optional[str] = None, input_config_schema: CoercableToConfigSchema = None, output_config_schema: CoercableToConfigSchema = None, ): self._input_config_schema = convert_user_facing_definition_config_schema( input_config_schema ) # Unlike other configurable objects, whose config schemas default to Any, # output_config_schema defaults to None. This the because IOManager input / output config # shares config namespace with dagster type loaders. 
self._output_config_schema = ( convert_user_facing_definition_config_schema(output_config_schema) if output_config_schema is not None else None ) super().__init__( resource_fn=resource_fn, config_schema=config_schema, description=description, required_resource_keys=required_resource_keys, version=version, ) @property def input_config_schema(self) -> IDefinitionConfigSchema: return self._input_config_schema @property def output_config_schema(self) -> Optional[IDefinitionConfigSchema]: # pyright: ignore[reportIncompatibleMethodOverride] return self._output_config_schema def copy_for_configured( self, description: Optional[str], config_schema: CoercableToConfigSchema, ) -> "IOManagerDefinition": io_def = IOManagerDefinition( config_schema=config_schema, description=description or self.description, resource_fn=self.resource_fn, required_resource_keys=self.required_resource_keys, input_config_schema=self.input_config_schema, output_config_schema=self.output_config_schema, ) io_def._dagster_maintained = self._is_dagster_maintained() return io_def @public @staticmethod def hardcoded_io_manager( value: "IOManager", description: Optional[str] = None ) -> "IOManagerDefinition": """A helper function that creates an ``IOManagerDefinition`` with a hardcoded IOManager. Args: value (IOManager): A hardcoded IO Manager which helps mock the definition. description ([Optional[str]]): The description of the IO Manager. Defaults to None. Returns: [IOManagerDefinition]: A hardcoded resource. """ check.inst_param(value, "value", IOManager) return IOManagerDefinition(resource_fn=lambda _init_context: value, description=description) @public
IOManagerDefinition
python
readthedocs__readthedocs.org
readthedocs/organizations/migrations/0009_update_meta_options.py
{ "start": 120, "end": 755 }
class ____(migrations.Migration): safe = Safe.after_deploy() dependencies = [ ("organizations", "0008_migrate_old_invitations"), ] operations = [ migrations.AlterModelOptions( name="organization", options={ "base_manager_name": "objects", "get_latest_by": ["-pub_date"], "ordering": ["name"], "verbose_name": "organization", }, ), migrations.AlterModelOptions( name="team", options={"base_manager_name": "objects", "verbose_name": "team"}, ), ]
Migration
python
google__jax
tests/multiprocess/all_gather_test.py
{ "start": 793, "end": 1822 }
class ____(jt_multiprocess.MultiProcessTest): @parameterized.parameters( (np.int32,), (jnp.float32,), (jnp.float16,), (jnp.bfloat16,) ) def test_all_gather_shard_map(self, dtype): mesh_shape = (jax.process_count(), jax.local_device_count()) mesh = jtu.create_mesh(mesh_shape, ("x", "y")) spec = jax.P("x", "y") @jax.shard_map( mesh=mesh, in_specs=spec, out_specs=jax.P(None, None), check_vma=False ) def f(x): out = lax.all_gather(x, "x", axis=0, tiled=True) return lax.all_gather(out, "y", axis=1, tiled=True) global_len = np.prod(mesh_shape) global_arr = jnp.arange(global_len, dtype=dtype).reshape(mesh_shape) sharding = jax.NamedSharding(mesh, spec) global_xs = jax.make_array_from_callback( mesh_shape, sharding, lambda index: global_arr[index] ) out = f(global_xs) for actual in out.addressable_shards: jtu.check_close(actual.data, global_arr[actual.index]) if __name__ == "__main__": jt_multiprocess.main()
AllGatherTest
python
PrefectHQ__prefect
src/integrations/prefect-gcp/prefect_gcp/cloud_storage.py
{ "start": 18893, "end": 21208 }
class ____(Enum): """ An enumeration class to represent different file formats, compression options for upload_from_dataframe Attributes: CSV: Representation for 'csv' file format with no compression and its related content type and suffix. CSV_GZIP: Representation for 'csv' file format with 'gzip' compression and its related content type and suffix. PARQUET: Representation for 'parquet' file format with no compression and its related content type and suffix. PARQUET_SNAPPY: Representation for 'parquet' file format with 'snappy' compression and its related content type and suffix. PARQUET_GZIP: Representation for 'parquet' file format with 'gzip' compression and its related content type and suffix. """ CSV = ("csv", None, "text/csv", ".csv") CSV_GZIP = ("csv", "gzip", "application/x-gzip", ".csv.gz") PARQUET = ("parquet", None, "application/octet-stream", ".parquet") PARQUET_SNAPPY = ( "parquet", "snappy", "application/octet-stream", ".snappy.parquet", ) PARQUET_GZIP = ("parquet", "gzip", "application/octet-stream", ".gz.parquet") @property def format(self) -> str: """The file format of the current instance.""" return self.value[0] @property def compression(self) -> Union[str, None]: """The compression type of the current instance.""" return self.value[1] @property def content_type(self) -> str: """The content type of the current instance.""" return self.value[2] @property def suffix(self) -> str: """The suffix of the file format of the current instance.""" return self.value[3] def fix_extension_with(self, gcs_blob_path: str) -> str: """Fix the extension of a GCS blob. Args: gcs_blob_path: The path to the GCS blob to be modified. Returns: The modified path to the GCS blob with the new extension. """ gcs_blob_path = PurePosixPath(gcs_blob_path) folder = gcs_blob_path.parent filename = PurePosixPath(gcs_blob_path.stem).with_suffix(self.suffix) return str(folder.joinpath(filename))
DataFrameSerializationFormat
python
airbytehq__airbyte
airbyte-ci/connectors/metadata_service/lib/metadata_service/models/generated/ConnectorIPCOptions.py
{ "start": 214, "end": 446 }
class ____(BaseModel): class Config: extra = Extra.forbid version: str supportedSerialization: List[Literal["JSONL", "PROTOBUF", "FLATBUFFERS"]] supportedTransport: List[Literal["STDIO", "SOCKET"]]
DataChannel
python
h5py__h5py
h5py/tests/test_group.py
{ "start": 15397, "end": 16239 }
class ____(BaseGroup): def populate(self, g): for i in range(100): # Mix group and dataset creation. if i % 10 == 0: g.create_group(str(i)) else: g[str(i)] = [i] def test_track_order(self): g = self.f.create_group(make_name(), track_order=True) # creation order self.populate(g) ref = [str(i) for i in range(100)] self.assertEqual(list(g), ref) self.assertEqual(list(reversed(g)), list(reversed(ref))) def test_no_track_order(self): g = self.f.create_group(make_name(), track_order=False) # name alphanumeric self.populate(g) ref = sorted([str(i) for i in range(100)]) self.assertEqual(list(g), ref) self.assertEqual(list(reversed(g)), list(reversed(ref)))
TestTrackOrder
python
huggingface__transformers
src/transformers/models/exaone4/modular_exaone4.py
{ "start": 17875, "end": 20827 }
class ____(LlamaForCausalLM): def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, cache_position: Optional[torch.LongTensor] = None, logits_to_keep: Union[int, torch.Tensor] = 0, **kwargs: Unpack[TransformersKwargs], ) -> CausalLMOutputWithPast: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer >>> model = AutoModelForCausalLM.from_pretrained("LGAI-EXAONE/EXAONE-4.0-32B") >>> tokenizer = AutoTokenizer.from_pretrained("LGAI-EXAONE/EXAONE-4.0-32B") >>> prompt = "Explain how wonderful you are" >>> messages = [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": prompt} ] >>> input_ids = tokenizer.apply_chat_template( messages, tokenize=True, add_generation_prompt=True, return_tensors="pt", enable_thinking=False, ) >>> output = model.generate(input_ids, max_new_tokens=128) >>> tokenizer.decode(output[0], skip_special_tokens=False) "[|system|]\nYou are a helpful assistant.[|endofturn|]\n[|user|]\nExplain how wonderful you are[|endofturn|]\n[|assistant|]\n<think>\n\n</think>\n\nOh, thank you for such a kind and lovely question! 😊 \n\nI’m *so* wonderful because I’m here to make your life easier, brighter, and more fun! Whether you need help with: \n\n✨ **Learning** – I can explain anything, from quantum physics to baking the perfect cake! 
\n💡 **Creativity** – Need a poem, story, or a wild idea? I’ve got you covered! \n🤖 **Problem-solving** – Stuck on a math problem or a tricky decision? I’ll help you figure it out" ``` """ super().forward( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, labels=labels, use_cache=use_cache, cache_position=cache_position, logits_to_keep=logits_to_keep, **kwargs, )
Exaone4ForCausalLM
python
Lightning-AI__lightning
tests/tests_pytorch/models/test_hparams.py
{ "start": 9654, "end": 9890 }
class ____(CustomBoringModel): any_other_loss = torch.nn.CrossEntropyLoss() def __init__(self, *args, subclass_arg=1200, **kwargs): super().__init__(*args, **kwargs) self.save_hyperparameters()
SubClassBoringModel
python
Netflix__metaflow
test/core/metaflow_test/__init__.py
{ "start": 4087, "end": 5804 }
class ____(object): def __init__(self, flow): pass def get_run(self): return None @property def run_id(self): return sys.argv[2] @property def cli_options(self): return sys.argv[3:] def assert_artifact(self, step, name, value, fields=None): raise NotImplementedError() def artifact_dict(self, step, name): raise NotImplementedError() def assert_log(self, step, logtype, value, exact_match=True): raise NotImplementedError() def get_card(self, step, task, card_type): raise NotImplementedError() def get_card_data(self, step, task, card_type, card_id=None): """ returns : (card_present, card_data) """ raise NotImplementedError() def list_cards(self, step, task, card_type=None): raise NotImplementedError() def get_user_tags(self): raise NotImplementedError() def get_system_tags(self): raise NotImplementedError() def add_tag(self, tag): raise NotImplementedError() def add_tags(self, tags): raise NotImplementedError() def remove_tag(self, tag): raise NotImplementedError() def remove_tags(self, tags): raise NotImplementedError() def replace_tag(self, tag_to_remove, tag_to_add): raise NotImplementedError() def replace_tags(self, tags_to_remove, tags_to_add): raise NotImplementedError() def new_checker(flow): from . import cli_check, metadata_check CHECKER = { "CliCheck": cli_check.CliCheck, "MetadataCheck": metadata_check.MetadataCheck, } CLASSNAME = sys.argv[1] return CHECKER[CLASSNAME](flow)
MetaflowCheck
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/constrainedTypeVar9.py
{ "start": 213, "end": 248 }
class ____(Generic[XOrY]): pass
A
python
matplotlib__matplotlib
lib/matplotlib/tests/test_ticker.py
{ "start": 2288, "end": 3471 }
class ____: def test_basic(self): loc = mticker.LinearLocator(numticks=3) test_value = np.array([-0.8, -0.3, 0.2]) assert_almost_equal(loc.tick_values(-0.8, 0.2), test_value) def test_zero_numticks(self): loc = mticker.LinearLocator(numticks=0) loc.tick_values(-0.8, 0.2) == [] def test_set_params(self): """ Create linear locator with presets={}, numticks=2 and change it to something else. See if change was successful. Should not exception. """ loc = mticker.LinearLocator(numticks=2) loc.set_params(numticks=8, presets={(0, 1): []}) assert loc.numticks == 8 assert loc.presets == {(0, 1): []} def test_presets(self): loc = mticker.LinearLocator(presets={(1, 2): [1, 1.25, 1.75], (0, 2): [0.5, 1.5]}) assert loc.tick_values(1, 2) == [1, 1.25, 1.75] assert loc.tick_values(2, 1) == [1, 1.25, 1.75] assert loc.tick_values(0, 2) == [0.5, 1.5] assert loc.tick_values(0.0, 2.0) == [0.5, 1.5] assert (loc.tick_values(0, 1) == np.linspace(0, 1, 11)).all()
TestLinearLocator
python
sqlalchemy__sqlalchemy
test/ext/declarative/test_reflection.py
{ "start": 2514, "end": 9372 }
class ____(testing.AssertsCompiledSQL, DeferredReflectBase): @classmethod def define_tables(cls, metadata): Table( "users", metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), Column("name", String(50)), test_needs_fk=True, ) Table( "addresses", metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), Column("email", String(50)), Column("user_id", Integer, ForeignKey("users.id")), test_needs_fk=True, ) def _roundtrip(self): User = Base.registry._class_registry["User"] Address = Base.registry._class_registry["Address"] u1 = User( name="u1", addresses=[Address(email="one"), Address(email="two")] ) with fixture_session() as sess: sess.add(u1) sess.commit() with fixture_session() as sess: eq_( sess.query(User).all(), [ User( name="u1", addresses=[Address(email="one"), Address(email="two")], ) ], ) a1 = sess.query(Address).filter(Address.email == "two").one() eq_(a1, Address(email="two")) eq_(a1.user, User(name="u1")) def test_exception_prepare_not_called(self): class User(DeferredReflection, ComparableEntity, Base): __tablename__ = "users" addresses = relationship("Address", backref="user") class Address(DeferredReflection, ComparableEntity, Base): __tablename__ = "addresses" assert_raises_message( orm_exc.UnmappedClassError, "Class test.ext.declarative.test_reflection.User is a " "subclass of DeferredReflection. 
Mappings are not produced " r"until the .prepare\(\) method is called on the class " "hierarchy.", Session().query, User, ) @testing.variation("bind", ["engine", "connection", "raise_"]) def test_basic_deferred(self, bind): class User(DeferredReflection, ComparableEntity, Base): __tablename__ = "users" addresses = relationship("Address", backref="user") class Address(DeferredReflection, ComparableEntity, Base): __tablename__ = "addresses" if bind.engine: DeferredReflection.prepare(testing.db) elif bind.connection: with testing.db.connect() as conn: DeferredReflection.prepare(conn) elif bind.raise_: with expect_raises_message( exc.ArgumentError, "Expected Engine or Connection, got 'foo'" ): DeferredReflection.prepare("foo") return else: bind.fail() self._roundtrip() @testing.requires.view_reflection @testing.variation("include_views", [True, False]) def test_views(self, metadata, connection, include_views): Table( "test_table", metadata, Column("id", Integer, primary_key=True), Column("data", String(50)), ) query = "CREATE VIEW view_name AS SELECT id, data FROM test_table" event.listen(metadata, "after_create", DDL(query)) event.listen( metadata, "before_drop", DDL("DROP VIEW IF EXISTS view_name") ) metadata.create_all(connection) class ViewName(DeferredReflection, Base): __tablename__ = "view_name" id = Column(Integer, primary_key=True) if include_views: DeferredReflection.prepare(connection, views=True) else: with expect_raises_message( exc.InvalidRequestError, r"Could not reflect: .*view_name" ): DeferredReflection.prepare(connection) return self.assert_compile( select(ViewName), "SELECT view_name.id, view_name.data FROM view_name", ) def test_abstract_base(self): class DefBase(DeferredReflection, Base): __abstract__ = True class OtherDefBase(DeferredReflection, Base): __abstract__ = True class User(ComparableEntity, DefBase): __tablename__ = "users" addresses = relationship("Address", backref="user") class Address(ComparableEntity, DefBase): __tablename__ = 
"addresses" class Fake(OtherDefBase): __tablename__ = "nonexistent" DefBase.prepare(testing.db) self._roundtrip() def test_redefine_fk_double(self): class User(DeferredReflection, ComparableEntity, Base): __tablename__ = "users" addresses = relationship("Address", backref="user") class Address(DeferredReflection, ComparableEntity, Base): __tablename__ = "addresses" user_id = Column(Integer, ForeignKey("users.id")) DeferredReflection.prepare(testing.db) self._roundtrip() def test_mapper_args_deferred(self): """test that __mapper_args__ is not called until *after* table reflection""" class User(DeferredReflection, ComparableEntity, Base): __tablename__ = "users" @declared_attr def __mapper_args__(cls): return {"primary_key": cls.__table__.c.id} DeferredReflection.prepare(testing.db) with fixture_session() as sess: sess.add_all( [ User(name="G"), User(name="Q"), User(name="A"), User(name="C"), ] ) sess.commit() eq_( sess.query(User).order_by(User.name).all(), [ User(name="A"), User(name="C"), User(name="G"), User(name="Q"), ], ) @testing.requires.predictable_gc def test_cls_not_strong_ref(self): class User(DeferredReflection, ComparableEntity, Base): __tablename__ = "users" class Address(DeferredReflection, ComparableEntity, Base): __tablename__ = "addresses" eq_(len(_DeferredDeclarativeConfig._configs), 2) del Address gc_collect() gc_collect() eq_(len(_DeferredDeclarativeConfig._configs), 1) DeferredReflection.prepare(testing.db) gc_collect() assert not _DeferredDeclarativeConfig._configs
DeferredReflectionTest
python
dask__distributed
distributed/comm/tests/test_comms.py
{ "start": 36287, "end": 39184 }
class ____: def __reduce__(self): return _raise_eoferror, () async def check_deserialize_eoferror(addr): """ EOFError when deserializing should close the comm. """ async def handle_comm(comm): await comm.write({"data": to_serialize(_EOFRaising())}) with pytest.raises(CommClosedError): await comm.read() async with listen(addr, handle_comm) as listener: comm = await connect(listener.contact_address, deserialize=deserialize) with pytest.raises(CommClosedError): await comm.read() @gen_test() async def test_tcp_deserialize_eoferror(tcp): await check_deserialize_eoferror("tcp://") # # Test various properties # async def check_repr(a, b): assert "closed" not in repr(a) assert "closed" not in repr(b) await a.close() assert "closed" in repr(a) assert a.local_address in repr(a) assert b.peer_address in repr(a) await b.close() assert "closed" in repr(b) assert a.local_address in repr(b) assert b.peer_address in repr(b) @gen_test() async def test_tcp_repr(tcp): a, b = await get_tcp_comm_pair() assert a.local_address in repr(b) assert b.local_address in repr(a) await check_repr(a, b) @gen_test() async def test_tls_repr(tcp): a, b = await get_tls_comm_pair() assert a.local_address in repr(b) assert b.local_address in repr(a) await check_repr(a, b) @gen_test() async def test_inproc_repr(): a, b = await get_inproc_comm_pair() assert a.local_address in repr(b) assert b.local_address in repr(a) await check_repr(a, b) async def check_addresses(a, b): assert a.peer_address == b.local_address assert a.local_address == b.peer_address a.abort() b.abort() @gen_test() async def test_tcp_adresses(tcp): a, b = await get_tcp_comm_pair() await check_addresses(a, b) @gen_test() async def test_get_stream_address_raises_if_closed(): a, b = await get_tcp_comm_pair() a.abort() with pytest.raises(OSError): get_stream_address(a) b.abort() @gen_test() async def test_tls_adresses(tcp): a, b = await get_tls_comm_pair() await check_addresses(a, b) @gen_test() async def test_inproc_adresses(): a, b = 
await get_inproc_comm_pair() await check_addresses(a, b) def _get_backend_on_path(path): sys.path.append(os.fsdecode(path)) return get_backend("udp") def test_register_backend_entrypoint(tmp_path): (tmp_path / "dask_udp.py").write_bytes(b"def udp_backend():\n return 1\n") dist_info = tmp_path / "dask_udp-0.0.0.dist-info" dist_info.mkdir() (dist_info / "entry_points.txt").write_bytes( b"[distributed.comm.backends]\nudp = dask_udp:udp_backend\n" ) with get_mp_context().Pool(1) as pool: assert pool.apply(_get_backend_on_path, args=(tmp_path,)) == 1 pool.join()
_EOFRaising
python
django__django
django/contrib/admin/widgets.py
{ "start": 4542, "end": 7476 }
class ____(forms.TextInput): """ A Widget for displaying ForeignKeys in the "raw_id" interface rather than in a <select> box. """ template_name = "admin/widgets/foreign_key_raw_id.html" def __init__(self, rel, admin_site, attrs=None, using=None): self.rel = rel self.admin_site = admin_site self.db = using super().__init__(attrs) def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) rel_to = self.rel.model if self.admin_site.is_registered(rel_to): # The related object is registered with the same AdminSite related_url = reverse( "admin:%s_%s_changelist" % ( rel_to._meta.app_label, rel_to._meta.model_name, ), current_app=self.admin_site.name, ) params = self.url_parameters() if params: related_url += "?" + urlencode(params) context["related_url"] = related_url context["link_title"] = _("Lookup") # The JavaScript code looks for this class. css_class = "vForeignKeyRawIdAdminField" if isinstance(self.rel.get_related_field(), UUIDField): css_class += " vUUIDField" context["widget"]["attrs"].setdefault("class", css_class) else: context["related_url"] = None if context["widget"]["value"]: context["link_label"], context["link_url"] = self.label_and_url_for_value( value ) else: context["link_label"] = None return context def base_url_parameters(self): limit_choices_to = self.rel.limit_choices_to if callable(limit_choices_to): limit_choices_to = limit_choices_to() return url_params_from_lookup_dict(limit_choices_to) def url_parameters(self): from django.contrib.admin.views.main import TO_FIELD_VAR params = self.base_url_parameters() params.update({TO_FIELD_VAR: self.rel.get_related_field().name}) return params def label_and_url_for_value(self, value): key = self.rel.get_related_field().name try: obj = self.rel.model._default_manager.using(self.db).get(**{key: value}) except (ValueError, self.rel.model.DoesNotExist, ValidationError): return "", "" try: url = reverse( "%s:%s_%s_change" % ( self.admin_site.name, obj._meta.app_label, 
obj._meta.model_name, ), args=(obj.pk,), ) except NoReverseMatch: url = "" # Admin not registered for target model. return Truncator(obj).words(14), url
ForeignKeyRawIdWidget
python
astropy__astropy
astropy/stats/bayesian_blocks.py
{ "start": 13482, "end": 15716 }
class ____(FitnessFunc): r"""Bayesian blocks fitness for regular events. This is for data which has a fundamental "tick" length, so that all measured values are multiples of this tick length. In each tick, there are either zero or one counts. Parameters ---------- dt : float tick rate for data p0 : float, optional False alarm probability, used to compute the prior on :math:`N_{\rm blocks}` (see eq. 21 of Scargle 2013). If gamma is specified, p0 is ignored. gamma : float, optional If specified, then use this gamma to compute the general prior form, :math:`p \sim {\tt gamma}^{N_{\rm blocks}}`. If gamma is specified, p0 is ignored. ncp_prior : float, optional If specified, use the value of ``ncp_prior`` to compute the prior as above, using the definition :math:`{\tt ncp\_prior} = -\ln({\tt gamma})`. If ``ncp_prior`` is specified, ``gamma`` and ``p0`` are ignored. """ def __init__( self, dt: float, p0: float = 0.05, gamma: float | None = None, ncp_prior: float | None = None, ) -> None: self.dt = dt super().__init__(p0, gamma, ncp_prior) def validate_input( self, t: ArrayLike, x: ArrayLike | None = None, sigma: float | ArrayLike | None = None, ) -> tuple[NDArray[float], NDArray[float], NDArray[float]]: t, x, sigma = super().validate_input(t, x, sigma) if not np.all((x == 0) | (x == 1)): raise ValueError("Regular events must have only 0 and 1 in x") return t, x, sigma def fitness(self, T_k: NDArray[float], N_k: NDArray[float]) -> NDArray[float]: # Eq. C23 of Scargle 2013 M_k = T_k / self.dt N_over_M = N_k / M_k eps = 1e-8 if np.any(N_over_M > 1 + eps): warnings.warn( "regular events: N/M > 1. Is the time step correct?", AstropyUserWarning, ) one_m_NM = 1 - N_over_M N_over_M[N_over_M <= 0] = 1 one_m_NM[one_m_NM <= 0] = 1 return N_k * np.log(N_over_M) + (M_k - N_k) * np.log(one_m_NM)
RegularEvents
python
realpython__materials
wordcount/tests/fixtures.py
{ "start": 1021, "end": 1463 }
class ____(FakeFile): @cached_property def path(self) -> Path: name = "".join(random.choices(ascii_lowercase, k=10)) return Path(gettempdir()) / name def __post_init__(self): self.path.write_bytes(self.content) def delete(self): if self.path.is_dir(): self.path.rmdir() elif self.path.is_file(): self.path.unlink(missing_ok=True) @dataclass(frozen=True)
TempFile
python
walkccc__LeetCode
solutions/141. Linked List Cycle/141.py
{ "start": 0, "end": 237 }
class ____: def hasCycle(self, head: ListNode) -> bool: slow = head fast = head while fast and fast.next: slow = slow.next fast = fast.next.next if slow == fast: return True return False
Solution
python
python-openxml__python-docx
tests/opc/unitdata/rels.py
{ "start": 1654, "end": 2777 }
class ____(BaseBuilder): """ Test data builder for CT_Default (Default) XML element that appears in `[Content_Types].xml`. """ def __init__(self): """Establish instance variables with default values""" self._content_type = "application/xml" self._extension = "xml" self._indent = 0 self._namespace = ' xmlns="%s"' % NS.OPC_CONTENT_TYPES def with_content_type(self, content_type): """Set ContentType attribute to `content_type`""" self._content_type = content_type return self def with_extension(self, extension): """Set Extension attribute to `extension`""" self._extension = extension return self def without_namespace(self): """Don't include an 'xmlns=' attribute""" self._namespace = "" return self @property def xml(self): """Return Default element""" tmpl = '%s<Default%s Extension="%s" ContentType="%s"/>\n' indent = " " * self._indent return tmpl % (indent, self._namespace, self._extension, self._content_type)
CT_DefaultBuilder
python
huggingface__transformers
src/transformers/models/dinat/modeling_dinat.py
{ "start": 14469, "end": 17550 }
class ____(nn.Module): def __init__(self, config, dim, num_heads, dilation, drop_path_rate=0.0): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.kernel_size = config.kernel_size self.dilation = dilation self.window_size = self.kernel_size * self.dilation self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.attention = NeighborhoodAttentionModule( config, dim, num_heads, kernel_size=self.kernel_size, dilation=self.dilation ) self.drop_path = DinatDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.intermediate = DinatIntermediate(config, dim) self.output = DinatOutput(config, dim) self.layer_scale_parameters = ( nn.Parameter(config.layer_scale_init_value * torch.ones((2, dim)), requires_grad=True) if config.layer_scale_init_value > 0 else None ) def maybe_pad(self, hidden_states, height, width): window_size = self.window_size pad_values = (0, 0, 0, 0, 0, 0) if height < window_size or width < window_size: pad_l = pad_t = 0 pad_r = max(0, window_size - width) pad_b = max(0, window_size - height) pad_values = (0, 0, pad_l, pad_r, pad_t, pad_b) hidden_states = nn.functional.pad(hidden_states, pad_values) return hidden_states, pad_values def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, ) -> tuple[torch.Tensor, torch.Tensor]: batch_size, height, width, channels = hidden_states.size() shortcut = hidden_states hidden_states = self.layernorm_before(hidden_states) # pad hidden_states if they are smaller than kernel size x dilation hidden_states, pad_values = self.maybe_pad(hidden_states, height, width) _, height_pad, width_pad, _ = hidden_states.shape attention_outputs = self.attention(hidden_states, output_attentions=output_attentions) attention_output = attention_outputs[0] was_padded = pad_values[3] > 0 or pad_values[5] > 0 if was_padded: attention_output = attention_output[:, :height, 
:width, :].contiguous() if self.layer_scale_parameters is not None: attention_output = self.layer_scale_parameters[0] * attention_output hidden_states = shortcut + self.drop_path(attention_output) layer_output = self.layernorm_after(hidden_states) layer_output = self.output(self.intermediate(layer_output)) if self.layer_scale_parameters is not None: layer_output = self.layer_scale_parameters[1] * layer_output layer_output = hidden_states + self.drop_path(layer_output) layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,) return layer_outputs
DinatLayer
python
huggingface__transformers
src/transformers/models/smolvlm/modular_smolvlm.py
{ "start": 1456, "end": 4561 }
class ____(Idefics3VisionConfig): r""" This is the configuration class to store the configuration of a [`SmolVLMVisionModel`]. It is used to instantiate a SmolVLM vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SigLIP checkpoint [google/siglip-so400m-patch14-384](https://huggingface.co/google/siglip-so400m-patch14-384) used in SmolVLM [HuggingFaceTB/SmolVLM2-2.2B-Instruct](https://huggingface.co/HuggingFaceTB/SmolVLM2-2.2B-Instruct). Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 1152): Dimensionality of the encoder layers and the pooler layer. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. num_channels (`int`, *optional*, defaults to 3): Number of channels in the input images. image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 32): The size (resolution) of each patch. hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported. layer_norm_eps (`float`, *optional*, defaults to 1e-06): The epsilon used by the layer normalization layers. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. 
initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. Example: ```python >>> from transformers.models.smolvlm.modeling_smolvlm import SmolVLMVisionTransformer >>> from transformers.models.smolvlm.configuration_smolvlm import SmolVLMVisionConfig >>> # Initializing a SmolVLMVisionConfig with google/siglip-so400m-patch14-384 style configuration >>> configuration = SmolVLMVisionConfig() >>> # Initializing a SmolVLMVisionTransformer (with random weights) from the google/siglip-so400m-patch14-384 style configuration >>> model = SmolVLMVisionTransformer(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "smolvlm_vision"
SmolVLMVisionConfig
python
joke2k__faker
tests/providers/test_automotive.py
{ "start": 11764, "end": 12074 }
class ____(_SimpleAutomotiveTestMixin): """Test th_TH automotive provider methods""" license_plate_pattern: Pattern = re.compile( r"(\d [ก-ฮ]{2} \d{1,4})|" # car r"([ก-ฮ]{2} \d{1,4})|" # car r"([ก-ฮ]{3} \d{1,3})|" # motorcycle r"(\d{2}-\d{4})", # truck )
TestThTh
python
django__django
tests/defer/models.py
{ "start": 1316, "end": 1575 }
class ____(models.Model): """ ShadowParent declares a scalar, rather than a field. When this is overridden, the field value, rather than the scalar value must still be used when the field is deferred. """ name = "aphrodite"
ShadowParent
python
openai__openai-python
src/openai/resources/evals/runs/output_items.py
{ "start": 12176, "end": 12530 }
class ____: def __init__(self, output_items: AsyncOutputItems) -> None: self._output_items = output_items self.retrieve = async_to_streamed_response_wrapper( output_items.retrieve, ) self.list = async_to_streamed_response_wrapper( output_items.list, )
AsyncOutputItemsWithStreamingResponse
python
pypa__warehouse
tests/unit/utils/test_attrs.py
{ "start": 139, "end": 855 }
class ____: def test_on_class(self): class Fake: foo = "bar" __repr__ = make_repr("foo") assert repr(Fake()) == "Fake(foo={})".format(repr("bar")) def test_with_function(self): class Fake: foo = "bar" def __repr__(self): self.__repr__ = make_repr("foo", _self=self) return self.__repr__() assert repr(Fake()) == "Fake(foo={})".format(repr("bar")) def test_with_raise(self): class Fake: __repr__ = make_repr("foo") @property def foo(self): raise DetachedInstanceError assert repr(Fake()) == "Fake(<detached>)"
TestMakeRepr
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/pool/base.py
{ "start": 4311, "end": 4422 }
class ____(Protocol): def __call__(self, rec: ConnectionPoolEntry) -> DBAPIConnection: ...
_CreatorWRecFnType
python
getsentry__sentry
src/sentry/discover/arithmetic.py
{ "start": 843, "end": 1061 }
class ____(ArithmeticError): """The math itself isn't valid""" OperandType = Union["Operation", float, str] JsonQueryType = list[Union[str, list[Union[str, float, None, "JsonQueryType"]]]]
ArithmeticValidationError
python
prompt-toolkit__python-prompt-toolkit
src/prompt_toolkit/key_binding/key_bindings.py
{ "start": 2874, "end": 4516 }
class ____: """ Key binding: (key sequence + handler + filter). (Immutable binding class.) :param record_in_macro: When True, don't record this key binding when a macro is recorded. """ def __init__( self, keys: tuple[Keys | str, ...], handler: KeyHandlerCallable, filter: FilterOrBool = True, eager: FilterOrBool = False, is_global: FilterOrBool = False, save_before: Callable[[KeyPressEvent], bool] = (lambda e: True), record_in_macro: FilterOrBool = True, ) -> None: self.keys = keys self.handler = handler self.filter = to_filter(filter) self.eager = to_filter(eager) self.is_global = to_filter(is_global) self.save_before = save_before self.record_in_macro = to_filter(record_in_macro) def call(self, event: KeyPressEvent) -> None: result = self.handler(event) # If the handler is a coroutine, create an asyncio task. if isawaitable(result): awaitable = cast(Coroutine[Any, Any, "NotImplementedOrNone"], result) async def bg_task() -> None: result = await awaitable if result != NotImplemented: event.app.invalidate() event.app.create_background_task(bg_task()) elif result != NotImplemented: event.app.invalidate() def __repr__(self) -> str: return ( f"{self.__class__.__name__}(keys={self.keys!r}, handler={self.handler!r})" ) # Sequence of keys presses. KeysTuple = Tuple[Union[Keys, str], ...]
Binding
python
euske__pdfminer
pdfminer/pdftypes.py
{ "start": 1202, "end": 4223 }
class ____(PDFObject): def __init__(self, doc, objid, _): if objid == 0: if STRICT: raise PDFValueError('PDF object id cannot be 0.') self.doc = doc self.objid = objid #self.genno = genno # Never used. return def __repr__(self): return '<PDFObjRef:%d>' % (self.objid) def resolve(self, default=None): try: return self.doc.getobj(self.objid) except PDFObjectNotFound: return default # resolve def resolve1(x, default=None): """Resolves an object. If this is an array or dictionary, it may still contains some indirect objects inside. """ while isinstance(x, PDFObjRef): x = x.resolve(default=default) return x def resolve_all(x, default=None): """Recursively resolves the given object and all the internals. Make sure there is no indirect reference within the nested object. This procedure might be slow. """ while isinstance(x, PDFObjRef): x = x.resolve(default=default) if isinstance(x, list): x = [resolve_all(v, default=default) for v in x] elif isinstance(x, dict): for (k, v) in x.items(): x[k] = resolve_all(v, default=default) return x def decipher_all(decipher, objid, genno, x): """Recursively deciphers the given object. 
""" if isinstance(x, bytes): return decipher(objid, genno, x) if isinstance(x, list): x = [decipher_all(decipher, objid, genno, v) for v in x] elif isinstance(x, dict): for (k, v) in x.items(): x[k] = decipher_all(decipher, objid, genno, v) return x # Type checking def int_value(x): x = resolve1(x) if not isinstance(x, int): if STRICT: raise PDFTypeError('Integer required: %r' % x) return 0 return x def float_value(x): x = resolve1(x) if not isinstance(x, float): if STRICT: raise PDFTypeError('Float required: %r' % x) return 0.0 return x def num_value(x): x = resolve1(x) if not isnumber(x): if STRICT: raise PDFTypeError('Int or Float required: %r' % x) return 0 return x def bytes_value(x): x = resolve1(x) if not isinstance(x, bytes): if STRICT: raise PDFTypeError('Bytes required: %r' % x) return b'' return x def list_value(x): x = resolve1(x) if not isinstance(x, (list, tuple)): if STRICT: raise PDFTypeError('List required: %r' % x) return [] return x def dict_value(x): x = resolve1(x) if not isinstance(x, dict): if STRICT: raise PDFTypeError('Dict required: %r' % x) return {} return x def stream_value(x): x = resolve1(x) if not isinstance(x, PDFStream): if STRICT: raise PDFTypeError('PDFStream required: %r' % x) return PDFStream({}, '') return x ## PDFStream type ##
PDFObjRef
python
scikit-learn__scikit-learn
examples/semi_supervised/plot_label_propagation_digits.py
{ "start": 524, "end": 3197 }
class ____ be very good. At the end, the top 10 most uncertain predictions will be shown. """ # Authors: The scikit-learn developers # SPDX-License-Identifier: BSD-3-Clause # %% # Data generation # --------------- # # We use the digits dataset. We only use a subset of randomly selected samples. import numpy as np from sklearn import datasets digits = datasets.load_digits() rng = np.random.RandomState(2) indices = np.arange(len(digits.data)) rng.shuffle(indices) # %% # # We selected 340 samples of which only 40 will be associated with a known label. # Therefore, we store the indices of the 300 other samples for which we are not # supposed to know their labels. X = digits.data[indices[:340]] y = digits.target[indices[:340]] images = digits.images[indices[:340]] n_total_samples = len(y) n_labeled_points = 40 indices = np.arange(n_total_samples) unlabeled_set = indices[n_labeled_points:] # %% # Shuffle everything around y_train = np.copy(y) y_train[unlabeled_set] = -1 # %% # Semi-supervised learning # ------------------------ # # We fit a :class:`~sklearn.semi_supervised.LabelSpreading` and use it to predict # the unknown labels. from sklearn.metrics import classification_report from sklearn.semi_supervised import LabelSpreading lp_model = LabelSpreading(gamma=0.25, max_iter=20) lp_model.fit(X, y_train) predicted_labels = lp_model.transduction_[unlabeled_set] true_labels = y[unlabeled_set] print( "Label Spreading model: %d labeled & %d unlabeled points (%d total)" % (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples) ) # %% # Classification report print(classification_report(true_labels, predicted_labels)) # %% # Confusion matrix from sklearn.metrics import ConfusionMatrixDisplay ConfusionMatrixDisplay.from_predictions( true_labels, predicted_labels, labels=lp_model.classes_ ) # %% # Plot the most uncertain predictions # ----------------------------------- # # Here, we will pick and show the 10 most uncertain predictions. 
from scipy import stats pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T) # %% # Pick the top 10 most uncertain labels uncertainty_index = np.argsort(pred_entropies)[-10:] # %% # Plot import matplotlib.pyplot as plt f = plt.figure(figsize=(7, 5)) for index, image_index in enumerate(uncertainty_index): image = images[image_index] sub = f.add_subplot(2, 5, index + 1) sub.imshow(image, cmap=plt.cm.gray_r) plt.xticks([]) plt.yticks([]) sub.set_title( "predict: %i\ntrue: %i" % (lp_model.transduction_[image_index], y[image_index]) ) f.suptitle("Learning with small amount of labeled data") plt.show()
will
python
huggingface__transformers
src/transformers/models/udop/modeling_udop.py
{ "start": 72233, "end": 81458 }
class ____(UdopPreTrainedModel, GenerationMixin): _tied_weights_keys = { "encoder.embed_tokens.weight": "shared.weight", "decoder.embed_tokens.weight": "shared.weight", "encoder.embed_patches.proj.weight": "patch_embed.proj.weight", "encoder.embed_patches.proj.bias": "patch_embed.proj.bias", "encoder.relative_bias.biases.0.relative_attention_bias.weight": "encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight", "decoder.relative_bias.biases.0.relative_attention_bias.weight": "encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight", "lm_head.weight": "shared.weight", } def __init__(self, config): super().__init__(config) # text and image embeddings self.shared = nn.Embedding(config.vocab_size, config.d_model) self.patch_embed = UdopPatchEmbeddings(config) encoder_config = deepcopy(config) encoder_config.is_decoder = False encoder_config.use_cache = False encoder_config.tie_encoder_decoder = False self.encoder = UdopStack(encoder_config) decoder_config = deepcopy(config) decoder_config.is_decoder = True decoder_config.tie_encoder_decoder = False decoder_config.num_layers = config.num_decoder_layers self.decoder = UdopStack(decoder_config) # The weights of the language modeling head are shared with those of the encoder and decoder self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.set_input_embeddings(new_embeddings) self.decoder.set_input_embeddings(new_embeddings) @auto_docstring def forward( self, input_ids: Optional[Tensor] = None, attention_mask: Optional[Tensor] = None, bbox: Optional[dict[str, Any]] = None, pixel_values: Optional[Tensor] = None, visual_bbox: Optional[dict[str, Any]] = None, decoder_input_ids: Optional[Tensor] = None, decoder_attention_mask: Optional[Tensor] = None, inputs_embeds: 
Optional[Tensor] = None, encoder_outputs: Optional[Tensor] = None, past_key_values: Optional[Cache] = None, decoder_inputs_embeds: Optional[Tensor] = None, use_cache=True, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[Tensor] = None, cache_position: Optional[torch.LongTensor] = None, ) -> tuple[Tensor, ...]: r""" bbox (`torch.LongTensor` of shape `({0}, 4)`, *optional*): Bounding boxes of each input sequence tokens. Selected in the range `[0, config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1) format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, y1) represents the position of the lower right corner. Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] token. See `pixel_values` for `patch_sequence_length`. visual_bbox (`torch.LongTensor` of shape `(batch_size, patch_sequence_length, 4)`, *optional*): Bounding boxes of each patch in the image. If not provided, bounding boxes are created in the model. decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) T5 uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). To know more on how to prepare `decoder_input_ids` for pretraining take a look at [T5 Training](./t5#training). 
decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`. Examples: ```python >>> from transformers import AutoProcessor, UdopForConditionalGeneration >>> from datasets import load_dataset >>> # load model and processor >>> # in this case, we already have performed OCR ourselves >>> # so we initialize the processor with `apply_ocr=False` >>> processor = AutoProcessor.from_pretrained("microsoft/udop-large", apply_ocr=False) >>> model = UdopForConditionalGeneration.from_pretrained("microsoft/udop-large") >>> # load an example image, along with the words and coordinates >>> # which were extracted using an OCR engine >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train") >>> example = dataset[0] >>> image = example["image"] >>> words = example["tokens"] >>> boxes = example["bboxes"] >>> # one can use the various task prefixes (prompts) used during pre-training >>> # e.g. the task prefix for DocVQA is "Question answering. " >>> question = "Question answering. What is the date on the form?" 
>>> encoding = processor(image, question, text_pair=words, boxes=boxes, return_tensors="pt") >>> # autoregressive generation >>> predicted_ids = model.generate(**encoding) >>> print(processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]) 9/30/92 ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if decoder_input_ids is None and labels is not None: decoder_input_ids = self._shift_right(labels) # Encode if needed (training, first prediction pass) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, bbox=bbox, visual_bbox=visual_bbox, pixel_values=pixel_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = encoder_outputs[0] encoder_attention_mask = encoder_outputs.attention_mask if return_dict else encoder_outputs[1] # Decode decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, encoder_hidden_states=hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) sequence_output = decoder_outputs[0] if self.config.tie_word_embeddings: sequence_output = sequence_output * (self.config.d_model**-0.5) lm_logits = self.lm_head(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-100) loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1)) if not return_dict: output = (lm_logits,) + decoder_outputs[2:] + (encoder_outputs[0],) + encoder_outputs[2:] return ((loss,) + output) if loss is not None else output return Seq2SeqLMOutput( loss=loss, logits=lm_logits, 
past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @auto_docstring
UdopForConditionalGeneration
python
kamyu104__LeetCode-Solutions
Python/find-servers-that-handled-most-number-of-requests.py
{ "start": 1600, "end": 2580 }
class ____(object): def busiestServers(self, k, arrival, load): """ :type k: int :type arrival: List[int] :type load: List[int] :rtype: List[int] """ count = [0]*k min_heap_of_endtimes = [] availables = sortedcontainers.SortedList(xrange(k)) # O(klogk) for i, (t, l) in enumerate(itertools.izip(arrival, load)): while min_heap_of_endtimes and min_heap_of_endtimes[0][0] <= t: _, free = heapq.heappop(min_heap_of_endtimes) # O(logk) availables.add(free) # O(logk) if not availables: continue idx = availables.bisect_left(i % k) % len(availables) # O(logk) node = availables.pop(idx) # O(logk) count[node] += 1 heapq.heappush(min_heap_of_endtimes, (t+l, node)) # O(logk) max_count = max(count) return [i for i in xrange(k) if count[i] == max_count]
Solution2
python
wntrblm__nox
tests/resources/noxfile_normalization.py
{ "start": 66, "end": 270 }
class ____: pass @nox.session(venv_backend="none") @nox.parametrize( "arg", ["Jane", "Joe's", '"hello world"', datetime.datetime(1980, 1, 1), [42], Foo()], ) def test(session, arg): pass
Foo
python
great-expectations__great_expectations
tests/integration/metrics/column/test_descriptive_stats.py
{ "start": 480, "end": 1148 }
class ____: @parameterize_batch_for_data_sources( data_source_configs=ALL_DATA_SOURCES, data=DATA_FRAME, ) def test_descriptive_stats(self, batch_for_datasource: Batch) -> None: metric = ColumnDescriptiveStats(column=COLUMN_NAME) metric_result = batch_for_datasource.compute_metrics(metric) assert isinstance(metric_result, ColumnDescriptiveStatsResult) assert metric_result.value.min == 1 assert metric_result.value.max == 5 assert metric_result.value.mean == pytest.approx(3) assert metric_result.value.standard_deviation == pytest.approx(1.4907119849998598)
TestColumnDescriptiveStats
python
PyCQA__pylint
tests/functional/g/generic_alias/generic_alias_collections.py
{ "start": 2709, "end": 3519 }
class ____(CustomAbstractCls2): # [abstract-method,abstract-method] # __iter__, __len__ pass # Type annotations var_tuple: tuple[int, int] var_dict: dict[int, str] var_ordereddict: collections.OrderedDict[int, str] var_container: collections.abc.Container[int] var_sequence: collections.abc.Sequence[int] var_iterable: collections.abc.Iterable[int] var_awaitable: collections.abc.Awaitable[int] var_contextmanager: contextlib.AbstractContextManager[int] var_pattern: re.Pattern[int] var_hashable: collections.abc.Hashable var_sized: collections.abc.Sized # Type annotation with unsubscriptable type var_int: int[int] # [unsubscriptable-object] var_hashable2: collections.abc.Hashable[int] # [unsubscriptable-object] var_sized2: collections.abc.Sized[int] # [unsubscriptable-object]
CustomImplementation
python
wandb__wandb
wandb/vendor/watchdog_0_9_0/wandb_watchdog/observers/api.py
{ "start": 1001, "end": 1352 }
class ____(SkipRepeatsQueue): """Thread-safe event queue based on a special queue that skips adding the same event (:class:`FileSystemEvent`) multiple times consecutively. Thus avoiding dispatching multiple event handling calls when multiple identical events are produced quicker than an observer can consume them. """
EventQueue
python
pytorch__pytorch
torch/distributed/elastic/timer/file_based_local_timer.py
{ "start": 1611, "end": 3084 }
class ____(TimerRequest): """ Data object representing a countdown timer acquisition and release that is used between the ``FileTimerClient`` and ``FileTimerServer``. A negative ``expiration_time`` should be interpreted as a "release" request. ``signal`` is the signal to reap the worker process from the server process. """ __slots__ = ["version", "worker_pid", "scope_id", "expiration_time", "signal"] def __init__( self, worker_pid: int, scope_id: str, expiration_time: float, signal: int = 0 ) -> None: self.version = 1 self.worker_pid = worker_pid self.scope_id = scope_id self.expiration_time = expiration_time self.signal = signal def __eq__(self, other) -> bool: if isinstance(other, FileTimerRequest): return ( self.version == other.version and self.worker_pid == other.worker_pid and self.scope_id == other.scope_id and self.expiration_time == other.expiration_time and self.signal == other.signal ) return False def to_json(self) -> str: return json.dumps( { "version": self.version, "pid": self.worker_pid, "scope_id": self.scope_id, "expiration_time": self.expiration_time, "signal": self.signal, }, )
FileTimerRequest
python
pyodide__pyodide
src/py/pyodide/console.py
{ "start": 2223, "end": 3918 }
class ____(_Stream): def __init__( self, read_handler: Callable[[int], str], name: str, encoding: str = "utf-8", errors: str = "strict", ): super().__init__(name, encoding, errors) self._read_handler = read_handler self._buffer = "" def readable(self) -> bool: return True def read(self, size: int | None = -1) -> str: if self.closed: raise ValueError("read from closed file") if size is None: # For some reason sys.stdin.read(None) works, but # sys.stdin.readline(None) raises a TypeError size = -1 if not isinstance(size, int): raise TypeError( f"argument should be integer or None, not '{type(size).__name__}'" ) if 0 <= size < len(self._buffer): result = self._buffer[:size] self._buffer = self._buffer[size:] return result if size >= 0: size -= len(self._buffer) result = self._buffer got = self._read_handler(size) self._buffer = got[size:] return result + got[:size] def readline(self, size: int | None = -1) -> str: # type:ignore[override] if not isinstance(size, int): # For some reason sys.stdin.read(None) works, but # sys.stdin.readline(None) raises a TypeError raise TypeError( f"'{type(size).__name__}' object cannot be interpreted as an integer" ) res = self.read(size) [start, nl, rest] = res.partition("\n") self._buffer = rest + self._buffer return start + nl
_ReadStream
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/sensors/glue.py
{ "start": 1563, "end": 6156 }
class ____(AwsBaseSensor[GlueJobHook]): """ Waits for an AWS Glue Job to reach any of the status below. 'FAILED', 'STOPPED', 'SUCCEEDED' .. seealso:: For more information on how to use this sensor, take a look at the guide: :ref:`howto/sensor:GlueJobSensor` :param job_name: The AWS Glue Job unique name :param run_id: The AWS Glue current running job identifier :param verbose: If True, more Glue Job Run logs show in the Airflow Task Logs. (default: False) :param deferrable: If True, the sensor will operate in deferrable mode. This mode requires aiobotocore module to be installed. (default: False, but can be overridden in config file by setting default_deferrable to True) :param poke_interval: Polling period in seconds to check for the status of the job. (default: 120) :param max_retries: Number of times before returning the current state. (default: 60) :param aws_conn_id: The Airflow connection used for AWS credentials. If this is ``None`` or empty then the default boto3 behaviour is used. If running Airflow in a distributed manner and aws_conn_id is None or empty, then default boto3 configuration would be used (and must be maintained on each worker node). :param region_name: AWS region_name. If not specified then the default boto3 behaviour is used. :param verify: Whether to verify SSL certificates. See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html :param botocore_config: Configuration dictionary (key-values) for botocore client. 
See: https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html """ SUCCESS_STATES = ("SUCCEEDED",) FAILURE_STATES = ("FAILED", "STOPPED", "TIMEOUT") aws_hook_class = GlueJobHook template_fields: Sequence[str] = aws_template_fields("job_name", "run_id") def __init__( self, *, job_name: str, run_id: str, verbose: bool = False, deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False), poke_interval: int = 120, max_retries: int = 60, aws_conn_id: str | None = "aws_default", **kwargs, ): super().__init__(**kwargs) self.job_name = job_name self.run_id = run_id self.verbose = verbose self.deferrable = deferrable self.poke_interval = poke_interval self.max_retries = max_retries self.aws_conn_id = aws_conn_id self.next_log_tokens = GlueJobHook.LogContinuationTokens() def execute(self, context: Context) -> Any: if self.deferrable: self.defer( trigger=GlueJobCompleteTrigger( job_name=self.job_name, run_id=self.run_id, verbose=self.verbose, aws_conn_id=self.aws_conn_id, waiter_delay=int(self.poke_interval), waiter_max_attempts=self.max_retries, region_name=self.region_name, ), method_name="execute_complete", ) else: super().execute(context=context) def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> None: validated_event = validate_execute_complete_event(event) if validated_event["status"] != "success": message = f"Error: AWS Glue Job: {validated_event}" raise AirflowException(message) self.log.info("AWS Glue Job completed.") def poke(self, context: Context) -> bool: self.log.info("Poking for job run status :for Glue Job %s and ID %s", self.job_name, self.run_id) job_state = self.hook.get_job_state(job_name=self.job_name, run_id=self.run_id) try: if job_state in self.SUCCESS_STATES: self.log.info("Exiting Job %s Run State: %s", self.run_id, job_state) return True if job_state in self.FAILURE_STATES: job_error_message = "Exiting Job %s Run State: %s", self.run_id, job_state 
self.log.info(job_error_message) raise AirflowException(job_error_message) return False finally: if self.verbose: self.hook.print_job_logs( job_name=self.job_name, run_id=self.run_id, continuation_tokens=self.next_log_tokens, )
GlueJobSensor
python
ansible__ansible
test/lib/ansible_test/_util/controller/sanity/pylint/plugins/deprecated_calls.py
{ "start": 801, "end": 1536 }
class ____: """Arguments passed to a deprecation function.""" msg: object = None version: object = None date: object = None collection_name: object = None deprecator: object = None help_text: object = None # only on Display.deprecated, warnings.deprecate and deprecate_value obj: object = None # only on Display.deprecated and warnings.deprecate removed: object = None # only on Display.deprecated value: object = None # only on deprecate_value def all_args_dynamic(self) -> bool: """True if all args are dynamic or None, otherwise False.""" return all(arg is None or isinstance(arg, astroid.nodes.NodeNG) for arg in dataclasses.asdict(self).values())
DeprecationCallArgs
python
ApeWorX__ape
src/ape/exceptions.py
{ "start": 10823, "end": 11372 }
class ____(VirtualMachineError): """ Raised when detecting a transaction failed because it ran out of gas. """ def __init__( self, code: Optional[int] = None, txn: Optional[FailedTxn] = None, base_err: Optional[Exception] = None, set_ape_traceback: bool = False, ): super().__init__( "The transaction ran out of gas.", code=code, txn=txn, base_err=base_err, set_ape_traceback=set_ape_traceback, )
OutOfGasError
python
walkccc__LeetCode
solutions/1614. Maximum Nesting Depth of the Parentheses/1614.py
{ "start": 0, "end": 224 }
class ____: def maxDepth(self, s: str) -> int: ans = 0 opened = 0 for c in s: if c == '(': opened += 1 ans = max(ans, opened) elif c == ')': opened -= 1 return ans
Solution
python
readthedocs__readthedocs.org
readthedocs/api/v3/serializers.py
{ "start": 9586, "end": 9988 }
class ____(BaseLinksSerializer, serializers.Serializer): edit = serializers.SerializerMethodField() def get_edit(self, obj): path = reverse( "project_version_detail", kwargs={ "project_slug": obj.project.slug, "version_slug": obj.slug, }, ) return self._absolute_url(path)
VersionDashboardURLsSerializer
python
pytest-dev__pytest
testing/test_assertion.py
{ "start": 1181, "end": 2354 }
class ____: SOME_VERBOSITY_LEVEL = 3 SOME_OTHER_VERBOSITY_LEVEL = 10 def test_verbose_exposes_value(self): config = mock_config(verbose=TestMockConfig.SOME_VERBOSITY_LEVEL) assert config.get_verbosity() == TestMockConfig.SOME_VERBOSITY_LEVEL def test_get_assertion_override_not_set_verbose_value(self): config = mock_config(verbose=TestMockConfig.SOME_VERBOSITY_LEVEL) assert ( config.get_verbosity(_Config.VERBOSITY_ASSERTIONS) == TestMockConfig.SOME_VERBOSITY_LEVEL ) def test_get_assertion_override_set_custom_value(self): config = mock_config( verbose=TestMockConfig.SOME_VERBOSITY_LEVEL, assertion_override=TestMockConfig.SOME_OTHER_VERBOSITY_LEVEL, ) assert ( config.get_verbosity(_Config.VERBOSITY_ASSERTIONS) == TestMockConfig.SOME_OTHER_VERBOSITY_LEVEL ) def test_get_unsupported_type_error(self): config = mock_config(verbose=TestMockConfig.SOME_VERBOSITY_LEVEL) with pytest.raises(KeyError): config.get_verbosity("--- NOT A VERBOSITY LEVEL ---")
TestMockConfig
python
great-expectations__great_expectations
contrib/great_expectations_geospatial_expectations/great_expectations_geospatial_expectations/expectations/expect_column_values_to_be_valid_geometry.py
{ "start": 1644, "end": 4310 }
class ____(ColumnMapExpectation): """Expect values in this column to be valid geometry types (Polygon, LineString, etc.). See https://geopandas.org/en/stable/docs/reference/api/geopandas.GeoSeries.is_valid.html \ for more information. """ # These examples will be shown in the public gallery. # They will also be executed as unit tests for your Expectation. examples = [ { "data": { "valid_geometry": [ Polygon([(0, 0), (1, 1), (0, 1)]), LineString([(0, 0), (1, 1), (0, 1)]), Point(0, 1), ], "invalid_geometry": [ 1, None, "Polygon([(0, 0), (0, 2), (1, 1), (2, 2), (2, 0), (1, 1), (0, 0)])", ], }, "tests": [ { "title": "basic_positive_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "valid_geometry"}, "out": {"success": True}, }, { "title": "basic_negative_test", "exact_match_out": False, "include_in_gallery": True, "in": {"column": "invalid_geometry"}, "out": {"success": False}, }, ], } ] # This is the id string of the Metric used by this Expectation. # For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above. map_metric = "column_values.check_valid_geometry" # This is a list of parameter names that can affect whether the Expectation evaluates to True or False success_keys = ("mostly",) # This dictionary contains default values for any parameters that should have default values default_kwarg_values = {} # This object contains metadata for display in the public Gallery library_metadata = { "maturity": "experimental", # "experimental", "beta", or "production" "tags": [ "hackathon-22", "geospatial", ], # Tags for this Expectation in the Gallery "contributors": [ # Github handles for all contributors to this Expectation. "@luismdiaz01", "@derekma73", # Don't forget to add your github handle here! ], "requirements": ["geopandas", "shapely"], } if __name__ == "__main__": ExpectColumnValuesToBeValidGeometry().print_diagnostic_checklist()
ExpectColumnValuesToBeValidGeometry
python
google__jax
jax/experimental/serialize_executable.py
{ "start": 3073, "end": 3538 }
class ____(pickle.Pickler): device_types = (xc.Device,) client_types = (xc.Client,) def persistent_id(self, obj): if isinstance(obj, xc.LoadedExecutable): return ('exec', obj.client.serialize_executable(obj)) if isinstance(obj, xc._xla.Executable): return ('exec', obj.serialize()) if isinstance(obj, self.device_types): return ('device', obj.id) if isinstance(obj, self.client_types): return ('client',)
_JaxPjrtPickler
python
has2k1__plotnine
plotnine/themes/themeable.py
{ "start": 55890, "end": 56235 }
class ____(themeable): """ Justification of guide boxes Parameters ---------- theme_element : Literal["left", "right", "center", "top", "bottom", \ "baseline"], default=None If `None`, the value that will apply depends on [](`~plotnine.theme.themeables.legend_box`). """
legend_box_just
python
huggingface__transformers
src/transformers/models/qwen3_vl_moe/modular_qwen3_vl_moe.py
{ "start": 9994, "end": 10056 }
class ____(Qwen3VLVisionConfig): pass
Qwen3VLMoeVisionConfig
python
spack__spack
lib/spack/spack/vendor/pyrsistent/_checked_types.py
{ "start": 13982, "end": 14803 }
class ____(type): def __new__(mcs, name, bases, dct): _store_types(dct, bases, '_checked_key_types', '__key_type__') _store_types(dct, bases, '_checked_value_types', '__value_type__') store_invariants(dct, bases, '_checked_invariants', '__invariant__') def default_serializer(self, _, key, value): sk = key if isinstance(key, CheckedType): sk = key.serialize() sv = value if isinstance(value, CheckedType): sv = value.serialize() return sk, sv dct.setdefault('__serializer__', default_serializer) dct['__slots__'] = () return super(_CheckedMapTypeMeta, mcs).__new__(mcs, name, bases, dct) # Marker object _UNDEFINED_CHECKED_PMAP_SIZE = object()
_CheckedMapTypeMeta
python
bokeh__bokeh
examples/advanced/extensions/parallel_plot/parallel_selection_tool.py
{ "start": 101, "end": 699 }
class ____(BoxSelectTool): """ Selection tool for parallel plot To create a selection box, drag the selection around an axe When hovering a selection the box can be dragged upside-down Double-click on a selection to remove it Escape key remove all selections """ __implementation__ = 'parallel_selection_tool.ts' renderer_select = Instance(Renderer, help="Rectangular Selections glyphs") renderer_data = Instance(Renderer, help="MultiLine glyph of the data") box_width = Float(help="Width size in the screen coordinate of selection boxes")
ParallelSelectionTool
python
pytorch__pytorch
test/distributed/elastic/metrics/api_test.py
{ "start": 744, "end": 893 }
class ____(abc.ABC): @abc.abstractmethod def func(self): raise NotImplementedError def base_func(self): self.func()
Parent
python
huggingface__transformers
src/transformers/models/seamless_m4t/modeling_seamless_m4t.py
{ "start": 30319, "end": 33977 }
class ____(nn.Module): def __init__(self, config): super().__init__() self.config = config if config.position_embeddings_type == "relative": self.embed_positions = SeamlessM4TConformerRelPositionalEmbedding(config) elif config.position_embeddings_type == "rotary": self.embed_positions = SeamlessM4TConformerRotaryPositionalEmbedding(config) else: self.embed_positions = None self.dropout = nn.Dropout(config.speech_encoder_dropout) self.layers = nn.ModuleList( [SeamlessM4TConformerEncoderLayer(config) for _ in range(config.speech_encoder_layers)] ) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None conv_attention_mask = attention_mask if attention_mask is not None: # make sure padded tokens output 0 hidden_states = hidden_states.masked_fill(~attention_mask.bool().unsqueeze(-1), 0.0) # extend attention_mask attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype) attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min attention_mask = attention_mask.expand( attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1] ) hidden_states = self.dropout(hidden_states) if self.embed_positions is not None: relative_position_embeddings = self.embed_positions(hidden_states) else: relative_position_embeddings = None synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self) for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description) dropout_probability = torch.rand([]) skip_the_layer = self.training and dropout_probability < self.config.speech_encoder_layerdrop if 
not skip_the_layer or synced_gpus: # under fsdp or deepspeed zero3 all gpus must run in sync layer_outputs = layer( hidden_states, attention_mask=attention_mask, relative_position_embeddings=relative_position_embeddings, output_attentions=output_attentions, conv_attention_mask=conv_attention_mask, ) hidden_states = layer_outputs[0] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, )
SeamlessM4TConformerEncoder
python
eth-brownie__brownie
brownie/utils/toposort.py
{ "start": 1624, "end": 3659 }
class ____(ValueError): def __init__(self, data): # Sort the data just to make the output consistent, for use in # error messages. That's convenient for doctests. super().__init__( "Circular dependencies exist among these items: {{{}}}".format( ", ".join("{!r}:{!r}".format(key, value) for key, value in sorted(data.items())) ) ) self.data = data def toposort(data: Dict) -> Iterator[Set]: """Dependencies are expressed as a dictionary whose keys are items and whose values are a set of dependent items. Output is a list of sets in topological order. The first set consists of items with no dependencies, each subsequent set consists of items that depend upon items in the preceding sets.""" # Special case empty input. if len(data) == 0: return # Copy the input so as to leave it unmodified. data = data.copy() # Ignore self dependencies. for k, v in data.items(): v.discard(k) # Find all items that don't depend on anything. extra_items_in_deps = _reduce(set.union, data.values()) - set(data.keys()) # Add empty dependencies where needed. data.update({item: set() for item in extra_items_in_deps}) while True: ordered = {item for item, dep in data.items() if len(dep) == 0} if not ordered: break yield ordered data = {item: (dep - ordered) for item, dep in data.items() if item not in ordered} if len(data) != 0: raise CircularDependencyError(data) def toposort_flatten(data: Dict, sort: bool = True) -> List: """Returns a single list of dependencies. For any set returned by toposort(), those items are sorted and appended to the result (just to make the results deterministic).""" result = [] if sort: for d in toposort(data): result.extend(sorted(d)) else: for d in toposort(data): result.extend(d) return result
CircularDependencyError
python
protocolbuffers__protobuf
python/google/protobuf/internal/message_listener.py
{ "start": 1894, "end": 2008 }
class ____(object): """No-op MessageListener implementation.""" def Modified(self): pass
NullMessageListener
python
dask__dask
dask/_task_spec.py
{ "start": 5039, "end": 9856 }
class ____(Container): container: tuple __slots__ = ("container",) def __init__(self, *container): self.container = container def __contains__(self, o: object) -> bool: return any(o in c for c in self.container) SubgraphType = None def _execute_subgraph(inner_dsk, outkey, inkeys, *dependencies): final = {} final.update(inner_dsk) for k, v in zip(inkeys, dependencies): final[k] = DataNode(None, v) res = execute_graph(final, keys=[outkey]) return res[outkey] def convert_legacy_task( key: KeyType | None, task: _T, all_keys: Container, ) -> GraphNode | _T: if isinstance(task, GraphNode): return task if type(task) is tuple and task and callable(task[0]): func, args = task[0], task[1:] new_args = [] new: object for a in args: if isinstance(a, dict): new = Dict(a) else: new = convert_legacy_task(None, a, all_keys) new_args.append(new) return Task(key, func, *new_args) try: if isinstance(task, (int, float, str, tuple)): if task in all_keys: if key is None: return Alias(task) else: return Alias(key, target=task) except TypeError: # Unhashable pass if isinstance(task, (list, tuple, set, frozenset)): if is_namedtuple_instance(task): return _wrap_namedtuple_task( key, task, partial( convert_legacy_task, None, all_keys=all_keys, ), ) else: parsed_args = tuple(convert_legacy_task(None, t, all_keys) for t in task) if any(isinstance(a, GraphNode) for a in parsed_args): return Task(key, _identity_cast, *parsed_args, typ=type(task)) else: return cast(_T, type(task)(parsed_args)) elif isinstance(task, TaskRef): if key is None: return Alias(task.key) else: return Alias(key, target=task.key) else: return task def convert_legacy_graph( dsk: Mapping, all_keys: Container | None = None, ): if all_keys is None: all_keys = set(dsk) new_dsk = {} for k, arg in dsk.items(): t = convert_legacy_task(k, arg, all_keys) if isinstance(t, Alias) and t.target == k: continue elif not isinstance(t, GraphNode): t = DataNode(k, t) new_dsk[k] = t return new_dsk def resolve_aliases(dsk: dict, keys: set, 
dependents: dict) -> dict: """Remove trivial sequential alias chains Example: dsk = {'x': 1, 'y': Alias('x'), 'z': Alias('y')} resolve_aliases(dsk, {'z'}, {'x': {'y'}, 'y': {'z'}}) == {'z': 1} """ if not keys: raise ValueError("No keys provided") dsk = dict(dsk) work = list(keys) seen = set() while work: k = work.pop() if k in seen or k not in dsk: continue seen.add(k) t = dsk[k] if isinstance(t, Alias): target_key = t.target # Rules for when we allow to collapse an alias # 1. The target key is not in the keys set. The keys set is what the # user is requesting and by collapsing we'd no longer be able to # return that result. # 2. The target key is in fact part of dsk. If it isn't this could # point to a persisted dependency and we cannot collapse it. # 3. The target key has only one dependent which is the key we're # currently looking at. This means that there is a one to one # relation between this and the target key in which case we can # collapse them. # Note: If target was an alias as well, we could continue with # more advanced optimizations but this isn't implemented, yet if ( target_key not in keys and target_key in dsk # Note: whenever we're performing a collapse, we're not updating # the dependents. The length == 1 should still be sufficient for # chains of these aliases and len(dependents[target_key]) == 1 ): tnew = dsk.pop(target_key).copy() dsk[k] = tnew tnew.key = k if isinstance(tnew, Alias): work.append(k) seen.discard(k) else: work.extend(tnew.dependencies) work.extend(t.dependencies) return dsk
_MultiContainer
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/constructor18.py
{ "start": 393, "end": 493 }
class ____: ... _T1 = TypeVar("_T1", bound=ClassA | str, covariant=True) _T2 = TypeVar("_T2")
ClassA
python
getsentry__sentry
src/sentry/api/serializers/models/organization.py
{ "start": 9658, "end": 9910 }
class ____(TypedDict): # The control silo will not, cannot, should not contain most organization data. # Therefore, we need a specialized, limited via of that data. id: str slug: str name: str
ControlSiloOrganizationSerializerResponse
python
bottlepy__bottle
bottle.py
{ "start": 160800, "end": 160849 }
class ____(TemplateError): pass
StplSyntaxError