Dataset columns:

  language     stringclasses   1 value
  repo         stringclasses   346 values
  path         stringlengths   6 to 201 characters
  class_span   dict
  source       stringlengths   21 to 2.38M characters
  target       stringlengths   1 to 96 characters

Each record below lists its fields in this order: language, repo, path,
class_span (a dict of start/end offsets that locates the class inside the file
at path), source (the text of the class with its name masked as ____), and
target (the masked class name). The source text is shown with its original
line breaks collapsed.
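For anyone who wants to consume records of this shape programmatically, a minimal sketch follows. It assumes the split is stored as JSON Lines with the six fields named above; the file name "records.jsonl" is a placeholder, not something stated in this preview.

import json


def iter_records(path: str):
    """Yield one record per line of a JSON Lines file.

    Assumes each line is a JSON object with the six fields listed above:
    language, repo, path, class_span, source, target.
    """
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            if line.strip():
                yield json.loads(line)


if __name__ == "__main__":
    # "records.jsonl" is an assumed file name and storage format.
    for record in iter_records("records.jsonl"):
        span = record["class_span"]  # e.g. {"start": 81845, "end": 82071}
        print(record["language"], record["repo"], record["path"],
              span["start"], span["end"], record["target"])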
python
qdrant__qdrant-client
qdrant_client/http/models/models.py
{ "start": 81845, "end": 82071 }
class ____(BaseModel): ongoing_create_snapshot_requests: int = Field(..., description="") is_recovering: bool = Field(..., description="") recovery_timestamp: int = Field(..., description="")
PartialSnapshotTelemetry
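To make the masking concrete, the first record above can be reconstructed by substituting its target back into the masked source. The literals below are copied from that row; treating class_span as character offsets into the file at path is an assumption, so the length comparison at the end is only a sanity check.

# Reconstructing the first record (qdrant__qdrant-client). The values are
# copied verbatim from the row above; interpreting class_span as character
# offsets into qdrant_client/http/models/models.py is an assumption.
source = (
    'class ____(BaseModel): '
    'ongoing_create_snapshot_requests: int = Field(..., description="") '
    'is_recovering: bool = Field(..., description="") '
    'recovery_timestamp: int = Field(..., description="")'
)
target = "PartialSnapshotTelemetry"
class_span = {"start": 81845, "end": 82071}

# Substituting the target for the ____ mask restores the class declaration.
reconstructed = source.replace("____", target, 1)
print(reconstructed.split("(")[0])  # -> "class PartialSnapshotTelemetry"

# The span covers end - start = 226 characters of the original file; the
# flattened preview text is close to, but not exactly, that length because
# the original line breaks and indentation were collapsed here.
print(class_span["end"] - class_span["start"])  # -> 226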
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/compute.py
{ "start": 3228, "end": 10961 }
class ____(ComputeEngineBaseOperator): """ Creates an Instance in Google Compute Engine based on specified parameters. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:ComputeEngineInsertInstanceOperator` :param body: Instance representation as an object. Should at least include 'name', 'machine_type', 'disks' and 'network_interfaces' fields but doesn't include 'zone' field, as it will be specified in 'zone' parameter. Full or partial URL and can be represented as examples below: 1. "machine_type": "projects/your-project-name/zones/your-zone/machineTypes/your-machine-type" 2. "disk_type": "projects/your-project-name/zones/your-zone/diskTypes/your-disk-type" 3. "subnetwork": "projects/your-project-name/regions/your-region/subnetworks/your-subnetwork" :param zone: Google Cloud zone where the Instance exists :param project_id: Google Cloud project ID where the Compute Engine Instance exists. If set to None or missing, the default project_id from the Google Cloud connection is used. :param resource_id: Name of the Instance. If the name of Instance is not specified in body['name'], the name will be taken from 'resource_id' parameter :param request_id: Unique request_id that you might add to achieve full idempotence (for example when client call times out repeating the request with the same request id will not create a new instance template again) It should be in UUID format as defined in RFC 4122 :param gcp_conn_id: The connection ID used to connect to Google Cloud. Defaults to 'google_cloud_default'. :param api_version: API version used (for example v1 - or beta). Defaults to v1. :param impersonation_chain: Service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). :param retry: A retry object used to retry requests. If `None` is specified, requests will not be retried. :param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if `retry` is specified, the timeout applies to each individual attempt. :param metadata: Additional metadata that is provided to the method. 
""" operator_extra_links = (ComputeInstanceDetailsLink(),) # [START gce_instance_insert_fields] template_fields: Sequence[str] = ( "body", "project_id", "zone", "request_id", "gcp_conn_id", "api_version", "impersonation_chain", "resource_id", ) # [END gce_instance_insert_fields] def __init__( self, *, body: dict, zone: str, resource_id: str | None = None, project_id: str = PROVIDE_PROJECT_ID, request_id: str | None = None, retry: Retry | None = None, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), gcp_conn_id: str = "google_cloud_default", api_version: str = "v1", validate_body: bool = True, impersonation_chain: str | Sequence[str] | None = None, **kwargs, ) -> None: self.body = body self.zone = zone self.request_id = request_id if "name" in body: resource_id = self.body["name"] self._field_validator = None # Optional[GcpBodyFieldValidator] self.retry = retry self.timeout = timeout self.metadata = metadata if validate_body: self._field_validator = GcpBodyFieldValidator( GCE_INSTANCE_TEMPLATE_VALIDATION_PATCH_SPECIFICATION, api_version=api_version ) self._field_sanitizer = GcpBodyFieldSanitizer(GCE_INSTANCE_FIELDS_TO_SANITIZE) super().__init__( resource_id=resource_id, zone=zone, project_id=project_id, gcp_conn_id=gcp_conn_id, api_version=api_version, impersonation_chain=impersonation_chain, **kwargs, ) def check_body_fields(self) -> None: required_params = ["machine_type", "disks", "network_interfaces"] for param in required_params: if param not in self.body: readable_param = param.replace("_", " ") raise AirflowException( f"The body '{self.body}' should contain at least {readable_param} for the new operator " f"in the '{param}' field. Check (google.cloud.compute_v1.types.Instance) " f"for more details about body fields description." ) def _validate_inputs(self) -> None: super()._validate_inputs() if not self.resource_id and "name" not in self.body: raise AirflowException( "The required parameters 'resource_id' and body['name'] are missing. " "Please, provide at least one of them." ) def _validate_all_body_fields(self) -> None: if self._field_validator: self._field_validator.validate(self.body) def execute(self, context: Context) -> dict: hook = ComputeEngineHook( gcp_conn_id=self.gcp_conn_id, api_version=self.api_version, impersonation_chain=self.impersonation_chain, ) self._validate_all_body_fields() self.check_body_fields() try: # Idempotence check (sort of) - we want to check if the new Instance # is already created and if is, then we assume it was created previously - we do # not check if content of the Instance is as expected. # We assume success if the Instance is simply present. 
existing_instance = hook.get_instance( resource_id=self.resource_id, project_id=self.project_id, zone=self.zone, ) except exceptions.NotFound as e: # We actually expect to get 404 / Not Found here as the should not yet exist if e.code != 404: raise e else: self.log.info("The %s Instance already exists", self.resource_id) ComputeInstanceDetailsLink.persist( context=context, project_id=self.project_id or hook.project_id, ) return Instance.to_dict(existing_instance) self._field_sanitizer.sanitize(self.body) self.log.info("Creating Instance with specified body: %s", self.body) hook.insert_instance( body=self.body, request_id=self.request_id, project_id=self.project_id, zone=self.zone, ) self.log.info("The specified Instance has been created SUCCESSFULLY") new_instance = hook.get_instance( resource_id=self.resource_id, project_id=self.project_id, zone=self.zone, ) ComputeInstanceDetailsLink.persist( context=context, project_id=self.project_id or hook.project_id, ) return Instance.to_dict(new_instance)
ComputeEngineInsertInstanceOperator
python
doocs__leetcode
solution/1100-1199/1138.Alphabet Board Path/Solution.py
{ "start": 0, "end": 572 }
class ____: def alphabetBoardPath(self, target: str) -> str: i = j = 0 ans = [] for c in target: v = ord(c) - ord("a") x, y = v // 5, v % 5 while j > y: j -= 1 ans.append("L") while i > x: i -= 1 ans.append("U") while j < y: j += 1 ans.append("R") while i < x: i += 1 ans.append("D") ans.append("!") return "".join(ans)
Solution
python
pytorch__pytorch
test/test_cpp_extensions_aot.py
{ "start": 14464, "end": 15881 }
class ____(common.TestCase): def setUp(self): super().setUp() @xfailIfTorchDynamo def test_rng(self): fourty_two = torch.full((10,), 42, dtype=torch.int64) t = torch.empty(10, dtype=torch.int64).random_() self.assertNotEqual(t, fourty_two) gen = torch.Generator(device="cpu") t = torch.empty(10, dtype=torch.int64).random_(generator=gen) self.assertNotEqual(t, fourty_two) self.assertEqual(rng_extension.getInstanceCount(), 0) gen = rng_extension.createTestCPUGenerator(42) self.assertEqual(rng_extension.getInstanceCount(), 1) copy = gen self.assertEqual(rng_extension.getInstanceCount(), 1) self.assertEqual(gen, copy) copy2 = rng_extension.identity(copy) self.assertEqual(rng_extension.getInstanceCount(), 1) self.assertEqual(gen, copy2) t = torch.empty(10, dtype=torch.int64).random_(generator=gen) self.assertEqual(rng_extension.getInstanceCount(), 1) self.assertEqual(t, fourty_two) del gen self.assertEqual(rng_extension.getInstanceCount(), 1) del copy self.assertEqual(rng_extension.getInstanceCount(), 1) del copy2 self.assertEqual(rng_extension.getInstanceCount(), 0) @torch.testing._internal.common_utils.markDynamoStrictTest @unittest.skipIf(not TEST_CUDA, "CUDA not found")
TestRNGExtension
python
kubernetes-client__python
kubernetes/client/models/apiextensions_v1_webhook_client_config.py
{ "start": 383, "end": 8076 }
class ____(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'ca_bundle': 'str', 'service': 'ApiextensionsV1ServiceReference', 'url': 'str' } attribute_map = { 'ca_bundle': 'caBundle', 'service': 'service', 'url': 'url' } def __init__(self, ca_bundle=None, service=None, url=None, local_vars_configuration=None): # noqa: E501 """ApiextensionsV1WebhookClientConfig - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._ca_bundle = None self._service = None self._url = None self.discriminator = None if ca_bundle is not None: self.ca_bundle = ca_bundle if service is not None: self.service = service if url is not None: self.url = url @property def ca_bundle(self): """Gets the ca_bundle of this ApiextensionsV1WebhookClientConfig. # noqa: E501 caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used. # noqa: E501 :return: The ca_bundle of this ApiextensionsV1WebhookClientConfig. # noqa: E501 :rtype: str """ return self._ca_bundle @ca_bundle.setter def ca_bundle(self, ca_bundle): """Sets the ca_bundle of this ApiextensionsV1WebhookClientConfig. caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used. # noqa: E501 :param ca_bundle: The ca_bundle of this ApiextensionsV1WebhookClientConfig. # noqa: E501 :type: str """ if (self.local_vars_configuration.client_side_validation and ca_bundle is not None and not re.search(r'^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', ca_bundle)): # noqa: E501 raise ValueError(r"Invalid value for `ca_bundle`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`") # noqa: E501 self._ca_bundle = ca_bundle @property def service(self): """Gets the service of this ApiextensionsV1WebhookClientConfig. # noqa: E501 :return: The service of this ApiextensionsV1WebhookClientConfig. # noqa: E501 :rtype: ApiextensionsV1ServiceReference """ return self._service @service.setter def service(self, service): """Sets the service of this ApiextensionsV1WebhookClientConfig. :param service: The service of this ApiextensionsV1WebhookClientConfig. # noqa: E501 :type: ApiextensionsV1ServiceReference """ self._service = service @property def url(self): """Gets the url of this ApiextensionsV1WebhookClientConfig. # noqa: E501 url gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified. The `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address. Please note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. 
Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster. The scheme must be \"https\"; the URL must begin with \"https://\". A path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier. Attempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either. # noqa: E501 :return: The url of this ApiextensionsV1WebhookClientConfig. # noqa: E501 :rtype: str """ return self._url @url.setter def url(self, url): """Sets the url of this ApiextensionsV1WebhookClientConfig. url gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified. The `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address. Please note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster. The scheme must be \"https\"; the URL must begin with \"https://\". A path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier. Attempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either. # noqa: E501 :param url: The url of this ApiextensionsV1WebhookClientConfig. # noqa: E501 :type: str """ self._url = url def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ApiextensionsV1WebhookClientConfig): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, ApiextensionsV1WebhookClientConfig): return True return self.to_dict() != other.to_dict()
ApiextensionsV1WebhookClientConfig
python
joke2k__faker
faker/providers/automotive/it_IT/__init__.py
{ "start": 48, "end": 312 }
class ____(AutomotiveProvider): """Implement automotive provider for ``it_IT`` locale. Sources: - https://en.wikipedia.org/wiki/Vehicle_registration_plates_of_Italy """ license_formats = ( # 1994-present "??###??", )
Provider
python
getsentry__sentry
tests/sentry/api/endpoints/test_organization_spans_fields_stats.py
{ "start": 182, "end": 6580 }
class ____(BaseSpansTestCase, APITestCase): is_eap = True view = "sentry-api-0-organization-spans-fields-stats" def setUp(self) -> None: super().setUp() self.login_as(user=self.user) def do_request(self, query=None, features=None, **kwargs): if features is None: features = ["organizations:performance-spans-fields-stats"] if query and "type" not in query.keys(): query["type"] = "string" if query and "sampling" not in query.keys(): query["sampling"] = "HIGHEST_ACCURACY" with self.feature(features): response = self.client.get( reverse( self.view, kwargs={ "organization_id_or_slug": self.organization.slug, }, ), query, format="json", **kwargs, ) return response def _generate_one_span(self, tags=None): if tags is None: tags = {"foo": "bar"} self.store_segment( self.project.id, uuid4().hex, uuid4().hex, span_id=uuid4().hex[:16], organization_id=self.organization.id, parent_span_id=None, timestamp=before_now(days=0, minutes=10).replace(microsecond=0), transaction="foo", duration=100, exclusive_time=100, tags=tags, is_eap=self.is_eap, ) def test_no_project(self) -> None: response = self.do_request() assert response.status_code == 200, response.data assert response.data == {"attributeDistributions": []} def test_no_feature(self) -> None: response = self.do_request(features=[]) assert response.status_code == 404, response.data def test_invalid_params(self) -> None: self._generate_one_span() response = self.do_request(query={"max_buckets": "invalid", "max_attributes": "invalid"}) assert response.status_code == 400, response.data assert "A valid integer is required" in str(response.data) def test_valid_max_params(self) -> None: self._generate_one_span() response = self.do_request(query={"max_buckets": "50", "max_attributes": "100"}) assert response.status_code == 200, response.data assert "attributeDistributions" in str(response.data) def test_invalid_max_buckets(self) -> None: self._generate_one_span() # max_buckets is more than 100 response = self.do_request(query={"max_buckets": "200", "max_attributes": "100"}) assert response.status_code == 400, response.data assert "Ensure this value is less than or equal to 100" in str(response.data) def test_invalid_date_params(self) -> None: self._generate_one_span() response = self.do_request( query={ "start": "invalid-date", "end": "invalid-date", } ) assert response.status_code == 400, response.data def test_max_attributes(self, max_attributes=3) -> None: tags = [{f"test_tag_{i}": f"value_{i}"} for i in range(max_attributes)] for tag in tags: self._generate_one_span(tag) # set max_attributes smaller than the number of attributes, so we can test if max_attributes is respected response = self.do_request(query={"max_attributes": max_attributes - 1}) assert response.status_code == 200, response.data distributions = response.data["results"][0]["attributeDistributions"]["attributes"] assert len(distributions) == max_attributes - 1 def test_max_buckets(self, max_buckets=3) -> None: tags = [{"test_tag": f"value_{i}"} for i in range(max_buckets)] for tag in tags: self._generate_one_span(tag) # set max_buckets smaller than the number of values, so we can test if max_buckets is respected response = self.do_request(query={"max_buckets": max_buckets - 1}) assert response.status_code == 200, response.data distributions = response.data["results"][0]["attributeDistributions"]["attributes"] attribute = next(a for a in distributions if a["attributeName"] == "test_tag") assert attribute assert len(attribute["buckets"]) == max_buckets - 1 def test_distribution_values(self) -> 
None: tags = [ {"browser": "chrome", "device": "desktop"}, {"browser": "chrome", "device": "mobile"}, {"browser": "chrome", "device": "desktop"}, {"browser": "safari", "device": "mobile"}, {"browser": "chrome", "device": "desktop"}, ] for tag in tags: self._generate_one_span(tag) response = self.do_request(query={"dataset": "spans"}) assert response.status_code == 200, response.data distributions = response.data["results"][0]["attributeDistributions"]["attributes"] attribute = next(a for a in distributions if a["attributeName"] == "browser") assert attribute assert attribute["buckets"] == [ {"label": "chrome", "value": 4.0}, {"label": "safari", "value": 1.0}, ] attribute = next(a for a in distributions if a["attributeName"] == "device") assert attribute assert attribute["buckets"] == [ {"label": "desktop", "value": 3.0}, {"label": "mobile", "value": 2.0}, ] def test_filter_query(self) -> None: tags = [ {"browser": "chrome", "device": "desktop"}, {"browser": "chrome", "device": "mobile"}, ] for tag in tags: self._generate_one_span(tag) response = self.do_request(query={"query": "device:desktop"}) assert response.status_code == 200, response.data distributions = response.data["results"][0]["attributeDistributions"]["attributes"] attribute = next(a for a in distributions if a["attributeName"] == "browser") assert attribute # the second span has a different device value, so it should not be included in the results assert attribute["buckets"] == [ {"label": "chrome", "value": 1.0}, ]
OrganizationSpansFieldsStatsEndpointTest
python
facebookresearch__faiss
tests/test_search_params.py
{ "start": 13308, "end": 17320 }
class ____(unittest.TestCase): def do_test_with_param( self, index_key, ps_params, params): """ Test equivalence between setting 1. param_name_2 = value with ParameterSpace 2. pass in a SearchParameters with param_name = value """ ds = datasets.SyntheticDataset(32, 1000, 100, 20) index = faiss.index_factory(ds.d, index_key) if index_key.startswith("PQ"): index.polysemous_training.n_iter = 50000 index.polysemous_training.n_redo = 1 index.train(ds.get_train()) index.add(ds.get_database()) I0, D0 = index.search(ds.get_queries(), 10) Dnew, Inew = index.search(ds.get_queries(), 10, params=params) # make sure the parameter does indeed change the result... self.assertFalse(np.all(Inew == I0)) for param_name, value in ps_params.items(): faiss.ParameterSpace().set_index_parameter( index, param_name, value) Dref, Iref = index.search(ds.get_queries(), 10) np.testing.assert_array_equal(Iref, Inew) np.testing.assert_array_equal(Dref, Dnew) def test_nprobe(self): self.do_test_with_param( "IVF32,Flat", {"nprobe": 3}, faiss.SearchParametersIVF(nprobe=3)) def test_efSearch(self): self.do_test_with_param( "HNSW", {"efSearch": 4}, faiss.SearchParametersHNSW(efSearch=4)) def test_quantizer_hnsw(self): self.do_test_with_param( "IVF200_HNSW,Flat", {"quantizer_efSearch": 5, "nprobe": 10}, faiss.SearchParametersIVF( nprobe=10, quantizer_params=faiss.SearchParametersHNSW( efSearch=5) ) ) def test_PQ_polysemous_ht(self): self.do_test_with_param( "PQ4x8", {"ht": 10}, faiss.SearchParametersPQ( polysemous_ht=10, search_type=faiss.IndexPQ.ST_polysemous ) ) def test_max_codes(self): " tests whether the max nb codes is taken into account " ds = datasets.SyntheticDataset(32, 1000, 100, 20) index = faiss.index_factory(ds.d, "IVF32,Flat") index.train(ds.get_train()) index.add(ds.get_database()) stats = faiss.cvar.indexIVF_stats stats.reset() D0, I0 = index.search( ds.get_queries(), 10, params=faiss.SearchParametersIVF(nprobe=8) ) ndis0 = stats.ndis target_ndis = ndis0 // ds.nq # a few queries will be below, a few above for q in range(ds.nq): stats.reset() Dq, Iq = index.search( ds.get_queries()[q:q + 1], 10, params=faiss.SearchParametersIVF( nprobe=8, max_codes=target_ndis ) ) self.assertLessEqual(stats.ndis, target_ndis) if stats.ndis < target_ndis: np.testing.assert_equal(I0[q], Iq[0]) def test_ownership(self): # see https://github.com/facebookresearch/faiss/issues/2996 subset = np.arange(0, 50) sel = faiss.IDSelectorBatch(subset) self.assertTrue(sel.this.own()) params = faiss.SearchParameters(sel=sel) self.assertTrue(sel.this.own()) # otherwise mem leak! # this is a somewhat fragile test because it assumes the # gc decreases refcounts immediately. prev_count = sys.getrefcount(sel) del params new_count = sys.getrefcount(sel) self.assertEqual(new_count, prev_count - 1) # check for other objects as well sel1 = faiss.IDSelectorBatch([1, 2, 3]) sel2 = faiss.IDSelectorBatch([4, 5, 6]) sel = faiss.IDSelectorAnd(sel1, sel2) # make storage is still managed by python self.assertTrue(sel1.this.own()) self.assertTrue(sel2.this.own())
TestSearchParams
python
ray-project__ray
python/ray/data/_internal/logical/operators/from_operators.py
{ "start": 2922, "end": 3010 }
class ____(AbstractFrom): """Logical operator for `from_arrow`.""" pass
FromArrow
python
huggingface__transformers
src/transformers/models/align/modeling_align.py
{ "start": 9526, "end": 11014 }
class ____(nn.Module): r""" This corresponds to the depthwise convolution phase of each block in the original implementation. """ def __init__( self, config: AlignVisionConfig, in_dim: int, stride: int, kernel_size: int, adjust_padding: bool, ): super().__init__() self.stride = stride conv_pad = "valid" if self.stride == 2 else "same" padding = correct_pad(kernel_size, adjust=adjust_padding) self.depthwise_conv_pad = nn.ZeroPad2d(padding=padding) self.depthwise_conv = AlignVisionDepthwiseConv2d( in_dim, kernel_size=kernel_size, stride=stride, padding=conv_pad, bias=False ) self.depthwise_norm = nn.BatchNorm2d( num_features=in_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum ) self.depthwise_act = ACT2FN[config.hidden_act] def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: # Depthwise convolution if self.stride == 2: hidden_states = self.depthwise_conv_pad(hidden_states) hidden_states = self.depthwise_conv(hidden_states) hidden_states = self.depthwise_norm(hidden_states) hidden_states = self.depthwise_act(hidden_states) return hidden_states # Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetSqueezeExciteLayer with EfficientNet->AlignVision
AlignVisionDepthwiseLayer
python
xlwings__xlwings
xlwings/constants.py
{ "start": 112195, "end": 112321 }
class ____: xlScaleLinear = -4132 # from enum XlScaleType xlScaleLogarithmic = -4133 # from enum XlScaleType
ScaleType
python
has2k1__plotnine
plotnine/scales/scale_identity.py
{ "start": 2350, "end": 2410 }
class ____(scale_color_identity): pass
scale_colour_identity
python
plotly__plotly.py
tests/test_core/test_figure_widget_backend/test_validate_no_frames.py
{ "start": 196, "end": 1038 }
class ____(TestCase): if figure_widget_available: def test_no_frames_in_constructor_kwarg(self): with pytest.raises(ValueError): go.FigureWidget(frames=[{}]) def test_emtpy_frames_ok_as_constructor_kwarg(self): go.FigureWidget(frames=[]) def test_no_frames_in_constructor_dict(self): with pytest.raises(ValueError): go.FigureWidget({"frames": [{}]}) def test_emtpy_frames_ok_as_constructor_dict_key(self): go.FigureWidget({"frames": []}) def test_no_frames_assignment(self): fig = go.FigureWidget() with pytest.raises(ValueError): fig.frames = [{}] def test_emtpy_frames_assignment_ok(self): fig = go.FigureWidget() fig.frames = []
TestNoFrames
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 875072, "end": 875484 }
class ____(sgqlc.types.Type): """An edge in a connection.""" __schema__ = github_schema __field_names__ = ("cursor", "node") cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor") """A cursor for use in pagination.""" node = sgqlc.types.Field("PullRequestReviewComment", graphql_name="node") """The item at the end of the edge."""
PullRequestReviewCommentEdge
python
ray-project__ray
python/ray/tests/test_async_compat.py
{ "start": 481, "end": 549 }
class ____: def sync_fn(self) -> None: pass
NoAsyncMethods
python
GoogleCloudPlatform__python-docs-samples
appengine/standard_python3/bundled-services/blobstore/wsgi/main.py
{ "start": 867, "end": 1533 }
class ____: def __call__(self, environ, start_response): upload_url = blobstore.create_upload_url("/upload_photo") response = """ <html><body> <form action="{}" method="POST" enctype="multipart/form-data"> Upload File: <input type="file" name="file"><br> <input type="submit" name="submit" value="Submit"> </form> </body></html>""".format( upload_url ) start_response("200 OK", [("Content-Type", "text/html")]) return [response.encode("utf-8")] # [START gae_blobstore_handler_wsgi]
UploadFormHandler
python
ray-project__ray
python/ray/data/_internal/logical/rules/configure_map_task_memory.py
{ "start": 2591, "end": 3709 }
class ____(ConfigureMapTaskMemoryRule): def estimate_per_task_memory_requirement(self, op: MapOperator) -> Optional[int]: # Typically, this configuration won't make a difference because # `average_bytes_per_output` is usually ~128 MiB and each core usually has # 4 GiB of memory. However, if `num_cpus` is small (e.g., 0.01) or # `target_max_block_size` is large (e.g., 1GB), then tasks can OOM even # if it just uses enough memory to produce an output block. By setting # `memory` to the average output size, we can mitigate this case. # # We set it to 1 target block size out of assumption that *at least* 1 copy # of data (to process heap) will be made during processing. # # Note that, unless object store memory is manually specified, by default Ray's # "memory" resource is exclusive of the Object Store memory allocated on the # node (i.e., its total allocatable value is Total memory - Object Store # memory). return op.metrics.average_bytes_per_output
ConfigureMapTaskMemoryUsingOutputSize
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/typedDict16.py
{ "start": 140, "end": 173 }
class ____(TD0): value: str
TD1
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pyupgrade/UP004.py
{ "start": 80, "end": 120 }
class ____( object, # ): ...
A
python
dagster-io__dagster
python_modules/dagster/dagster_tests/core_tests/test_external_execution_plan.py
{ "start": 10098, "end": 12112 }
class ____(CacheableAssetsDefinition): _cacheable_data = AssetsDefinitionCacheableData( keys_by_output_name={"result": dg.AssetKey("foo")}, metadata_by_output_name={ "result": { "some_val": MetadataValue.table_schema( schema=dg.TableSchema(columns=[dg.TableColumn("some_col")]) ) } }, ) def compute_cacheable_data(self): # used for tracking how many times this function gets called over an execution instance = DagsterInstance.get() kvs_key = "compute_cacheable_data_called" compute_cacheable_data_called = int( instance.run_storage.get_cursor_values({kvs_key}).get(kvs_key, "0") ) instance.run_storage.set_cursor_values({kvs_key: str(compute_cacheable_data_called + 1)}) # Skip the tracking if this is called outside the context of a DagsterInstance return [self._cacheable_data] def build_definitions(self, data): assert len(data) == 1 assert data == [self._cacheable_data] # used for tracking how many times this function gets called over an execution instance = DagsterInstance.get() kvs_key = "get_definitions_called" get_definitions_called = int( instance.run_storage.get_cursor_values({kvs_key}).get(kvs_key, "0") ) instance.run_storage.set_cursor_values({kvs_key: str(get_definitions_called + 1)}) @dg.op def _op(): return 1 return [ AssetsDefinition.from_op(_op, keys_by_output_name=cd.keys_by_output_name) for cd in data ] @lazy_repository def cacheable_asset_defs(): @dg.asset def bar(foo): return foo + 1 @dg.repository def cacheable_asset_defs(): return [ bar, MyCacheableAssetsDefinition("xyz"), dg.define_asset_job("all_asset_job"), ] return cacheable_asset_defs
MyCacheableAssetsDefinition
python
sympy__sympy
sympy/plotting/series.py
{ "start": 81513, "end": 96570 }
class ____(BaseSeries): """Representation for 2D Implicit plot.""" is_implicit = True use_cm = False _N = 100 def __init__(self, expr, var_start_end_x, var_start_end_y, label="", **kwargs): super().__init__(**kwargs) self.adaptive = kwargs.get("adaptive", False) self.expr = expr self._label = str(expr) if label is None else label self._latex_label = latex(expr) if label is None else label self.ranges = [var_start_end_x, var_start_end_y] self.var_x, self.start_x, self.end_x = self.ranges[0] self.var_y, self.start_y, self.end_y = self.ranges[1] self._color = kwargs.get("color", kwargs.get("line_color", None)) if self.is_interactive and self.adaptive: raise NotImplementedError("Interactive plot with `adaptive=True` " "is not supported.") # Check whether the depth is greater than 4 or less than 0. depth = kwargs.get("depth", 0) if depth > 4: depth = 4 elif depth < 0: depth = 0 self.depth = 4 + depth self._post_init() @property def expr(self): if self.adaptive: return self._adaptive_expr return self._non_adaptive_expr @expr.setter def expr(self, expr): self._block_lambda_functions(expr) # these are needed for adaptive evaluation expr, has_equality = self._has_equality(sympify(expr)) self._adaptive_expr = expr self.has_equality = has_equality self._label = str(expr) self._latex_label = latex(expr) if isinstance(expr, (BooleanFunction, Ne)) and (not self.adaptive): self.adaptive = True msg = "contains Boolean functions. " if isinstance(expr, Ne): msg = "is an unequality. " warnings.warn( "The provided expression " + msg + "In order to plot the expression, the algorithm " + "automatically switched to an adaptive sampling." ) if isinstance(expr, BooleanFunction): self._non_adaptive_expr = None self._is_equality = False else: # these are needed for uniform meshing evaluation expr, is_equality = self._preprocess_meshgrid_expression(expr, self.adaptive) self._non_adaptive_expr = expr self._is_equality = is_equality @property def line_color(self): return self._color @line_color.setter def line_color(self, v): self._color = v color = line_color def _has_equality(self, expr): # Represents whether the expression contains an Equality, GreaterThan # or LessThan has_equality = False def arg_expand(bool_expr): """Recursively expands the arguments of an Boolean Function""" for arg in bool_expr.args: if isinstance(arg, BooleanFunction): arg_expand(arg) elif isinstance(arg, Relational): arg_list.append(arg) arg_list = [] if isinstance(expr, BooleanFunction): arg_expand(expr) # Check whether there is an equality in the expression provided. if any(isinstance(e, (Equality, GreaterThan, LessThan)) for e in arg_list): has_equality = True elif not isinstance(expr, Relational): expr = Equality(expr, 0) has_equality = True elif isinstance(expr, (Equality, GreaterThan, LessThan)): has_equality = True return expr, has_equality def __str__(self): f = lambda t: float(t) if len(t.free_symbols) == 0 else t return self._str_helper( "Implicit expression: %s for %s over %s and %s over %s") % ( str(self._adaptive_expr), str(self.var_x), str((f(self.start_x), f(self.end_x))), str(self.var_y), str((f(self.start_y), f(self.end_y))), ) def get_data(self): """Returns numerical data. Returns ======= If the series is evaluated with the `adaptive=True` it returns: interval_list : list List of bounding rectangular intervals to be postprocessed and eventually used with Matplotlib's ``fill`` command. dummy : str A string containing ``"fill"``. 
Otherwise, it returns 2D numpy arrays to be used with Matplotlib's ``contour`` or ``contourf`` commands: x_array : np.ndarray y_array : np.ndarray z_array : np.ndarray plot_type : str A string specifying which plot command to use, ``"contour"`` or ``"contourf"``. """ if self.adaptive: data = self._adaptive_eval() if data is not None: return data return self._get_meshes_grid() def _adaptive_eval(self): """ References ========== .. [1] Jeffrey Allen Tupper. Reliable Two-Dimensional Graphing Methods for Mathematical Formulae with Two Free Variables. .. [2] Jeffrey Allen Tupper. Graphing Equations with Generalized Interval Arithmetic. Master's thesis. University of Toronto, 1996 """ import sympy.plotting.intervalmath.lib_interval as li user_functions = {} printer = IntervalMathPrinter({ 'fully_qualified_modules': False, 'inline': True, 'allow_unknown_functions': True, 'user_functions': user_functions}) keys = [t for t in dir(li) if ("__" not in t) and (t not in ["import_module", "interval"])] vals = [getattr(li, k) for k in keys] d = dict(zip(keys, vals)) func = lambdify((self.var_x, self.var_y), self.expr, modules=[d], printer=printer) data = None try: data = self._get_raster_interval(func) except NameError as err: warnings.warn( "Adaptive meshing could not be applied to the" " expression, as some functions are not yet implemented" " in the interval math module:\n\n" "NameError: %s\n\n" % err + "Proceeding with uniform meshing." ) self.adaptive = False except TypeError: warnings.warn( "Adaptive meshing could not be applied to the" " expression. Using uniform meshing.") self.adaptive = False return data def _get_raster_interval(self, func): """Uses interval math to adaptively mesh and obtain the plot""" np = import_module('numpy') k = self.depth interval_list = [] sx, sy = [float(t) for t in [self.start_x, self.start_y]] ex, ey = [float(t) for t in [self.end_x, self.end_y]] # Create initial 32 divisions xsample = np.linspace(sx, ex, 33) ysample = np.linspace(sy, ey, 33) # Add a small jitter so that there are no false positives for equality. # Ex: y==x becomes True for x interval(1, 2) and y interval(1, 2) # which will draw a rectangle. jitterx = ( (np.random.rand(len(xsample)) * 2 - 1) * (ex - sx) / 2 ** 20 ) jittery = ( (np.random.rand(len(ysample)) * 2 - 1) * (ey - sy) / 2 ** 20 ) xsample += jitterx ysample += jittery xinter = [interval(x1, x2) for x1, x2 in zip(xsample[:-1], xsample[1:])] yinter = [interval(y1, y2) for y1, y2 in zip(ysample[:-1], ysample[1:])] interval_list = [[x, y] for x in xinter for y in yinter] plot_list = [] # recursive call refinepixels which subdivides the intervals which are # neither True nor False according to the expression. def refine_pixels(interval_list): """Evaluates the intervals and subdivides the interval if the expression is partially satisfied.""" temp_interval_list = [] plot_list = [] for intervals in interval_list: # Convert the array indices to x and y values intervalx = intervals[0] intervaly = intervals[1] func_eval = func(intervalx, intervaly) # The expression is valid in the interval. Change the contour # array values to 1. 
if func_eval[1] is False or func_eval[0] is False: pass elif func_eval == (True, True): plot_list.append([intervalx, intervaly]) elif func_eval[1] is None or func_eval[0] is None: # Subdivide avgx = intervalx.mid avgy = intervaly.mid a = interval(intervalx.start, avgx) b = interval(avgx, intervalx.end) c = interval(intervaly.start, avgy) d = interval(avgy, intervaly.end) temp_interval_list.append([a, c]) temp_interval_list.append([a, d]) temp_interval_list.append([b, c]) temp_interval_list.append([b, d]) return temp_interval_list, plot_list while k >= 0 and len(interval_list): interval_list, plot_list_temp = refine_pixels(interval_list) plot_list.extend(plot_list_temp) k = k - 1 # Check whether the expression represents an equality # If it represents an equality, then none of the intervals # would have satisfied the expression due to floating point # differences. Add all the undecided values to the plot. if self.has_equality: for intervals in interval_list: intervalx = intervals[0] intervaly = intervals[1] func_eval = func(intervalx, intervaly) if func_eval[1] and func_eval[0] is not False: plot_list.append([intervalx, intervaly]) return plot_list, "fill" def _get_meshes_grid(self): """Generates the mesh for generating a contour. In the case of equality, ``contour`` function of matplotlib can be used. In other cases, matplotlib's ``contourf`` is used. """ np = import_module('numpy') xarray, yarray, z_grid = self._evaluate() _re, _im = np.real(z_grid), np.imag(z_grid) _re[np.invert(np.isclose(_im, np.zeros_like(_im)))] = np.nan if self._is_equality: return xarray, yarray, _re, 'contour' return xarray, yarray, _re, 'contourf' @staticmethod def _preprocess_meshgrid_expression(expr, adaptive): """If the expression is a Relational, rewrite it as a single expression. Returns ======= expr : Expr The rewritten expression equality : Boolean Whether the original expression was an Equality or not. """ equality = False if isinstance(expr, Equality): expr = expr.lhs - expr.rhs equality = True elif isinstance(expr, Relational): expr = expr.gts - expr.lts elif not adaptive: raise NotImplementedError( "The expression is not supported for " "plotting in uniform meshed plot." ) return expr, equality def get_label(self, use_latex=False, wrapper="$%s$"): """Return the label to be used to display the expression. Parameters ========== use_latex : bool If False, the string representation of the expression is returned. If True, the latex representation is returned. wrapper : str The backend might need the latex representation to be wrapped by some characters. Default to ``"$%s$"``. 
Returns ======= label : str """ if use_latex is False: return self._label if self._label == str(self._adaptive_expr): return self._get_wrapped_label(self._latex_label, wrapper) return self._latex_label ############################################################################## # Finding the centers of line segments or mesh faces ############################################################################## def centers_of_segments(array): np = import_module('numpy') return np.mean(np.vstack((array[:-1], array[1:])), 0) def centers_of_faces(array): np = import_module('numpy') return np.mean(np.dstack((array[:-1, :-1], array[1:, :-1], array[:-1, 1:], array[:-1, :-1], )), 2) def flat(x, y, z, eps=1e-3): """Checks whether three points are almost collinear""" np = import_module('numpy') # Workaround plotting piecewise (#8577) vector_a = (x - y).astype(float) vector_b = (z - y).astype(float) dot_product = np.dot(vector_a, vector_b) vector_a_norm = np.linalg.norm(vector_a) vector_b_norm = np.linalg.norm(vector_b) cos_theta = dot_product / (vector_a_norm * vector_b_norm) return abs(cos_theta + 1) < eps def _set_discretization_points(kwargs, pt): """Allow the use of the keyword arguments ``n, n1, n2`` to specify the number of discretization points in one and two directions, while keeping back-compatibility with older keyword arguments like, ``nb_of_points, nb_of_points_*, points``. Parameters ========== kwargs : dict Dictionary of keyword arguments passed into a plotting function. pt : type The type of the series, which indicates the kind of plot we are trying to create. """ replace_old_keywords = { "nb_of_points": "n", "nb_of_points_x": "n1", "nb_of_points_y": "n2", "nb_of_points_u": "n1", "nb_of_points_v": "n2", "points": "n" } for k, v in replace_old_keywords.items(): if k in kwargs.keys(): kwargs[v] = kwargs.pop(k) if pt in [LineOver1DRangeSeries, Parametric2DLineSeries, Parametric3DLineSeries]: if "n" in kwargs.keys(): kwargs["n1"] = kwargs["n"] if hasattr(kwargs["n"], "__iter__") and (len(kwargs["n"]) > 0): kwargs["n1"] = kwargs["n"][0] elif pt in [SurfaceOver2DRangeSeries, ContourSeries, ParametricSurfaceSeries, ImplicitSeries]: if "n" in kwargs.keys(): if hasattr(kwargs["n"], "__iter__") and (len(kwargs["n"]) > 1): kwargs["n1"] = kwargs["n"][0] kwargs["n2"] = kwargs["n"][1] else: kwargs["n1"] = kwargs["n2"] = kwargs["n"] return kwargs
ImplicitSeries
python
pytorch__pytorch
test/export/test_lift_unlift.py
{ "start": 14455, "end": 15620 }
class ____(TestCase): def setUp(self): super().setUp() load_torchbind_test_lib() def test_dict_api(self): constant_attr_map = ConstantAttrMap() const_obj = torch.classes._TorchScriptTesting._Foo(10, 20) const_tensor = torch.ones(2, 3) constant_attr_map.add(const_obj, "foo.bar") constant_attr_map.add(const_tensor, "foo.bar.baz") self.assertEqual(len(constant_attr_map), 2) self.assertEqual(list(constant_attr_map), [const_obj, const_tensor]) self.assertEqual(list(constant_attr_map.keys()), [const_obj, const_tensor]) self.assertEqual( list(constant_attr_map.values()), [["foo.bar"], ["foo.bar.baz"]] ) self.assertEqual(constant_attr_map[const_obj], ["foo.bar"]) self.assertEqual(constant_attr_map[const_tensor], ["foo.bar.baz"]) self.assertTrue(const_obj in constant_attr_map) with self.assertRaises(TypeError): constant_attr_map.add(1, "foo.bar") del constant_attr_map[const_obj] self.assertEqual(len(constant_attr_map), 1) if __name__ == "__main__": run_tests()
ConstantAttrMapTest
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 349989, "end": 350298 }
class ____(sgqlc.types.Type): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("cursor", "node") cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor") node = sgqlc.types.Field(GitActor, graphql_name="node")
GitActorEdge
python
pypa__packaging
tests/test_metadata.py
{ "start": 253, "end": 10108 }
class ____: @pytest.mark.parametrize("raw_field", sorted(metadata._STRING_FIELDS)) def test_non_repeating_fields_only_once(self, raw_field: str) -> None: data = "VaLuE" header_field = metadata._RAW_TO_EMAIL_MAPPING[raw_field] single_header = f"{header_field}: {data}" raw, unparsed = metadata.parse_email(single_header) assert not unparsed assert len(raw) == 1 assert raw_field in raw assert raw[raw_field] == data # type: ignore[literal-required] @pytest.mark.parametrize("raw_field", sorted(metadata._STRING_FIELDS)) def test_non_repeating_fields_repeated(self, raw_field: str) -> None: header_field = metadata._RAW_TO_EMAIL_MAPPING[raw_field] data = "VaLuE" single_header = f"{header_field}: {data}" repeated_header = "\n".join([single_header] * 2) raw, unparsed = metadata.parse_email(repeated_header) assert not raw assert len(unparsed) == 1 assert header_field in unparsed assert unparsed[header_field] == [data] * 2 @pytest.mark.parametrize("raw_field", sorted(metadata._LIST_FIELDS)) def test_repeating_fields_only_once(self, raw_field: str) -> None: data = "VaLuE" header_field = metadata._RAW_TO_EMAIL_MAPPING[raw_field] single_header = f"{header_field}: {data}" raw, unparsed = metadata.parse_email(single_header) assert not unparsed assert len(raw) == 1 assert raw_field in raw assert raw[raw_field] == [data] # type: ignore[literal-required] @pytest.mark.parametrize("raw_field", sorted(metadata._LIST_FIELDS)) def test_repeating_fields_repeated(self, raw_field: str) -> None: header_field = metadata._RAW_TO_EMAIL_MAPPING[raw_field] data = "VaLuE" single_header = f"{header_field}: {data}" repeated_header = "\n".join([single_header] * 2) raw, unparsed = metadata.parse_email(repeated_header) assert not unparsed assert len(raw) == 1 assert raw_field in raw assert raw[raw_field] == [data] * 2 # type: ignore[literal-required] @pytest.mark.parametrize( ("given", "expected"), [ ("A", ["A"]), ("A ", ["A"]), (" A", ["A"]), ("A, B", ["A", "B"]), ("A,B", ["A", "B"]), (" A, B", ["A", "B"]), ("A,B ", ["A", "B"]), ("A B", ["A B"]), ], ) def test_keywords(self, given: str, expected: list[str]) -> None: header = f"Keywords: {given}" raw, unparsed = metadata.parse_email(header) assert not unparsed assert len(raw) == 1 assert "keywords" in raw assert raw["keywords"] == expected @pytest.mark.parametrize( ("given", "expected"), [ ("", {"": ""}), ("A", {"A": ""}), ("A,B", {"A": "B"}), ("A, B", {"A": "B"}), (" A,B", {"A": "B"}), ("A,B ", {"A": "B"}), ("A,B,C", {"A": "B,C"}), ], ) def test_project_urls_parsing(self, given: str, expected: dict[str, str]) -> None: header = f"project-url: {given}" raw, unparsed = metadata.parse_email(header) assert not unparsed assert len(raw) == 1 assert "project_urls" in raw assert raw["project_urls"] == expected def test_duplicate_project_urls(self) -> None: header = "project-url: A, B\nproject-url: A, C" raw, unparsed = metadata.parse_email(header) assert not raw assert len(unparsed) == 1 assert "project-url" in unparsed assert unparsed["project-url"] == ["A, B", "A, C"] def test_str_input(self) -> None: name = "Tarek Ziadé" header = f"author: {name}" raw, unparsed = metadata.parse_email(header) assert not unparsed assert len(raw) == 1 assert "author" in raw assert raw["author"] == name def test_bytes_input(self) -> None: name = "Tarek Ziadé" header = f"author: {name}".encode() raw, unparsed = metadata.parse_email(header) assert not unparsed assert len(raw) == 1 assert "author" in raw assert raw["author"] == name def test_header_mojibake(self) -> None: value = "\xc0msterdam" header_name = 
"value" header_bytes = f"{header_name}: {value}".encode("latin1") raw, unparsed = metadata.parse_email(header_bytes) # Sanity check with pytest.raises(UnicodeDecodeError): header_bytes.decode("utf-8") assert not raw assert len(unparsed) == 1 assert header_name in unparsed assert unparsed[header_name] == [value] @pytest.mark.parametrize("given", ["hello", "description: hello", b"hello"]) def test_description(self, given: str | bytes) -> None: raw, unparsed = metadata.parse_email(given) assert not unparsed assert len(raw) == 1 assert "description" in raw assert raw["description"] == "hello" def test_description_non_utf8(self) -> None: header = "\xc0msterdam" header_bytes = header.encode("latin1") raw, unparsed = metadata.parse_email(header_bytes) assert not raw assert len(unparsed) == 1 assert "description" in unparsed # TODO: type annotations are not happy about this, investigate. assert unparsed["description"] == [header_bytes] # type: ignore[comparison-overlap] @pytest.mark.parametrize( ("given", "expected"), [ ("description: 1\ndescription: 2", ["1", "2"]), ("description: 1\n\n2", ["1", "2"]), ("description: 1\ndescription: 2\n\n3", ["1", "2", "3"]), ], ) def test_description_multiple( self, given: str | bytes, expected: list[str] ) -> None: raw, unparsed = metadata.parse_email(given) assert not raw assert len(unparsed) == 1 assert "description" in unparsed assert unparsed["description"] == expected def test_lowercase_keys(self) -> None: header = "AUTHOR: Tarek Ziadé\nWhatever: Else" raw, unparsed = metadata.parse_email(header) assert len(raw) == 1 assert "author" in raw assert len(unparsed) == 1 assert "whatever" in unparsed def test_complete(self) -> None: """Test all fields (except `Obsoletes-Dist`). `Obsoletes-Dist` was sacrificed to provide a value for `Dynamic`. """ path = pathlib.Path(__file__).parent / "metadata" / "everything.metadata" with path.open("r", encoding="utf-8") as file: metadata_contents = file.read() raw, unparsed = metadata.parse_email(metadata_contents) assert len(unparsed) == 1 # "ThisIsNotReal" key assert unparsed["thisisnotreal"] == ["Hello!"] assert len(raw) == 28 assert raw["metadata_version"] == "2.5" assert raw["name"] == "BeagleVote" assert raw["version"] == "1.0a2" assert raw["platforms"] == ["ObscureUnix", "RareDOS"] assert raw["supported_platforms"] == ["RedHat 7.2", "i386-win32-2791"] assert raw["summary"] == "A module for collecting votes from beagles." assert ( raw["description_content_type"] == "text/markdown; charset=UTF-8; variant=GFM" ) assert raw["keywords"] == ["dog", "puppy", "voting", "election"] assert raw["home_page"] == "http://www.example.com/~cschultz/bvote/" assert raw["download_url"] == "…/BeagleVote-0.45.tgz" assert raw["author"] == ( "C. Schultz, Universal Features Syndicate,\n" " Los Angeles, CA <cschultz@peanuts.example.com>" ) assert raw["author_email"] == '"C. Schultz" <cschultz@example.com>' assert raw["maintainer"] == ( "C. Schultz, Universal Features Syndicate,\n" " Los Angeles, CA <cschultz@peanuts.example.com>" ) assert raw["maintainer_email"] == '"C. Schultz" <cschultz@example.com>' assert raw["license"] == ( "This software may only be obtained by sending the\n" " author a postcard, and then the user promises not\n" " to redistribute it." 
) assert raw["license_expression"] == "Apache-2.0 OR BSD-2-Clause" assert raw["license_files"] == ["LICENSE.APACHE", "LICENSE.BSD"] assert raw["classifiers"] == [ "Development Status :: 4 - Beta", "Environment :: Console (Text Based)", ] assert raw["provides_extra"] == ["pdf"] assert raw["requires_dist"] == [ "reportlab; extra == 'pdf'", "pkginfo", "PasteDeploy", "zope.interface (>3.5.0)", "pywin32 >1.0; sys_platform == 'win32'", ] assert raw["requires_python"] == ">=3" assert raw["requires_external"] == [ "C", "libpng (>=1.5)", 'make; sys_platform != "win32"', ] assert raw["project_urls"] == { "Bug Tracker": "http://bitbucket.org/tarek/distribute/issues/", "Documentation": "https://example.com/BeagleVote", } assert raw["provides_dist"] == [ "OtherProject", "AnotherProject (3.4)", 'virtual_package; python_version >= "3.4"', ] assert raw["dynamic"] == ["Obsoletes-Dist"] assert raw["description"] == "This description intentionally left blank.\n" assert raw["import_names"] == ["beaglevote", "_beaglevote ; private"] assert raw["import_namespaces"] == ["spam", "_bacon ; private"]
TestRawMetadata
python
getsentry__sentry
src/sentry/uptime/models.py
{ "start": 6985, "end": 10572 }
class ____(DataSourceTypeHandler[UptimeSubscription]): @override @staticmethod def bulk_get_query_object( data_sources: list[DataSource], ) -> dict[int, UptimeSubscription | None]: uptime_subscription_ids: list[int] = [] for ds in data_sources: try: uptime_subscription_id = int(ds.source_id) uptime_subscription_ids.append(uptime_subscription_id) except ValueError: logger.exception( "Invalid DataSource.source_id fetching UptimeSubscription", extra={"id": ds.id, "source_id": ds.source_id}, ) qs_lookup = { str(uptime_subscription.id): uptime_subscription for uptime_subscription in UptimeSubscription.objects.filter( id__in=uptime_subscription_ids ) } return {ds.id: qs_lookup.get(ds.source_id) for ds in data_sources} @override @staticmethod def related_model(instance) -> list[ModelRelation]: return [ModelRelation(UptimeSubscription, {"id": instance.source_id})] @override @staticmethod def get_instance_limit(org: Organization) -> int | None: return None @override @staticmethod def get_current_instance_count(org: Organization) -> int: # We don't have a limit at the moment, so no need to count. raise NotImplementedError @override @staticmethod def get_relocation_model_name() -> str: return "uptime.uptimesubscription" def get_detector(uptime_subscription: UptimeSubscription, prefetch_workflow_data=False) -> Detector: """ Fetches a workflow_engine Detector given an existing uptime_subscription. This is used during the transition period moving uptime to detector. """ data_source = DataSource.objects.filter( type=DATA_SOURCE_UPTIME_SUBSCRIPTION, source_id=str(uptime_subscription.id), ) qs = Detector.objects_for_deletion.filter( type=GROUP_TYPE_UPTIME_DOMAIN_CHECK_FAILURE, data_sources=data_source[:1] ) select_related = ["project", "project__organization"] if prefetch_workflow_data: select_related.append("workflow_condition_group") qs = qs.prefetch_related("workflow_condition_group__conditions") qs = qs.select_related(*select_related) return qs.get() def get_uptime_subscription(detector: Detector) -> UptimeSubscription: """ Given a detector get the matching uptime subscription """ data_source = detector.data_sources.first() assert data_source return UptimeSubscription.objects.get_from_cache(id=int(data_source.source_id)) def get_audit_log_data(detector: Detector): """Get audit log data from a detector.""" uptime_subscription = get_uptime_subscription(detector) owner_user_id = None owner_team_id = None if detector.owner: if detector.owner.is_user: owner_user_id = detector.owner.id elif detector.owner.is_team: owner_team_id = detector.owner.id return { "project": detector.project_id, "name": detector.name, "owner_user_id": owner_user_id, "owner_team_id": owner_team_id, "url": uptime_subscription.url, "interval_seconds": uptime_subscription.interval_seconds, "timeout": uptime_subscription.timeout_ms, "method": uptime_subscription.method, "headers": uptime_subscription.headers, "body": uptime_subscription.body, }
UptimeSubscriptionDataSourceHandler
python
django__django
tests/dbshell/tests.py
{ "start": 202, "end": 613 }
class ____(SimpleTestCase): def test_command_missing(self): msg = ( "You appear not to have the %r program installed or on your path." % connection.client.executable_name ) with self.assertRaisesMessage(CommandError, msg): with mock.patch("subprocess.run", side_effect=FileNotFoundError): call_command("dbshell")
DbshellCommandTestCase
python
ethereum__web3.py
web3/_utils/ens.py
{ "start": 1053, "end": 1294 }
class ____: def __init__(self, name_addr_pairs: dict[str, ChecksumAddress]) -> None: self.registry = dict(name_addr_pairs) def address(self, name: str) -> ChecksumAddress: return self.registry.get(name, None)
StaticENS
python
allegroai__clearml
clearml/backend_api/services/v2_20/tasks.py
{ "start": 302963, "end": 323837 }
class ____(Response): """ Response of tasks.get_by_id endpoint. :param task: Task info :type task: Task """ _service = "tasks" _action = "get_by_id" _version = "2.20" _schema = { "definitions": { "artifact": { "properties": { "content_size": { "description": "Raw data length in bytes", "type": "integer", }, "display_data": { "description": "User-defined list of key/value pairs, sorted", "items": {"items": {"type": "string"}, "type": "array"}, "type": "array", }, "hash": { "description": "Hash of entire raw data", "type": "string", }, "key": {"description": "Entry key", "type": "string"}, "mode": { "$ref": "#/definitions/artifact_mode_enum", "description": "System defined input/output indication", }, "timestamp": { "description": "Epoch time when artifact was created", "type": "integer", }, "type": {"description": "System defined type", "type": "string"}, "type_data": { "$ref": "#/definitions/artifact_type_data", "description": "Additional fields defined by the system", }, "uri": {"description": "Raw data location", "type": "string"}, }, "required": ["key", "type"], "type": "object", }, "artifact_mode_enum": { "default": "output", "enum": ["input", "output"], "type": "string", }, "artifact_type_data": { "properties": { "content_type": { "description": "System defined raw data content type", "type": ["string", "null"], }, "data_hash": { "description": "Hash of raw data, without any headers or descriptive parts", "type": ["string", "null"], }, "preview": { "description": "Description or textual data", "type": ["string", "null"], }, }, "type": "object", }, "configuration_item": { "properties": { "description": { "description": "The parameter description. Optional", "type": ["string", "null"], }, "name": { "description": "Name of the parameter. Should be unique", "type": ["string", "null"], }, "type": { "description": "Type of the parameter. Optional", "type": ["string", "null"], }, "value": { "description": "Value of the parameter", "type": ["string", "null"], }, }, "type": "object", }, "execution": { "properties": { "artifacts": { "description": "Task artifacts", "items": {"$ref": "#/definitions/artifact"}, "type": ["array", "null"], }, "framework": { "description": "Framework related to the task. Case insensitive. Mandatory for Training tasks. 
", "type": ["string", "null"], }, "model_desc": { "additionalProperties": True, "description": "Json object representing the Model descriptors", "type": ["object", "null"], }, "model_labels": { "additionalProperties": {"type": "integer"}, "description": "Json object representing the ids of the labels in the model.\n The keys are the layers' names and the values are the IDs.\n Not applicable for Register (Import) tasks.\n Mandatory for Training tasks", "type": ["object", "null"], }, "parameters": { "additionalProperties": True, "description": "Json object containing the Task parameters", "type": ["object", "null"], }, "queue": { "description": "Queue ID where task was queued.", "type": ["string", "null"], }, }, "type": "object", }, "last_metrics_event": { "properties": { "max_value": { "description": "Maximum value reported", "type": ["number", "null"], }, "metric": { "description": "Metric name", "type": ["string", "null"], }, "min_value": { "description": "Minimum value reported", "type": ["number", "null"], }, "value": { "description": "Last value reported", "type": ["number", "null"], }, "variant": { "description": "Variant name", "type": ["string", "null"], }, }, "type": "object", }, "last_metrics_variants": { "additionalProperties": {"$ref": "#/definitions/last_metrics_event"}, "description": "Last metric events, one for each variant hash", "type": "object", }, "output": { "properties": { "destination": { "description": "Storage id. This is where output files will be stored.", "type": ["string", "null"], }, "error": { "description": "Last error text", "type": ["string", "null"], }, "model": {"description": "Model id.", "type": ["string", "null"]}, "result": { "description": "Task result. Values: 'success', 'failure'", "type": ["string", "null"], }, }, "type": "object", }, "params_item": { "properties": { "description": { "description": "The parameter description. Optional", "type": ["string", "null"], }, "name": { "description": "Name of the parameter. The combination of section and name should be unique", "type": ["string", "null"], }, "section": { "description": "Section that the parameter belongs to", "type": ["string", "null"], }, "type": { "description": "Type of the parameter. Optional", "type": ["string", "null"], }, "value": { "description": "Value of the parameter", "type": ["string", "null"], }, }, "type": "object", }, "script": { "properties": { "binary": { "default": "python", "description": "Binary to use when running the script", "type": ["string", "null"], }, "branch": { "description": "Repository branch id If not provided and tag not provided, default repository branch is used.", "type": ["string", "null"], }, "diff": { "description": "Uncommitted changes found in the repository when task was run", "type": ["string", "null"], }, "entry_point": { "description": "Path to execute within the repository", "type": ["string", "null"], }, "repository": { "description": "Name of the repository where the script is located", "type": ["string", "null"], }, "requirements": { "description": "A JSON object containing requirements strings by key", "type": ["object", "null"], }, "tag": { "description": "Repository tag", "type": ["string", "null"], }, "version_num": { "description": "Version (changeset) number. 
Optional (default is head version) Unused if tag is provided.", "type": ["string", "null"], }, "working_dir": { "description": "Path to the folder from which to run the script Default - root folder of repository", "type": ["string", "null"], }, }, "type": "object", }, "section_params": { "additionalProperties": {"$ref": "#/definitions/params_item"}, "description": "Task section params", "type": "object", }, "task": { "properties": { "active_duration": { "description": "Task duration time (seconds)", "type": ["integer", "null"], }, "comment": { "description": "Free text comment", "type": ["string", "null"], }, "company": { "description": "Company ID", "type": ["string", "null"], }, "completed": { "description": "Task end time (UTC)", "format": "date-time", "type": ["string", "null"], }, "configuration": { "additionalProperties": {"$ref": "#/definitions/configuration_item"}, "description": "Task configuration params", "type": ["object", "null"], }, "container": { "type": "object", "description": "Docker container parameters", "additionalProperties": {"type": ["string", "null"]}, }, "created": { "description": "Task creation time (UTC) ", "format": "date-time", "type": ["string", "null"], }, "execution": { "description": "Task execution params", "oneOf": [ {"$ref": "#/definitions/execution"}, {"type": "null"}, ], }, "hyperparams": { "additionalProperties": {"$ref": "#/definitions/section_params"}, "description": "Task hyper params per section", "type": ["object", "null"], }, "id": {"description": "Task id", "type": ["string", "null"]}, "last_change": { "description": "Last time any update was done to the task", "format": "date-time", "type": ["string", "null"], }, "last_iteration": { "description": "Last iteration reported for this task", "type": ["integer", "null"], }, "last_metrics": { "additionalProperties": {"$ref": "#/definitions/last_metrics_variants"}, "description": "Last metric variants (hash to events), one for each metric hash", "type": ["object", "null"], }, "last_update": { "description": "Last time this task was created, edited, changed or events for this task were reported", "format": "date-time", "type": ["string", "null"], }, "last_worker": { "description": "ID of last worker that handled the task", "type": ["string", "null"], }, "last_worker_report": { "description": "Last time a worker reported while working on this task", "format": "date-time", "type": ["string", "null"], }, "models": { "description": "Task models", "oneOf": [ {"$ref": "#/definitions/task_models"}, {"type": "null"}, ], }, "name": {"description": "Task Name", "type": ["string", "null"]}, "output": { "description": "Task output params", "oneOf": [{"$ref": "#/definitions/output"}, {"type": "null"}], }, "parent": { "description": "Parent task id", "type": ["string", "null"], }, "project": { "description": "Project ID of the project to which this task is assigned", "type": ["string", "null"], }, "published": { "description": "Last status change time", "format": "date-time", "type": ["string", "null"], }, "runtime": { "additionalProperties": True, "description": "Task runtime mapping", "type": ["object", "null"], }, "script": { "description": "Script info", "oneOf": [{"$ref": "#/definitions/script"}, {"type": "null"}], }, "started": { "description": "Task start time (UTC)", "format": "date-time", "type": ["string", "null"], }, "status": { "description": "", "oneOf": [ {"$ref": "#/definitions/task_status_enum"}, {"type": "null"}, ], }, "status_changed": { "description": "Last status change time", "format": 
"date-time", "type": ["string", "null"], }, "status_message": { "description": "free text string representing info about the status", "type": ["string", "null"], }, "status_reason": { "description": "Reason for last status change", "type": ["string", "null"], }, "system_tags": { "description": "System tags list. This field is reserved for system use, please don't use it.", "items": {"type": "string"}, "type": ["array", "null"], }, "tags": { "description": "User-defined tags list", "items": {"type": "string"}, "type": ["array", "null"], }, "type": { "description": "Type of task. Values: 'training', 'testing'", "oneOf": [ {"$ref": "#/definitions/task_type_enum"}, {"type": "null"}, ], }, "user": { "description": "Associated user id", "type": ["string", "null"], }, }, "type": "object", }, "task_model_item": { "properties": { "model": {"description": "The model ID", "type": "string"}, "name": {"description": "The task model name", "type": "string"}, }, "required": ["name", "model"], "type": "object", }, "task_models": { "properties": { "input": { "description": "The list of task input models", "items": {"$ref": "#/definitions/task_model_item"}, "type": ["array", "null"], }, "output": { "description": "The list of task output models", "items": {"$ref": "#/definitions/task_model_item"}, "type": ["array", "null"], }, }, "type": "object", }, "task_status_enum": { "enum": [ "created", "queued", "in_progress", "stopped", "published", "publishing", "closed", "failed", "completed", "unknown", ], "type": "string", }, "task_type_enum": { "enum": [ "training", "testing", "inference", "data_processing", "application", "monitor", "controller", "optimizer", "service", "qc", "custom", ], "type": "string", }, }, "properties": { "task": { "description": "Task info", "oneOf": [{"$ref": "#/definitions/task"}, {"type": "null"}], } }, "type": "object", } def __init__(self, task: Any = None, **kwargs: Any) -> None: super(GetByIdResponse, self).__init__(**kwargs) self.task = task @schema_property("task") def task(self) -> Any: return self._property_task @task.setter def task(self, value: Any) -> None: if value is None: self._property_task = None return if isinstance(value, dict): value = Task.from_dict(value) else: self.assert_isinstance(value, "task", Task) self._property_task = value
GetByIdResponse
python
Textualize__textual
docs/examples/styles/max_height.py
{ "start": 112, "end": 509 }
class ____(App):
    CSS_PATH = "max_height.tcss"

    def compose(self):
        yield Horizontal(
            Placeholder("max-height: 10w", id="p1"),
            Placeholder("max-height: 999", id="p2"),
            Placeholder("max-height: 50%", id="p3"),
            Placeholder("max-height: 10", id="p4"),
        )


if __name__ == "__main__":
    app = MaxHeightApp()
    app.run()
MaxHeightApp
python
pyodide__pyodide
src/py/pyodide/webloop.py
{ "start": 5750, "end": 35124 }
class ____(asyncio.AbstractEventLoop): """A custom event loop for use in Pyodide. Schedules tasks on the browser event loop. Does no lifecycle management and runs forever. :py:meth:`~asyncio.loop.run_forever` and :py:meth:`~asyncio.loop.run_until_complete` cannot block like a normal event loop would because we only have one thread so blocking would stall the browser event loop and prevent anything from ever happening. We defer all work to the browser event loop using the :js:func:`setTimeout` function. To ensure that this event loop doesn't stall out UI and other browser handling, we want to make sure that each task is scheduled on the browser event loop as a task not as a microtask. ``setTimeout(callback, 0)`` enqueues the callback as a task so it works well for our purposes. See the Python :external:doc:`library/asyncio-eventloop` documentation. """ def __init__(self): self._task_factory = None asyncio._set_running_loop(self) self._exception_handler = None self._current_handle = None self._in_progress = 0 self._no_in_progress_handler = None self._keyboard_interrupt_handler = None self._system_exit_handler = None # Debug mode is currently no-op (actual asyncio debug features not implemented) self._debug = sys.flags.dev_mode or ( not sys.flags.ignore_environment and bool(os.environ.get("PYTHONASYNCIODEBUG")) ) # The preserved state of async generator hooks self._old_agen_hooks: tuple[Any, Any] | None = None self._asyncgens: weakref.WeakSet[AsyncGenerator[Any, Any]] = weakref.WeakSet() self._asyncgens_shutdown_called: bool = False def get_debug(self): """Return the debug mode of the event loop.""" return self._debug def set_debug(self, enabled: bool) -> None: """Set the debug mode of the event loop.""" self._debug = enabled # # Async generator lifecycle management # def _asyncgen_firstiter_hook(self, agen: AsyncGenerator[Any, Any]) -> None: """Called when an async generator starts iteration. Tracks new async generators and issues warnings if they're created after shutdown_asyncgens() has been called. """ if self._asyncgens_shutdown_called: warnings.warn( f"asynchronous generator {agen!r} was scheduled after " f"loop.shutdown_asyncgens() call", ResourceWarning, source=self, stacklevel=2, ) self._asyncgens.add(agen) def _asyncgen_finalizer_hook(self, agen: AsyncGenerator[Any, Any]) -> None: """Called when an async generator is being finalized. Removes the generator from tracking and schedules its cleanup. """ self._asyncgens.discard(agen) # WebLoop never closes, but keep check for consistency with asyncio if not self.is_closed(): self.call_soon(asyncio.ensure_future, agen.aclose()) def _install_asyncgen_hooks(self) -> None: """Install async generator hooks if not already installed.""" if self._old_agen_hooks is not None: return self._old_agen_hooks = sys.get_asyncgen_hooks() sys.set_asyncgen_hooks( firstiter=self._asyncgen_firstiter_hook, finalizer=self._asyncgen_finalizer_hook, ) async def shutdown_asyncgens(self) -> None: """Shutdown all active async generators. This closes all tracked async generators and prevents new ones from being created. 
""" self._asyncgens_shutdown_called = True closing_asyncgens = list(self._asyncgens) self._asyncgens.clear() if not closing_asyncgens: return results = await asyncio.gather( *[ag.aclose() for ag in closing_asyncgens], return_exceptions=True ) for result, agen in zip(results, closing_asyncgens, strict=False): if isinstance(result, Exception): self.call_exception_handler( { "message": ( f"an error occurred during closing of " f"asynchronous generator {agen!r}" ), "exception": result, "asyncgen": agen, } ) async def shutdown_default_executor(self): """Schedule the shutdown of the default executor. This is a no-op since WebLoop doesn't use thread executors. """ pass # # Lifecycle methods: We ignore all lifecycle management # def is_running(self) -> bool: """Returns ``True`` if the event loop is running. Always returns ``True`` because WebLoop has no lifecycle management. """ return True def is_closed(self) -> bool: """Returns ``True`` if the event loop was closed. Always returns ``False`` because WebLoop has no lifecycle management. """ return False def close(self) -> None: """Ignore request to close WebLoop""" pass def _check_closed(self): """Used in create_task. Would raise an error if ``self.is_closed()``, but we are skipping all lifecycle stuff. """ pass def run_forever(self): """Run the event loop forever. Does nothing in this implementation. We cannot block like a normal event loop would because we only have one thread so blocking would stall the browser event loop and prevent anything from ever happening. """ pass def run_until_complete(self, future): """Run until future is done. If the argument is a coroutine, it is wrapped in a Task. The native event loop `run_until_complete` blocks until evaluation of the future is complete and then returns the result of the future. Since we cannot block, we just ensure that the future is scheduled and return the future. This makes this method a bit useless. Instead, use `future.add_done_callback(do_something_with_result)` or: ```python async def wrapper(): result = await future do_something_with_result(result) ``` """ from pyodide_js._api import config if config.enableRunUntilComplete: self._install_asyncgen_hooks() return run_sync(future) return asyncio.ensure_future(future) def stop(self): """Stop the event loop as soon as reasonable. This is a no-op in WebLoop since it runs forever on the browser event loop. """ pass # # Scheduling methods: use browser.setTimeout to schedule tasks on the browser event loop. # def _timer_handle_cancelled(self, handle): """Notification that a TimerHandle has been cancelled. This is a no-op since we use browser setTimeout which handles cancellation automatically. """ pass def call_soon( # type: ignore[override] self, callback: Callable[..., Any], *args: Any, context: contextvars.Context | None = None, ) -> asyncio.Handle: """Arrange for a callback to be called as soon as possible. Any positional arguments after the callback will be passed to the callback when it is called. This schedules the callback on the browser event loop using ``setTimeout(callback, 0)``. """ delay = 0 return self.call_later(delay, callback, *args, context=context) def call_soon_threadsafe( # type: ignore[override] self, callback: Callable[..., Any], *args: Any, context: contextvars.Context | None = None, ) -> asyncio.Handle: """Like ``call_soon()``, but thread-safe. We have no threads so everything is "thread safe", and we just use ``call_soon``. 
""" return self.call_soon(callback, *args, context=context) def call_later( # type: ignore[override] self, delay: float, callback: Callable[..., Any], *args: Any, context: contextvars.Context | None = None, ) -> asyncio.Handle: """Arrange for a callback to be called at a given time. Return a Handle: an opaque object with a cancel() method that can be used to cancel the call. The delay can be an int or float, expressed in seconds. It is always relative to the current time. Each callback will be called exactly once. If two callbacks are scheduled for exactly the same time, it undefined which will be called first. Any positional arguments after the callback will be passed to the callback when it is called. This uses `setTimeout(callback, delay)` """ if delay < 0: raise ValueError("Can't schedule in the past") h = asyncio.Handle(callback, args, self, context=context) def run_handle(): self._install_asyncgen_hooks() if h.cancelled(): return try: h._run() except SystemExit as e: if self._system_exit_handler: self._system_exit_handler(e.code) else: raise except KeyboardInterrupt: if self._keyboard_interrupt_handler: self._keyboard_interrupt_handler() else: raise scheduleCallback( create_once_callable(run_handle, _may_syncify=True), delay * 1000 ) return h def _decrement_in_progress(self, fut=None): if ( fut and getattr(fut, "_num_done_callbacks", None) == 1 and not fut.cancelled() and (exc := fut.exception()) ): # Only callback is this one, let's say it's an unhandled exception self.call_exception_handler({"exception": exc}) self._in_progress -= 1 if self._no_in_progress_handler and self._in_progress == 0: self._no_in_progress_handler() def call_at( # type: ignore[override] self, when: float, callback: Callable[..., Any], *args: Any, context: contextvars.Context | None = None, ) -> asyncio.Handle: """Like ``call_later()``, but uses an absolute time. Absolute time corresponds to the event loop's ``time()`` method. This uses ``setTimeout(callback, when - cur_time)`` """ cur_time = self.time() delay = when - cur_time return self.call_later(delay, callback, *args, context=context) def run_in_executor(self, executor, func, *args): # type: ignore[override] """Arrange for func to be called in the specified executor. This is normally supposed to run func(*args) in a separate process or thread and signal back to our event loop when it is done. It's possible to make the executor, but if we actually try to submit any functions to it, it will try to create a thread and throw an error. Best we can do is to run func(args) in this thread and stick the result into a future. """ fut = self.create_future() try: fut.set_result(func(*args)) except BaseException as e: fut.set_exception(e) return fut def set_default_executor(self, executor): """Set the default executor. This is a no-op since WebLoop doesn't use thread executors. All functions are executed in the main thread via run_in_executor. """ pass def create_future(self) -> asyncio.Future[Any]: """Create a Future object attached to the loop.""" self._in_progress += 1 fut: PyodideFuture[Any] = PyodideFuture(loop=self) fut.add_done_callback(self._decrement_in_progress) return fut # # The remaining methods are copied directly from BaseEventLoop # def time(self) -> float: """Return the time according to the event loop's clock. This is a float expressed in seconds since an epoch, but the epoch, precision, accuracy and drift are unspecified and may differ per event loop. 
Copied from ``BaseEventLoop.time`` """ return time.monotonic() def create_task( self, coro: Coroutine[T, Any, Any], *, name: str | None = None, context: contextvars.Context | None = None, ) -> Task[T]: """Schedule a coroutine object. Return a task object. Copied from ``BaseEventLoop.create_task`` """ self._check_closed() if self._task_factory is None: task: PyodideTask[T] = PyodideTask( coro, loop=self, name=name, context=context ) if task._source_traceback: # type: ignore[attr-defined] # Added comment: # this only happens if get_debug() returns True. # In that case, remove create_task from _source_traceback. del task._source_traceback[-1] # type: ignore[attr-defined] else: task = self._task_factory(self, coro) asyncio.tasks._set_task_name(task, name) # type: ignore[attr-defined] self._in_progress += 1 task.add_done_callback(self._decrement_in_progress) try: return task finally: # gh-128552: prevent a refcycle of # task.exception().__traceback__->BaseEventLoop.create_task->task del task def set_task_factory(self, factory): """Set a task factory that will be used by loop.create_task(). If factory is None the default task factory will be set. If factory is a callable, it should have a signature matching '(loop, coro)', where 'loop' will be a reference to the active event loop, 'coro' will be a coroutine object. The callable must return a Future. Copied from ``BaseEventLoop.set_task_factory`` """ if factory is not None and not callable(factory): raise TypeError("task factory must be a callable or None") self._task_factory = factory def get_task_factory(self): """Return a task factory, or None if the default one is in use. Copied from ``BaseEventLoop.get_task_factory`` """ return self._task_factory def get_exception_handler(self): """Return an exception handler, or None if the default one is in use.""" return self._exception_handler def set_exception_handler(self, handler): """Set handler as the new event loop exception handler. If handler is None, the default exception handler will be set. If handler is a callable object, it should have a signature matching '(loop, context)', where 'loop' will be a reference to the active event loop, 'context' will be a dict object (see `call_exception_handler()` documentation for details about context). """ if handler is not None and not callable(handler): raise TypeError(f"A callable object or None is expected, got {handler!r}") self._exception_handler = handler def default_exception_handler(self, context): """Default exception handler. This is called when an exception occurs and no exception handler is set, and can be called by a custom exception handler that wants to defer to the default behavior. This default handler logs the error message and other context-dependent information. In debug mode, a truncated stack trace is also appended showing where the given object (e.g. a handle or future or task) was created, if any. The context parameter has the same meaning as in `call_exception_handler()`. 
""" message = context.get("message") if not message: message = "Unhandled exception in event loop" if ( "source_traceback" not in context and self._current_handle is not None and self._current_handle._source_traceback ): context["handle_traceback"] = self._current_handle._source_traceback log_lines = [message] for key in sorted(context): if key in {"message", "exception"}: continue value = context[key] if key == "source_traceback": tb = "".join(traceback.format_list(value)) value = "Object created at (most recent call last):\n" value += tb.rstrip() elif key == "handle_traceback": tb = "".join(traceback.format_list(value)) value = "Handle created at (most recent call last):\n" value += tb.rstrip() else: value = repr(value) log_lines.append(f"{key}: {value}") if exception := context.get("exception"): log_lines += traceback.format_exception(exception) print("\n".join(log_lines), file=sys.stderr) def call_exception_handler(self, context): """Call the current event loop's exception handler. The context argument is a dict containing the following keys: - 'message': Error message; - 'exception' (optional): Exception object; - 'future' (optional): Future instance; - 'task' (optional): Task instance; - 'handle' (optional): Handle instance; - 'protocol' (optional): Protocol instance; - 'transport' (optional): Transport instance; - 'socket' (optional): Socket instance; - 'asyncgen' (optional): Asynchronous generator that caused the exception. New keys maybe introduced in the future. Note: do not overload this method in an event loop subclass. For custom exception handling, use the `set_exception_handler()` method. """ if self._exception_handler is None: try: self.default_exception_handler(context) except (SystemExit, KeyboardInterrupt): raise except BaseException: # Second protection layer for unexpected errors # in the default implementation, as well as for subclassed # event loops with overloaded "default_exception_handler". print("Exception in default exception handler", file=sys.stderr) traceback.print_exc() else: try: self._exception_handler(self, context) except (SystemExit, KeyboardInterrupt): raise except BaseException as exc: # Exception in the user set custom exception handler. try: # Let's try default handler. self.default_exception_handler( { "message": "Unhandled error in exception handler", "exception": exc, "context": context, } ) except (SystemExit, KeyboardInterrupt): raise except BaseException: # Guard 'default_exception_handler' in case it is # overloaded. print( "Exception in default exception handler " "while handling an unexpected error " "in custom exception handler", file=sys.stderr, ) traceback.print_exc() # # File descriptor readiness methods - Not available in browser environments # def add_reader(self, fd, callback, *args): # type: ignore[override] """Register a reader callback for a file descriptor (unsupported on WebLoop).""" raise NotImplementedError( "add_reader() is not available in browser environments due to lack of POSIX file descriptors." ) def add_writer(self, fd, callback, *args): # type: ignore[override] """Register a writer callback for a file descriptor (unsupported on WebLoop).""" raise NotImplementedError( "add_writer() is not available in browser environments due to lack of POSIX file descriptors." ) def remove_reader(self, fd): """Remove a reader callback for a file descriptor (unsupported on WebLoop).""" raise NotImplementedError( "remove_reader() is not available in browser environments due to lack of POSIX file descriptors." 
) def remove_writer(self, fd): """Remove a writer callback for a file descriptor (unsupported on WebLoop).""" raise NotImplementedError( "remove_writer() is not available in browser environments due to lack of POSIX file descriptors." ) # # Pipes & zero-copy file transfer methods — not available in browser environments # async def connect_read_pipe(self, protocol_factory, pipe): """Connect a read pipe to the event loop (unsupported on WebLoop).""" raise NotImplementedError( "connect_read_pipe() is not available in browser environments due to absence of OS pipes." ) async def connect_write_pipe(self, protocol_factory, pipe): """Connect a write pipe to the event loop (unsupported on WebLoop).""" raise NotImplementedError( "connect_write_pipe() is not available in browser environments due to absence of OS pipes." ) async def sendfile(self, transport, file, offset=0, count=None, *, fallback=True): """Send a file over a transport (unsupported on WebLoop).""" raise NotImplementedError( "sendfile() is not available in browser environments due to missing OS file descriptors and zero-copy facilities." ) # # High-level networking (TCP/UDP/DNS/TLS) methods — not available in browser environments # async def getaddrinfo(self, host, port, *, family=0, type=0, proto=0, flags=0): """Asynchronous version of socket.getaddrinfo() (unsupported on WebLoop).""" raise NotImplementedError( "getaddrinfo() is not available in browser environments due to restricted raw network access." ) async def getnameinfo(self, sockaddr, flags=0): """Asynchronous version of socket.getnameinfo() (unsupported on WebLoop).""" raise NotImplementedError( "getnameinfo() is not available in browser environments due to restricted raw network access." ) async def create_connection(self, protocol_factory, host=None, port=None, **kwargs): """Open a streaming transport connection to a given address (unsupported on WebLoop).""" raise NotImplementedError( "create_connection() is not available in browser environments due to restricted raw socket access." ) async def create_server(self, protocol_factory, host=None, port=None, **kwargs): """Create a TCP server (unsupported on WebLoop).""" raise NotImplementedError( "create_server() is not available in browser environments due to restricted raw socket access." ) async def create_unix_connection(self, protocol_factory, path=None, **kwargs): """Open a connection to a UNIX domain socket (unsupported on WebLoop).""" raise NotImplementedError( "create_unix_connection() is not available in browser environments due to absence of Unix domain sockets." ) async def create_unix_server(self, protocol_factory, path=None, **kwargs): """Create a UNIX domain socket server (unsupported on WebLoop).""" raise NotImplementedError( "create_unix_server() is not available in browser environments due to absence of Unix domain sockets." ) async def connect_accepted_socket(self, protocol_factory, sock, **kwargs): """Wrap an already accepted socket into a transport and protocol pair (unsupported on WebLoop).""" raise NotImplementedError( "connect_accepted_socket() is not available in browser environments due to restricted raw socket access." ) async def create_datagram_endpoint(self, protocol_factory, **kwargs): # type: ignore[override] """Create a datagram (UDP) connection (unsupported on WebLoop).""" raise NotImplementedError( "create_datagram_endpoint() is not available in browser environments due to restricted raw socket access." 
) async def start_tls(self, transport, protocol, sslcontext, **kwargs): """Upgrade an existing connection to TLS (unsupported on WebLoop).""" raise NotImplementedError( "start_tls() is not available in browser environments due to lack of low-level TLS controls." ) # # Low-level socket operations methods — not available in browser environments # async def sock_recv(self, sock, nbytes): """Receive up to nbytes (unsupported on WebLoop).""" raise NotImplementedError( "sock_recv() is not available in browser environments due to restricted raw socket access." ) async def sock_recv_into(self, sock, buf): """Receive into buf (unsupported on WebLoop).""" raise NotImplementedError( "sock_recv_into() is not available in browser environments due to restricted raw socket access." ) async def sock_recvfrom(self, sock, bufsize): """Receive a datagram up to bufsize (unsupported on WebLoop).""" raise NotImplementedError( "sock_recvfrom() is not available in browser environments due to restricted raw socket access." ) async def sock_recvfrom_into(self, sock, buf, nbytes=0): """Receive a datagram into buf (unsupported on WebLoop).""" raise NotImplementedError( "sock_recvfrom_into() is not available in browser environments due to restricted raw socket access." ) async def sock_sendall(self, sock, data): """Send all data to the socket (unsupported on WebLoop).""" raise NotImplementedError( "sock_sendall() is not available in browser environments due to restricted raw socket access." ) async def sock_sendto(self, sock, data, address): """Send a datagram to address (unsupported on WebLoop).""" raise NotImplementedError( "sock_sendto() is not available in browser environments due to restricted raw socket access." ) async def sock_connect(self, sock, address): """Connect the socket to a remote address (unsupported on WebLoop).""" raise NotImplementedError( "sock_connect() is not available in browser environments due to restricted raw socket access." ) async def sock_accept(self, sock): """Accept a connection (unsupported on WebLoop).""" raise NotImplementedError( "sock_accept() is not available in browser environments due to restricted raw socket access." ) async def sock_sendfile(self, sock, file, offset=0, count=None, *, fallback=None): """Send a file (uses os.sendfile if possible) (unsupported on WebLoop).""" raise NotImplementedError( "sock_sendfile() is not available in browser environments due to missing OS file descriptors and zero-copy facilities." ) # # Subprocess methods — not available in browser environments # async def subprocess_shell(self, protocol_factory, cmd, **kwargs): """Create a subprocess from string args (unsupported on WebLoop).""" raise NotImplementedError( "subprocess_shell() is not available in browser environments due to absence of OS process APIs." ) async def subprocess_exec(self, protocol_factory, *args, **kwargs): """Run a subprocess from a shell command line (unsupported on WebLoop).""" raise NotImplementedError( "subprocess_exec() is not available in browser environments due to absence of OS process APIs." ) # # Signals methods — not available in browser environments # def add_signal_handler(self, sig, callback, *args): # type: ignore[override] """Set callback as the handler for the given signal (unsupported on WebLoop).""" raise NotImplementedError( "add_signal_handler() is not available in browser environments due to lack of POSIX signals." 
) def remove_signal_handler(self, sig): """Remove the handler for the given signal (unsupported on WebLoop).""" raise NotImplementedError( "remove_signal_handler() is not available in browser environments due to lack of POSIX signals." )
WebLoop
python
huggingface__transformers
src/transformers/models/speecht5/modeling_speecht5.py
{ "start": 132131, "end": 134333 }
class ____(nn.Module):
    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5), leaky_relu_slope=0.1):
        super().__init__()
        self.leaky_relu_slope = leaky_relu_slope

        self.convs1 = nn.ModuleList(
            [
                nn.Conv1d(
                    channels,
                    channels,
                    kernel_size,
                    stride=1,
                    dilation=dilation[i],
                    padding=self.get_padding(kernel_size, dilation[i]),
                )
                for i in range(len(dilation))
            ]
        )
        self.convs2 = nn.ModuleList(
            [
                nn.Conv1d(
                    channels,
                    channels,
                    kernel_size,
                    stride=1,
                    dilation=1,
                    padding=self.get_padding(kernel_size, 1),
                )
                for _ in range(len(dilation))
            ]
        )

    def get_padding(self, kernel_size, dilation=1):
        return (kernel_size * dilation - dilation) // 2

    def apply_weight_norm(self):
        weight_norm = nn.utils.weight_norm
        if hasattr(nn.utils.parametrizations, "weight_norm"):
            weight_norm = nn.utils.parametrizations.weight_norm

        for layer in self.convs1:
            weight_norm(layer)
        for layer in self.convs2:
            weight_norm(layer)

    def remove_weight_norm(self):
        for layer in self.convs1:
            nn.utils.remove_weight_norm(layer)
        for layer in self.convs2:
            nn.utils.remove_weight_norm(layer)

    def forward(self, hidden_states):
        for conv1, conv2 in zip(self.convs1, self.convs2):
            residual = hidden_states
            hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
            hidden_states = conv1(hidden_states)
            hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
            hidden_states = conv2(hidden_states)
            hidden_states = hidden_states + residual
        return hidden_states


@auto_docstring(
    custom_intro="""
    HiFi-GAN vocoder.
    """
)
HifiGanResidualBlock
python
apache__airflow
airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_connections.py
{ "start": 37712, "end": 51541 }
class ____(TestConnectionEndpoint): @pytest.mark.parametrize( ("actions", "expected_results"), [ pytest.param( { "actions": [ { "action": "create", "entities": [ { "connection_id": "NOT_EXISTING_CONN_ID", "conn_type": "NOT_EXISTING_CONN_TYPE", } ], "action_on_existence": "skip", } ] }, { "create": { "success": ["NOT_EXISTING_CONN_ID"], "errors": [], } }, id="test_successful_create", ), pytest.param( { "actions": [ { "action": "create", "entities": [ { "connection_id": TEST_CONN_ID, "conn_type": TEST_CONN_TYPE, }, { "connection_id": "NOT_EXISTING_CONN_ID", "conn_type": "NOT_EXISTING_CONN_TYPE", }, ], "action_on_existence": "skip", } ] }, { "create": { "success": ["NOT_EXISTING_CONN_ID"], "errors": [], } }, id="test_successful_create_with_skip", ), pytest.param( { "actions": [ { "action": "create", "entities": [ { "connection_id": TEST_CONN_ID, "conn_type": TEST_CONN_TYPE, "description": "new_description", } ], "action_on_existence": "overwrite", } ] }, { "create": { "success": [TEST_CONN_ID], "errors": [], } }, id="test_create_with_overwrite", ), pytest.param( { "actions": [ { "action": "create", "entities": [ { "connection_id": TEST_CONN_ID, "conn_type": TEST_CONN_TYPE, "description": TEST_CONN_DESCRIPTION, "host": TEST_CONN_HOST, "port": TEST_CONN_PORT, "login": TEST_CONN_LOGIN, }, ], "action_on_existence": "fail", } ] }, { "create": { "success": [], "errors": [ { "error": "The connections with these connection_ids: {'test_connection_id'} already exist.", "status_code": 409, }, ], } }, id="test_create_conflict", ), pytest.param( { "actions": [ { "action": "update", "entities": [ { "connection_id": TEST_CONN_ID, "conn_type": TEST_CONN_TYPE, "description": "new_description", } ], "action_on_non_existence": "skip", } ] }, { "update": { "success": [TEST_CONN_ID], "errors": [], } }, id="test_successful_update", ), pytest.param( { "actions": [ { "action": "update", "entities": [ { "connection_id": "NOT_EXISTING_CONN_ID", "conn_type": "NOT_EXISTING_CONN_TYPE", } ], "action_on_non_existence": "skip", } ] }, { "update": { "success": [], "errors": [], } }, id="test_update_with_skip", ), pytest.param( { "actions": [ { "action": "update", "entities": [ { "connection_id": "NOT_EXISTING_CONN_ID", "conn_type": "NOT_EXISTING_CONN_TYPE", } ], "action_on_non_existence": "fail", } ] }, { "update": { "success": [], "errors": [ { "error": "The connections with these connection_ids: {'NOT_EXISTING_CONN_ID'} were not found.", "status_code": 404, } ], } }, id="test_update_with_fail", ), pytest.param( { "actions": [ { "action": "update", "entities": [ { "connection_id": TEST_CONN_ID, "conn_type": TEST_CONN_TYPE, "description": "updated_description", } ], "update_mask": ["description"], "action_on_non_existence": "fail", } ] }, {"update": {"success": [TEST_CONN_ID], "errors": []}}, id="test_connection_update_with_valid_update_mask", ), pytest.param( { "actions": [ { "action": "delete", "entities": [TEST_CONN_ID], } ] }, { "delete": { "success": [TEST_CONN_ID], "errors": [], } }, id="test_successful_delete", ), pytest.param( { "actions": [ { "action": "delete", "entities": ["NOT_EXISTING_CONN_ID"], "action_on_non_existence": "skip", } ] }, { "delete": { "success": [], "errors": [], } }, id="test_delete_with_skip", ), pytest.param( { "actions": [ { "action": "delete", "entities": ["NOT_EXISTING_CONN_ID"], "action_on_non_existence": "fail", } ] }, { "delete": { "success": [], "errors": [ { "error": "The connections with these connection_ids: {'NOT_EXISTING_CONN_ID'} were not found.", "status_code": 404, } ], } }, 
id="test_delete_not_found", ), pytest.param( { "actions": [ { "action": "create", "entities": [ { "connection_id": "NOT_EXISTING_CONN_ID", "conn_type": "NOT_EXISTING_CONN_TYPE", } ], "action_on_existence": "skip", }, { "action": "update", "entities": [ { "connection_id": TEST_CONN_ID, "conn_type": TEST_CONN_TYPE, "description": "new_description", } ], "action_on_non_existence": "skip", }, { "action": "delete", "entities": [TEST_CONN_ID], "action_on_non_existence": "skip", }, ] }, { "create": { "success": ["NOT_EXISTING_CONN_ID"], "errors": [], }, "update": { "success": [TEST_CONN_ID], "errors": [], }, "delete": { "success": [TEST_CONN_ID], "errors": [], }, }, id="test_create_update_delete", ), pytest.param( { "actions": [ { "action": "update", "entities": [ { "connection_id": TEST_CONN_ID, "conn_type": TEST_CONN_TYPE, "description": "updated_description", } ], "update_mask": ["description"], "action_on_non_existence": "fail", }, { "action": "delete", "entities": [TEST_CONN_ID], "action_on_non_existence": "fail", }, ] }, { "update": {"success": [TEST_CONN_ID], "errors": []}, "delete": {"success": [TEST_CONN_ID], "errors": []}, }, id="test_connection_create_update_delete_with_update_mask", ), ], ) def test_bulk_connections(self, test_client, actions, expected_results, session): self.create_connections() response = test_client.patch("/connections", json=actions) response_data = response.json() for connection_id, value in expected_results.items(): assert response_data[connection_id] == value _check_last_log(session, dag_id=None, event="bulk_connections", logical_date=None) def test_should_respond_401(self, unauthenticated_test_client): response = unauthenticated_test_client.patch("/connections", json={}) assert response.status_code == 401 def test_should_respond_403(self, unauthorized_test_client): response = unauthorized_test_client.patch( "/connections", json={ "actions": [ { "action": "create", "entities": [ {"connection_id": "test1", "conn_type": "test1"}, ], }, ] }, ) assert response.status_code == 403
TestBulkConnections
python
django-compressor__django-compressor
compressor/tests/test_offline.py
{ "start": 17188, "end": 17738 }
class ____(OfflineTestCaseMixin, TestCase):
    templates_dir = "test_with_context"
    expected_hash = ["8b4a7452e1c5", "55b3123e884c", "bfc63829cc58"]
    additional_test_settings = {
        "COMPRESS_OFFLINE_CONTEXT": list(offline_context_generator())
    }

    def _prepare_contexts(self, engine):
        if engine == "django":
            return [Context(c) for c in settings.COMPRESS_OFFLINE_CONTEXT]
        if engine == "jinja2":
            return settings.COMPRESS_OFFLINE_CONTEXT
        return None
OfflineCompressTestCaseWithContextList
python
pytorch__pytorch
torch/utils/flop_counter.py
{ "start": 24452, "end": 29476 }
class ____: """ ``FlopCounterMode`` is a context manager that counts the number of flops within its context. It does this using a ``TorchDispatchMode``. It also supports hierarchical output by passing a module (or list of modules) to FlopCounterMode on construction. If you do not need hierarchical output, you do not need to use it with a module. Example usage .. code-block:: python mod = ... with FlopCounterMode(mod) as flop_counter: mod.sum().backward() """ def __init__( self, mods: torch.nn.Module | list[torch.nn.Module] | None = None, depth: int = 2, display: bool = True, custom_mapping: dict[Any, Any] | None = None) -> None: super().__init__() self.flop_counts: dict[str, dict[Any, int]] = defaultdict(lambda: defaultdict(int)) self.depth = depth self.display = display self.mode: _FlopCounterMode | None = None if custom_mapping is None: custom_mapping = {} if mods is not None: warnings.warn("mods argument is not needed anymore, you can stop passing it", stacklevel=2) self.flop_registry = { **flop_registry, **{k: v if getattr(v, "_get_raw", False) else shape_wrapper(v) for k, v in custom_mapping.items()} } self.mod_tracker = ModuleTracker() def get_total_flops(self) -> int: return sum(self.flop_counts['Global'].values()) def get_flop_counts(self) -> dict[str, dict[Any, int]]: """Return the flop counts as a dictionary of dictionaries. The outer dictionary is keyed by module name, and the inner dictionary is keyed by operation name. Returns: Dict[str, Dict[Any, int]]: The flop counts as a dictionary. """ return {k: dict(v) for k, v in self.flop_counts.items()} def get_table(self, depth=None): if depth is None: depth = self.depth if depth is None: depth = 999999 import tabulate tabulate.PRESERVE_WHITESPACE = True header = ["Module", "FLOP", "% Total"] values = [] global_flops = self.get_total_flops() global_suffix = get_suffix_str(global_flops) is_global_subsumed = False def process_mod(mod_name, depth): nonlocal is_global_subsumed total_flops = sum(self.flop_counts[mod_name].values()) is_global_subsumed |= total_flops >= global_flops padding = " " * depth values = [] values.append([ padding + mod_name, convert_num_with_suffix(total_flops, global_suffix), convert_to_percent_str(total_flops, global_flops) ]) for k, v in self.flop_counts[mod_name].items(): values.append([ padding + " - " + str(k), convert_num_with_suffix(v, global_suffix), convert_to_percent_str(v, global_flops) ]) return values for mod in sorted(self.flop_counts.keys()): if mod == 'Global': continue mod_depth = mod.count(".") + 1 if mod_depth > depth: continue cur_values = process_mod(mod, mod_depth - 1) values.extend(cur_values) # We do a bit of messing around here to only output the "Global" value # if there are any FLOPs in there that aren't already fully contained by # a module. 
if 'Global' in self.flop_counts and not is_global_subsumed: for value in values: value[0] = " " + value[0] values = process_mod('Global', 0) + values if len(values) == 0: values = [["Global", "0", "0%"]] return tabulate.tabulate(values, headers=header, colalign=("left", "right", "right")) # NB: This context manager is NOT reentrant def __enter__(self): self.flop_counts.clear() self.mod_tracker.__enter__() self.mode = _FlopCounterMode(self) self.mode.__enter__() return self def __exit__(self, *args): if self.mode is None: raise AssertionError("Internal error: FlopCounter.__exit__ called but mode is None") b = self.mode.__exit__(*args) self.mode = None # break cycles self.mod_tracker.__exit__() if self.display: print(self.get_table(self.depth)) return b def _count_flops(self, func_packet, out, args, kwargs): if func_packet in self.flop_registry: flop_count_func = self.flop_registry[func_packet] flop_count = flop_count_func(*args, **kwargs, out_val=out) # type: ignore[operator] for par in set(self.mod_tracker.parents): self.flop_counts[par][func_packet] += flop_count return out
FlopCounterMode
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 38769, "end": 39090 }
class ____(sgqlc.types.Enum):
    """
    See source code for more info.
    """

    __schema__ = graphql_schema
    __choices__ = (
        "CANCELLED_SPONSORSHIP",
        "NEW_SPONSORSHIP",
        "PENDING_CHANGE",
        "REFUND",
        "SPONSOR_MATCH_DISABLED",
        "TIER_CHANGE",
    )
SponsorsActivityAction
python
pytorch__pytorch
torch/_dynamo/variables/distributed.py
{ "start": 6074, "end": 9963 }
class ____(DistributedVariable): @staticmethod def is_placement(value: object) -> bool: # we can't rely on importing/accessing torch distributed, it is not always built. if not DistributedVariable.is_available(): return False from torch.distributed.tensor.placement_types import Placement return isinstance(value, Placement) def as_python_constant(self) -> Any: return self.value def var_getattr(self, tx: "InstructionTranslator", name: str) -> VariableTracker: if name == "dim": return ConstantVariable.create(self.value.dim) return super().var_getattr(tx, name) def call_method( self, tx: "InstructionTranslator", name: str, args: Sequence[VariableTracker], kwargs: dict[str, VariableTracker], ) -> VariableTracker: from . import ConstantVariable # Placement types dynamo tracking only allows following methods # and __setattr__ is for case like `Shard(dim)` and methods. # Methods in the list must satisfy: # 1. Input arguments are constants and do not need to be guarded on; # 2. Output is constant with respect to their inputs constant_fold_functions = [ "__init__", "__setattr__", "is_shard", "is_partial", "is_replicate", ] if name in constant_fold_functions: try: value_type = type(self.value) if inspect.getattr_static(value_type, "__getattr__", None) is not None: unimplemented( gb_type="Placement with custom __getattr__ not supported", context=f"{value_type.__name__} with custom __getattr__", explanation="Dynamo does not support Placement types with custom __getattr__ methods", hints=[ "Use Placement types without custom __getattr__ methods", "Move the Placement usage outside the compiled region", ], ) method = inspect.getattr_static(value_type, name) except AttributeError: method = None if method is object.__init__: return ConstantVariable.create(None) args = [x.as_python_constant() for x in args] kwargs = {k: v.as_python_constant() for k, v in kwargs.items()} assert method is not None if name == "__setattr__": method(self.value, *args, **kwargs) return self constant_val = method(self.value, *args, **kwargs) return ConstantVariable.create(constant_val) return super().call_method(tx, name, args, kwargs) # type: ignore[arg-type] def reconstruct(self, codegen: "PyCodegen") -> None: # Reconstruct the Placement object by calling its constructor # e.g., Shard(0), Replicate(), Partial() from torch.distributed.tensor.placement_types import Partial, Replicate, Shard placement_type = type(self.value) # Load the placement class codegen.add_push_null( lambda: codegen.load_import_from( "torch.distributed.tensor.placement_types", placement_type.__name__ ) ) # For Shard, we need to pass the dim argument if isinstance(self.value, Shard): codegen(ConstantVariable.create(self.value.dim)) codegen.extend_output(create_call_function(1, False)) # Replicate and Partial have no required args elif istype(self.value, (Replicate, Partial)): codegen.extend_output(create_call_function(0, False)) else: super().reconstruct(codegen)
PlacementVariable
python
apache__airflow
providers/databricks/tests/unit/databricks/hooks/test_databricks.py
{ "start": 58426, "end": 60062 }
class ____:
    """
    Tests for DatabricksHook when auth is done with AAD token for SP as user inside workspace.
    """

    @pytest.fixture(autouse=True)
    def setup_connections(self, create_connection_without_db):
        create_connection_without_db(
            Connection(
                conn_id=DEFAULT_CONN_ID,
                conn_type="databricks",
                host=None,
                login="9ff815a6-4404-4ab8-85cb-cd0e6f879c1d",
                password="secret",
                extra=json.dumps(
                    {
                        "azure_tenant_id": "3ff810a6-5504-4ab8-85cb-cd0e6f879c1d",
                    }
                ),
            )
        )
        self.hook = DatabricksHook(retry_args=DEFAULT_RETRY_ARGS)

    @mock.patch("airflow.providers.databricks.hooks.databricks_base.requests")
    @mock.patch.object(azure.identity, "ClientSecretCredential")
    def test_submit_run(self, mock_azure_identity, mock_requests):
        mock_requests.codes.ok = 200
        mock_requests.post.side_effect = [create_successful_response_mock({"run_id": "1"})]
        mock_azure_identity().get_token.return_value = create_aad_token_for_resource()
        status_code_mock = mock.PropertyMock(return_value=200)
        type(mock_requests.post.return_value).status_code = status_code_mock
        data = {"notebook_task": NOTEBOOK_TASK, "new_cluster": NEW_CLUSTER}
        run_id = self.hook.submit_run(data)
        assert run_id == "1"

        args = mock_requests.post.call_args
        kwargs = args[1]
        assert kwargs["auth"].token == TOKEN


@pytest.mark.db_test
TestDatabricksHookAadToken
python
wandb__wandb
wandb/sdk/integration_utils/auto_logging.py
{ "start": 388, "end": 596 }
class ____(Protocol[K, V]):
    def __getitem__(self, key: K) -> V: ...  # pragma: no cover

    def get(
        self, key: K, default: Optional[V] = None
    ) -> Optional[V]: ...  # pragma: no cover
Response
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/decorators/decorator_assets_definition_builder.py
{ "start": 8667, "end": 9718 }
class ____(NamedTuple):
    output_name: str
    output: Out


def make_keys_by_output_name(
    asset_outs: Mapping[AssetKey, tuple[str, Out]],
) -> Mapping[str, AssetKey]:
    return {output_name: asset_key for asset_key, (output_name, _) in asset_outs.items()}


def compute_required_resource_keys(
    required_resource_keys: AbstractSet[str],
    resource_defs: Mapping[str, ResourceDefinition],
    fn: Callable[..., Any],
    decorator_name: str,
) -> AbstractSet[str]:
    bare_required_resource_keys = set(required_resource_keys)
    resource_defs_keys = set(resource_defs.keys())
    required_resource_keys = bare_required_resource_keys | resource_defs_keys
    arg_resource_keys = {arg.name for arg in get_resource_args(fn)}
    check.param_invariant(
        len(bare_required_resource_keys or []) == 0 or len(arg_resource_keys) == 0,
        f"Cannot specify resource requirements in both {decorator_name} decorator and as"
        " arguments to the decorated function",
    )
    return required_resource_keys - arg_resource_keys
NamedOut
python
astropy__astropy
astropy/coordinates/representation/geodetic.py
{ "start": 6544, "end": 6735 }
class ____(BaseGeodeticRepresentation):
    """Representation of points in WGS72 3D geodetic coordinates."""

    _ellipsoid = "WGS72"


@format_doc(geodetic_base_doc)
WGS72GeodeticRepresentation
python
django__django
tests/m2m_regress/models.py
{ "start": 1212, "end": 1346 }
class ____(SelfRefer):
    pass


# Many-to-Many relation between models, where one of the PK's isn't an
# Autofield
SelfReferChildSibling
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_format09.py
{ "start": 315, "end": 1675 }
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        self.set_filename("chart_format09.xlsx")

    def test_create_file(self):
        """Test the creation of an XlsxWriter file with chart formatting."""

        workbook = Workbook(self.got_filename)

        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({"type": "line"})

        chart.axis_ids = [46115072, 46157184]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        worksheet.write_column("A1", data[0])
        worksheet.write_column("B1", data[1])
        worksheet.write_column("C1", data[2])

        chart.add_series(
            {
                "categories": "=Sheet1!$A$1:$A$5",
                "values": "=Sheet1!$B$1:$B$5",
                "line": {
                    "color": "red",
                    "width": 1.25,
                    "dash_type": "square_dot",
                },
            }
        )

        chart.add_series(
            {
                "categories": "=Sheet1!$A$1:$A$5",
                "values": "=Sheet1!$C$1:$C$5",
            }
        )

        worksheet.insert_chart("E9", chart)

        workbook.close()

        self.assertExcelEqual()
TestCompareXLSXFiles
python
sympy__sympy
sympy/integrals/manualintegrate.py
{ "start": 12289, "end": 12660 }
class ____(Rule):
    subfunctions: Sequence[tuple[Rule, bool | Boolean]]

    def eval(self) -> Expr:
        return Piecewise(*[(substep.eval(), cond) for substep, cond in self.subfunctions])

    def contains_dont_know(self) -> bool:
        return any(substep.contains_dont_know() for substep, _ in self.subfunctions)


@dataclass
PiecewiseRule
python
jd__tenacity
tenacity/__init__.py
{ "start": 5741, "end": 6470 }
class ____:
    """Manage attempt context."""

    def __init__(self, retry_state: "RetryCallState"):
        self.retry_state = retry_state

    def __enter__(self) -> None:
        pass

    def __exit__(
        self,
        exc_type: t.Optional[t.Type[BaseException]],
        exc_value: t.Optional[BaseException],
        traceback: t.Optional["types.TracebackType"],
    ) -> t.Optional[bool]:
        if exc_type is not None and exc_value is not None:
            self.retry_state.set_exception((exc_type, exc_value, traceback))
            return True  # Swallow exception.
        else:
            # We don't have the result, actually.
            self.retry_state.set_result(None)
            return None
AttemptManager
python
joke2k__faker
faker/providers/phone_number/pl_PL/__init__.py
{ "start": 49, "end": 895 }
class ____(PhoneNumberProvider):
    formats = (
        # Mobile
        # Government website: http://www.uke.gov.pl/numeracja-843
        "50# ### ###",
        "51# ### ###",
        "53# ### ###",
        "57# ### ###",
        "60# ### ###",
        "66# ### ###",
        "69# ### ###",
        "72# ### ###",
        "73# ### ###",
        "78# ### ###",
        "79# ### ###",
        "88# ### ###",
        "+48 50# ### ###",
        "+48 51# ### ###",
        "+48 53# ### ###",
        "+48 57# ### ###",
        "+48 60# ### ###",
        "+48 66# ### ###",
        "+48 69# ### ###",
        "+48 72# ### ###",
        "+48 73# ### ###",
        "+48 78# ### ###",
        "+48 79# ### ###",
        "+48 88# ### ###",
        "32 ### ## ##",
        "+48 32 ### ## ##",
        "22 ### ## ##",
        "+48 22 ### ## ##",
    )
Provider
python
airbytehq__airbyte
airbyte-ci/connectors/connectors_qa/tests/unit_tests/test_checks/test_version.py
{ "start": 230, "end": 2743 }
class ____:
    @pytest.fixture
    def mock_connector(self, mocker, tmp_path):
        connector = mocker.Mock(code_directory=str(tmp_path), technical_name="mock-connector")
        return connector

    def _get_version_increment_check(self, mocker, master_version="1.0.0", current_version="1.0.1"):
        mocker.patch(
            "connectors_qa.checks.version.CheckVersionIncrement._get_master_connector_version",
            return_value=semver.Version.parse(master_version),
        )
        mocker.patch(
            "connectors_qa.checks.version.CheckVersionIncrement._get_current_connector_version",
            return_value=semver.Version.parse(current_version),
        )
        return CheckVersionIncrement()

    def test_validate_success(self, mocker, mock_connector):
        version_increment_check = self._get_version_increment_check(mocker, master_version="1.0.0", current_version="1.0.1")
        result = version_increment_check._run(mock_connector)
        assert result.status == CheckStatus.PASSED

    def test_validate_failure_no_increment(self, mock_connector, mocker):
        version_increment_check = self._get_version_increment_check(mocker, master_version="1.0.0", current_version="1.0.0")
        result = version_increment_check._run(mock_connector)
        assert result.status == CheckStatus.FAILED
        assert (
            result.message
            == f"The dockerImageTag in {consts.METADATA_FILE_NAME} was not incremented. Master version is 1.0.0, current version is 1.0.0"
        )

    def test_validate_success_rc_increment(self, mock_connector, mocker):
        version_increment_check = self._get_version_increment_check(mocker, master_version="1.0.1-rc.1", current_version="1.0.1-rc.2")
        result = version_increment_check._run(mock_connector)
        assert result.status == CheckStatus.PASSED

    def test_validate_failure_rc_with_different_versions(self, mock_connector, mocker):
        version_increment_check = self._get_version_increment_check(mocker, master_version="1.0.0-rc.1", current_version="1.0.1-rc.1")
        result = version_increment_check._run(mock_connector)
        assert result.status == CheckStatus.FAILED
        assert (
            result.message
            == f"Master and current version are release candidates but they have different major, minor or patch versions. Release candidates should only differ in the prerelease part. Master version is 1.0.0-rc.1, current version is 1.0.1-rc.1"
        )
TestVersionIncrementCheck
python
getsentry__sentry
src/sentry/issues/endpoints/organization_searches.py
{ "start": 879, "end": 4725 }
class ____(OrganizationEndpoint): publish_status = { "GET": ApiPublishStatus.PRIVATE, "POST": ApiPublishStatus.PRIVATE, } owner = ApiOwner.ISSUES permission_classes = (OrganizationSearchPermission,) def get(self, request: Request, organization: Organization) -> Response: """ List an Organization's saved searches ````````````````````````````````````` Retrieve a list of saved searches for a given Organization. For custom saved searches, return them for all projects even if we have duplicates. For default searches, just return one of each search :auth: required """ try: search_type = SearchType(int(request.GET.get("type", 0))) except ValueError as e: return Response({"detail": "Invalid input for `type`. Error: %s" % str(e)}, status=400) query = ( SavedSearch.objects # Do not include pinned or personal searches from other users in # the same organization. DOES include the requesting users pinned # search .exclude( ~Q(owner_id=request.user.id), visibility__in=(Visibility.OWNER, Visibility.OWNER_PINNED), ) .filter( Q(organization=organization) | Q(is_global=True), type=search_type, ) .extra(order_by=["name"]) ) return Response(serialize(list(query), request.user)) def post(self, request: Request, organization: Organization) -> Response: serializer: BaseOrganizationSearchSerializer if request.access.has_scope("org:write"): serializer = OrganizationSearchAdminSerializer(data=request.data) else: serializer = OrganizationSearchMemberSerializer(data=request.data) if not serializer.is_valid(): return Response(serializer.errors, status=400) result = serializer.validated_data if result["visibility"] == Visibility.ORGANIZATION: # Check for conflicts against existing org searches if SavedSearch.objects.filter( is_global=False, organization=organization, type=SearchType.ISSUE.value, visibility=Visibility.ORGANIZATION, query=result["query"], ).exists(): return Response( {"detail": f"An organization search for '{result['query']}' already exists"}, status=400, ) elif result["visibility"] == Visibility.OWNER: # Check for conflicts against the user's searches if SavedSearch.objects.filter( is_global=False, organization=organization, type=SearchType.ISSUE.value, visibility=Visibility.OWNER, owner_id=request.user.id, query=result["query"], ).exists(): return Response( {"detail": f"A search for '{result['query']}' already exists"}, status=400, ) saved_search = SavedSearch.objects.create( organization=organization, owner_id=request.user.id, type=result["type"], name=result["name"], query=result["query"], sort=result["sort"], visibility=result["visibility"], ) analytics.record( OrganizationSavedSearchCreatedEvent( search_type=SearchType(saved_search.type).name, org_id=organization.id, query=saved_search.query, ) ) return Response(serialize(saved_search, request.user))
OrganizationSearchesEndpoint
python
getsentry__sentry
src/sentry/notifications/notification_action/issue_alert_registry/handlers/pagerduty_issue_alert_handler.py
{ "start": 407, "end": 800 }
class ____(BaseIssueAlertHandler):
    @classmethod
    def get_target_display(cls, action: Action, mapping: ActionFieldMapping) -> dict[str, Any]:
        return {}

    @classmethod
    def get_additional_fields(cls, action: Action, mapping: ActionFieldMapping) -> dict[str, Any]:
        blob = OnCallDataBlob(**action.data)
        return {"severity": blob.priority}
PagerDutyIssueAlertHandler
python
altair-viz__altair
altair/vegalite/v6/schema/_config.py
{ "start": 249551, "end": 256680 }
class ____(TypedDict, total=False): """ :class:`altair.ScaleConfig` ``TypedDict`` wrapper. Parameters ---------- animationDuration Default animation duration (in seconds) for time encodings, except for `band <https://vega.github.io/vega-lite/docs/scale.html#band>`__ scales. **Default value:** ``5`` bandPaddingInner Default inner padding for ``x`` and ``y`` band scales. **Default value:** * ``nestedOffsetPaddingInner`` for x/y scales with nested x/y offset scales. * ``barBandPaddingInner`` for bar marks (``0.1`` by default) * ``rectBandPaddingInner`` for rect and other marks (``0`` by default) bandPaddingOuter Default outer padding for ``x`` and ``y`` band scales. **Default value:** ``paddingInner/2`` (which makes *width/height = number of unique values * step*) bandWithNestedOffsetPaddingInner Default inner padding for ``x`` and ``y`` band scales with nested ``xOffset`` and ``yOffset`` encoding. **Default value:** ``0.2`` bandWithNestedOffsetPaddingOuter Default outer padding for ``x`` and ``y`` band scales with nested ``xOffset`` and ``yOffset`` encoding. **Default value:** ``0.2`` barBandPaddingInner Default inner padding for ``x`` and ``y`` band-ordinal scales of ``"bar"`` marks. **Default value:** ``0.1`` clamp If true, values that exceed the data domain are clamped to either the minimum or maximum range value continuousPadding Default padding for continuous x/y scales. **Default:** The bar width for continuous x-scale of a vertical bar and continuous y-scale of a horizontal bar.; ``0`` otherwise. framesPerSecond Default framerate (frames per second) for time `band <https://vega.github.io/vega-lite/docs/scale.html#band>`__ scales. **Default value:** ``2`` invalid An object that defines scale outputs per channel for invalid values (nulls and NaNs on a continuous scale). * The keys in this object are the scale channels. * The values is either ``"zero-or-min"`` (use zero if the scale includes zero or min value otherwise) or a value definition ``{value: ...}``. *Example:* Setting this ``config.scale.invalid`` property to ``{color: {value: '#aaa'}}`` will make the visualization color all invalid values with '#aaa'. See [https://vega.github.io/vega-lite/docs/invalid-data.html](Invalid Data Docs) for more details. maxBandSize The default max value for mapping quantitative fields to bar's size/bandSize. If undefined (default), we will use the axis's size (width or height) - 1. maxFontSize The default max value for mapping quantitative fields to text's size/fontSize scale. **Default value:** ``40`` maxOpacity Default max opacity for mapping a field to opacity. **Default value:** ``0.8`` maxSize Default max value for point size scale. maxStrokeWidth Default max strokeWidth for the scale of strokeWidth for rule and line marks and of size for trail marks. **Default value:** ``4`` minBandSize The default min value for mapping quantitative fields to bar and tick's size/bandSize scale. **Default value:** ``2`` minFontSize The default min value for mapping quantitative fields to text's size/fontSize scale. **Default value:** ``8`` minOpacity Default minimum opacity for mapping a field to opacity. **Default value:** ``0.3`` minSize Default minimum value for point size scale. **Default value:** ``9`` minStrokeWidth Default minimum strokeWidth for the scale of strokeWidth for rule and line marks and of size for trail marks. **Default value:** ``1`` offsetBandPaddingInner Default padding inner for xOffset/yOffset's band scales. 
**Default Value:** ``0`` offsetBandPaddingOuter Default padding outer for xOffset/yOffset's band scales. **Default Value:** ``0`` pointPadding Default outer padding for ``x`` and ``y`` point-ordinal scales. **Default value:** ``0.5`` (which makes *width/height = number of unique values * step*) quantileCount Default range cardinality for `quantile <https://vega.github.io/vega-lite/docs/scale.html#quantile>`__ scale. **Default value:** ``4`` quantizeCount Default range cardinality for `quantize <https://vega.github.io/vega-lite/docs/scale.html#quantize>`__ scale. **Default value:** ``4`` rectBandPaddingInner Default inner padding for ``x`` and ``y`` band-ordinal scales of ``"rect"`` marks. **Default value:** ``0`` round If true, rounds numeric output values to integers. This can be helpful for snapping to the pixel grid. (Only available for ``x``, ``y``, and ``size`` scales.) tickBandPaddingInner Default inner padding for ``x`` and ``y`` band-ordinal scales of ``"tick"`` marks. **Default value:** ``0.25`` useUnaggregatedDomain Use the source data range before aggregation as scale domain instead of aggregated data for aggregate axis. This is equivalent to setting ``domain`` to ``"unaggregate"`` for aggregated *quantitative* fields by default. This property only works with aggregate functions that produce values within the raw data domain (``"mean"``, ``"average"``, ``"median"``, ``"q1"``, ``"q3"``, ``"min"``, ``"max"``). For other aggregations that produce values outside of the raw data domain (e.g. ``"count"``, ``"sum"``), this property is ignored. **Default value:** ``false`` xReverse Reverse x-scale by default (useful for right-to-left charts). zero Default ``scale.zero`` for `continuous <https://vega.github.io/vega-lite/docs/scale.html#continuous>`__ scales except for (1) x/y-scales of non-ranged bar or area charts and (2) size scales. **Default value:** ``true`` """ animationDuration: float bandPaddingInner: float bandPaddingOuter: float bandWithNestedOffsetPaddingInner: float bandWithNestedOffsetPaddingOuter: float barBandPaddingInner: float clamp: bool continuousPadding: float framesPerSecond: float invalid: ScaleInvalidDataConfigKwds maxBandSize: float maxFontSize: float maxOpacity: float maxSize: float maxStrokeWidth: float minBandSize: float minFontSize: float minOpacity: float minSize: float minStrokeWidth: float offsetBandPaddingInner: float offsetBandPaddingOuter: float pointPadding: float quantileCount: float quantizeCount: float rectBandPaddingInner: float round: bool tickBandPaddingInner: float useUnaggregatedDomain: bool xReverse: bool zero: bool
ScaleConfigKwds
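Because the wrapper above is a `TypedDict` with `total=False`, any subset of the documented keys type-checks. A hypothetical sketch; the import path is inferred from this record's file path and may differ between Altair versions.

from altair.vegalite.v6.schema._config import ScaleConfigKwds

scale_config: ScaleConfigKwds = {
    "bandPaddingInner": 0.2,  # keys and their defaults are described in the docstring above
    "clamp": True,
    "round": True,
}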
python
django__django
django/contrib/sessions/exceptions.py
{ "start": 69, "end": 171 }
class ____(SuspiciousOperation):
    """Invalid characters in session key"""

    pass
InvalidSessionKey
python
apache__airflow
providers/google/tests/unit/google/cloud/links/test_managed_kafka.py
{ "start": 2615, "end": 2990 }
class ____:
    def test_class_attributes(self):
        assert ApacheKafkaClusterListLink.key == EXPECTED_MANAGED_KAFKA_CLUSTER_LIST_LINK_KEY
        assert ApacheKafkaClusterListLink.name == EXPECTED_MANAGED_KAFKA_CLUSTER_LIST_LINK_NAME
        assert ApacheKafkaClusterListLink.format_str == EXPECTED_MANAGED_KAFKA_CLUSTER_LIST_LINK_FORMAT_STR
TestApacheKafkaClusterListLink
python
doocs__leetcode
solution/3500-3599/3578.Count Partitions With Max-Min Difference at Most K/Solution.py
{ "start": 0, "end": 514 }
class ____:
    def countPartitions(self, nums: List[int], k: int) -> int:
        mod = 10**9 + 7
        sl = SortedList()
        n = len(nums)
        f = [1] + [0] * n
        g = [1] + [0] * n
        l = 1
        for r, x in enumerate(nums, 1):
            sl.add(x)
            while sl[-1] - sl[0] > k:
                sl.remove(nums[l - 1])
                l += 1
            f[r] = (g[r - 1] - (g[l - 2] if l >= 2 else 0) + mod) % mod
            g[r] = (g[r - 1] + f[r]) % mod
        return f[n]
Solution
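A small driver for the solution above, assuming it is defined under its target name `Solution` with `from typing import List` and `from sortedcontainers import SortedList` in scope. For `[1, 2, 3]` with `k = 3` every contiguous split keeps `max - min <= k`, so the count is `2**(n-1) = 4`.

from typing import List                    # needed by the annotation in countPartitions
from sortedcontainers import SortedList    # needed by the sliding-window SortedList

print(Solution().countPartitions([1, 2, 3], 3))  # prints 4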
python
bokeh__bokeh
src/bokeh/document/modules.py
{ "start": 1478, "end": 4952 }
class ____: ''' Keep track of and clean up after modules created while building Bokeh Documents. ''' _document: weakref.ReferenceType[Document] _modules: list[ModuleType] def __init__(self, document: Document): ''' Args: document (Document): A Document to manage modules for A weak reference to the Document will be retained ''' self._document = weakref.ref(document) self._modules = [] def __len__(self) -> int: return len(self._modules) def add(self, module: ModuleType) -> None: ''' Add a module associated with a Document. .. note:: This method will install the module in ``sys.modules`` Args: module (Module) : a module to install for the configured Document Returns: None ''' if module.__name__ in sys.modules: raise RuntimeError(f"Add called already-added module {module.__name__!r} for {self._document()!r}") sys.modules[module.__name__] = module self._modules.append(module) def destroy(self) -> None: ''' Clean up any added modules, and check that there are no unexpected referrers afterwards. Returns: None ''' from gc import get_referrers from types import FrameType log.debug(f"Deleting {len(self._modules)} modules for document {self._document()!r}") for module in self._modules: # Modules created for a Document should have three referrers at this point: # # - sys.modules # - self._modules # - a frame object # # This function will take care of removing these expected references. # # If there are any additional referrers, this probably means the module will be # leaked. Here we perform a detailed check that the only referrers are expected # ones. Otherwise issue an error log message with details. referrers = get_referrers(module) referrers = [x for x in referrers if x is not sys.modules] referrers = [x for x in referrers if x is not self._modules] referrers = [x for x in referrers if not isinstance(x, FrameType)] if len(referrers) != 0: log.error(f"Module {module!r} has extra unexpected referrers! This could indicate a serious memory leak. Extra referrers: {referrers!r}") # remove the reference from sys.modules if module.__name__ in sys.modules: del sys.modules[module.__name__] # explicitly clear the module contents and the module here itself module.__dict__.clear() del module # remove the references from self._modules self._modules = [] # the frame reference will take care of itself #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
DocumentModuleManager
python
bokeh__bokeh
src/bokeh/models/tickers.py
{ "start": 11868, "end": 13286 }
class ____(CompositeTicker): ''' Generate nice ticks across different date and time scales. ''' # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) num_minor_ticks = Override(default=0) # TODO: (bev) InstanceDefault for this, someday tickers = Override(default=lambda: [ AdaptiveTicker( mantissas=[1, 2, 5], base=10, min_interval=0, max_interval=500*ONE_MILLI, num_minor_ticks=0, ), AdaptiveTicker( mantissas=[1, 2, 5, 10, 15, 20, 30], base=60, min_interval=ONE_SECOND, max_interval=30*ONE_MINUTE, num_minor_ticks=0, ), AdaptiveTicker( mantissas=[1, 2, 4, 6, 8, 12], base=24, min_interval=ONE_HOUR, max_interval=12*ONE_HOUR, num_minor_ticks=0, ), DaysTicker(days=list(range(1, 32))), DaysTicker(days=list(range(1, 31, 3))), DaysTicker(days=[1, 8, 15, 22]), DaysTicker(days=[1, 15]), MonthsTicker(months=list(range(0, 12, 1))), MonthsTicker(months=list(range(0, 12, 2))), MonthsTicker(months=list(range(0, 12, 4))), MonthsTicker(months=list(range(0, 12, 6))), YearsTicker(), ])
DatetimeTicker
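A short sketch, assuming bokeh is installed: a datetime axis created with `figure(x_axis_type="datetime")` already uses this ticker by default, and assigning one explicitly is how you would override properties such as `num_minor_ticks`.

from bokeh.models import DatetimeTicker
from bokeh.plotting import figure

p = figure(x_axis_type="datetime")
p.xaxis.ticker = DatetimeTicker(num_minor_ticks=5)  # replace the default ticker on the x axis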
python
Pylons__pyramid
src/pyramid/config/assets.py
{ "start": 3165, "end": 6700 }
class ____: # pkg_resources arg in kw args below for testing def __init__(self, package, pkg_resources=pkg_resources): loader = self._real_loader = getattr(package, '__loader__', None) if isinstance(loader, self.__class__): self._real_loader = None # We register ourselves as a __loader__ *only* to support the # setuptools _find_adapter adapter lookup; this class doesn't # actually support the PEP 302 loader "API". This is # excusable due to the following statement in the spec: # ... Loader objects are not # required to offer any useful functionality (any such functionality, # such as the zipimport get_data() method mentioned above, is # optional)... # A __loader__ attribute is basically metadata, and setuptools # uses it as such. package.__loader__ = self # we call register_loader_type for every instantiation of this # class; that's OK, it's idempotent to do it more than once. pkg_resources.register_loader_type(self.__class__, OverrideProvider) self.overrides = [] self.overridden_package_name = package.__name__ def insert(self, path, source): if not path or path.endswith('/'): override = DirectoryOverride(path, source) else: override = FileOverride(path, source) self.overrides.insert(0, override) return override def filtered_sources(self, resource_name): for override in self.overrides: o = override(resource_name) if o is not None: yield o def get_filename(self, resource_name): for source, path in self.filtered_sources(resource_name): result = source.get_filename(path) if result is not None: return result def get_stream(self, resource_name): for source, path in self.filtered_sources(resource_name): result = source.get_stream(path) if result is not None: return result def get_string(self, resource_name): for source, path in self.filtered_sources(resource_name): result = source.get_string(path) if result is not None: return result def has_resource(self, resource_name): for source, path in self.filtered_sources(resource_name): if source.exists(path): return True def isdir(self, resource_name): for source, path in self.filtered_sources(resource_name): result = source.isdir(path) if result is not None: return result def listdir(self, resource_name): for source, path in self.filtered_sources(resource_name): result = source.listdir(path) if result is not None: return result @property def real_loader(self): if self._real_loader is None: raise NotImplementedError() return self._real_loader def get_data(self, path): """See IPEP302Loader.""" return self.real_loader.get_data(path) def is_package(self, fullname): """See IPEP302Loader.""" return self.real_loader.is_package(fullname) def get_code(self, fullname): """See IPEP302Loader.""" return self.real_loader.get_code(fullname) def get_source(self, fullname): """See IPEP302Loader.""" return self.real_loader.get_source(fullname)
PackageOverrides
python
Pylons__pyramid
tests/test_scripts/dummy.py
{ "start": 3389, "end": 3540 }
class ____:
    def __call__(self, config_uri, global_conf):
        self.config_uri = config_uri
        self.defaults = global_conf
dummy_setup_logging
python
aio-libs__aiohttp
aiohttp/web_server.py
{ "start": 673, "end": 4206 }
class ____(Generic[_Request]): request_factory: _RequestFactory[_Request] @overload def __init__( self: "Server[BaseRequest]", handler: Callable[[_Request], Awaitable[StreamResponse]], *, debug: bool | None = None, handler_cancellation: bool = False, **kwargs: Any, # TODO(PY311): Use Unpack to define kwargs from RequestHandler ) -> None: ... @overload def __init__( self, handler: Callable[[_Request], Awaitable[StreamResponse]], *, request_factory: _RequestFactory[_Request] | None, debug: bool | None = None, handler_cancellation: bool = False, **kwargs: Any, ) -> None: ... def __init__( self, handler: Callable[[_Request], Awaitable[StreamResponse]], *, request_factory: _RequestFactory[_Request] | None = None, debug: bool | None = None, handler_cancellation: bool = False, **kwargs: Any, ) -> None: if debug is not None: warnings.warn( "debug argument is no-op since 4.0 and scheduled for removal in 5.0", DeprecationWarning, stacklevel=2, ) self._loop = asyncio.get_running_loop() self._connections: dict[RequestHandler[_Request], asyncio.Transport] = {} self._kwargs = kwargs # requests_count is the number of requests being processed by the server # for the lifetime of the server. self.requests_count = 0 self.request_handler = handler self.request_factory = request_factory or self._make_request # type: ignore[assignment] self.handler_cancellation = handler_cancellation @property def connections(self) -> list[RequestHandler[_Request]]: return list(self._connections.keys()) def connection_made( self, handler: RequestHandler[_Request], transport: asyncio.Transport ) -> None: self._connections[handler] = transport def connection_lost( self, handler: RequestHandler[_Request], exc: BaseException | None = None ) -> None: if handler in self._connections: if handler._task_handler: handler._task_handler.add_done_callback( lambda f: self._connections.pop(handler, None) ) else: del self._connections[handler] def _make_request( self, message: RawRequestMessage, payload: StreamReader, protocol: RequestHandler[BaseRequest], writer: AbstractStreamWriter, task: "asyncio.Task[None]", ) -> BaseRequest: return BaseRequest(message, payload, protocol, writer, task, self._loop) def pre_shutdown(self) -> None: for conn in self._connections: conn.close() async def shutdown(self, timeout: float | None = None) -> None: coros = (conn.shutdown(timeout) for conn in self._connections) await asyncio.gather(*coros) self._connections.clear() def __call__(self) -> RequestHandler[_Request]: try: return RequestHandler(self, loop=self._loop, **self._kwargs) except TypeError: # Failsafe creation: remove all custom handler_args kwargs = { k: v for k, v in self._kwargs.items() if k in ["debug", "access_log_class"] } return RequestHandler(self, loop=self._loop, **kwargs)
Server
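A hedged sketch of the low-level server flow this class backs, following the pattern in aiohttp's documentation; the host, port, and one-minute lifetime are illustrative:

import asyncio
from aiohttp import web

async def handler(request):
    return web.Response(text="OK")

async def main():
    server = web.Server(handler)          # the low-level protocol factory shown above
    runner = web.ServerRunner(server)
    await runner.setup()
    site = web.TCPSite(runner, "localhost", 8080)
    await site.start()
    await asyncio.sleep(60)               # keep serving for a minute
    await runner.cleanup()

asyncio.run(main())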
python
wandb__wandb
wandb/vendor/graphql-core-1.1/wandb_graphql/language/ast.py
{ "start": 12079, "end": 12784 }
class ____(Value):
    __slots__ = ('loc', 'value',)
    _fields = ('value',)

    def __init__(self, value, loc=None):
        self.loc = loc
        self.value = value

    def __eq__(self, other):
        return (
            self is other or (
                isinstance(other, FloatValue) and
                # self.loc == other.loc and
                self.value == other.value
            )
        )

    def __repr__(self):
        return ('FloatValue('
                'value={self.value!r}'
                ')').format(self=self)

    def __copy__(self):
        return type(self)(
            self.value,
            self.loc
        )

    def __hash__(self):
        return id(self)
FloatValue
python
apache__airflow
providers/edge3/src/airflow/providers/edge3/worker_api/datamodels.py
{ "start": 4235, "end": 5317 }
class ____(WorkerQueuesBase): """Details of the worker state sent to the scheduler.""" state: Annotated[EdgeWorkerState, Field(description="State of the worker from the view of the worker.")] jobs_active: Annotated[int, Field(description="Number of active jobs the worker is running.")] = 0 queues: Annotated[ list[str] | None, Field( description="List of queues the worker is pulling jobs from. If not provided, worker pulls from all queues." ), ] = None sysinfo: Annotated[ dict[str, str | int], Field( description="System information of the worker.", examples=[ { "concurrency": 4, "free_concurrency": 3, "airflow_version": "2.0.0", "edge_provider_version": "1.0.0", } ], ), ] maintenance_comments: Annotated[ str | None, Field(description="Comments about the maintenance state of the worker."), ] = None
WorkerStateBody
python
airbytehq__airbyte
airbyte-integrations/bases/base-normalization/normalization/transform_catalog/table_name_registry.py
{ "start": 788, "end": 1231 }
class ____: """ A record summary of a name conflict detected and resolved in TableNameRegistry """ def __init__(self, schema: str, json_path: List[str], table_name_conflict: str, table_name_resolved: str): self.schema: str = schema self.json_path: List[str] = json_path self.table_name_conflict: str = table_name_conflict self.table_name_resolved: str = table_name_resolved
ConflictedNameMetadata
python
scrapy__scrapy
scrapy/downloadermiddlewares/retry.py
{ "start": 4752, "end": 6809 }
class ____: crawler: Crawler def __init__(self, settings: BaseSettings): if not settings.getbool("RETRY_ENABLED"): raise NotConfigured self.max_retry_times = settings.getint("RETRY_TIMES") self.retry_http_codes = {int(x) for x in settings.getlist("RETRY_HTTP_CODES")} self.priority_adjust = settings.getint("RETRY_PRIORITY_ADJUST") self.exceptions_to_retry = tuple( load_object(x) if isinstance(x, str) else x for x in settings.getlist("RETRY_EXCEPTIONS") ) @classmethod def from_crawler(cls, crawler: Crawler) -> Self: o = cls(crawler.settings) o.crawler = crawler return o @_warn_spider_arg def process_response( self, request: Request, response: Response, spider: Spider | None = None ) -> Request | Response: if request.meta.get("dont_retry", False): return response if response.status in self.retry_http_codes: reason = response_status_message(response.status) return self._retry(request, reason) or response return response @_warn_spider_arg def process_exception( self, request: Request, exception: Exception, spider: Spider | None = None ) -> Request | Response | None: if isinstance(exception, self.exceptions_to_retry) and not request.meta.get( "dont_retry", False ): return self._retry(request, exception) return None def _retry( self, request: Request, reason: str | Exception | type[Exception] ) -> Request | None: max_retry_times = request.meta.get("max_retry_times", self.max_retry_times) priority_adjust = request.meta.get("priority_adjust", self.priority_adjust) assert self.crawler.spider return get_retry_request( request, reason=reason, spider=self.crawler.spider, max_retry_times=max_retry_times, priority_adjust=priority_adjust, )
RetryMiddleware
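The constructor above reads standard Scrapy settings. A project `settings.py` sketch; the values shown are the usual defaults and should be adjusted per project:

RETRY_ENABLED = True            # when False, __init__ raises NotConfigured and the middleware is disabled
RETRY_TIMES = 2                 # per-request override: request.meta["max_retry_times"]
RETRY_HTTP_CODES = [500, 502, 503, 504, 522, 524, 408, 429]
RETRY_PRIORITY_ADJUST = -1      # retried requests are rescheduled with lower priority
RETRY_EXCEPTIONS = [
    "twisted.internet.defer.TimeoutError",  # dotted paths are resolved via load_object()
    OSError,                                # already-imported exception types work too
]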
python
doocs__leetcode
lcci/01.02.Check Permutation/Solution2.py
{ "start": 0, "end": 114 }
class ____:
    def CheckPermutation(self, s1: str, s2: str) -> bool:
        return sorted(s1) == sorted(s2)
Solution
python
PrefectHQ__prefect
src/prefect/events/clients.py
{ "start": 4334, "end": 5372 }
class ____(abc.ABC): """The abstract interface for all Prefect Events clients""" @property def client_name(self) -> str: return self.__class__.__name__ async def emit(self, event: Event) -> None: """Emit a single event""" if not hasattr(self, "_in_context"): raise TypeError( "Events may only be emitted while this client is being used as a " "context manager" ) try: return await self._emit(event) finally: EVENTS_EMITTED.labels(self.client_name).inc() @abc.abstractmethod async def _emit(self, event: Event) -> None: # pragma: no cover ... async def __aenter__(self) -> Self: self._in_context = True return self async def __aexit__( self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType], ) -> None: del self._in_context return None
EventsClient
python
huggingface__transformers
src/transformers/models/fastspeech2_conformer/modeling_fastspeech2_conformer.py
{ "start": 8082, "end": 9427 }
class ____(nn.Module): def __init__(self, config, layer_id=0): super().__init__() if layer_id == 0: in_conv_dim = config.num_mel_bins else: in_conv_dim = config.speech_decoder_postnet_units if layer_id == config.speech_decoder_postnet_layers - 1: out_conv_dim = config.num_mel_bins else: out_conv_dim = config.speech_decoder_postnet_units self.conv = nn.Conv1d( in_conv_dim, out_conv_dim, kernel_size=config.speech_decoder_postnet_kernel, stride=1, padding=(config.speech_decoder_postnet_kernel - 1) // 2, bias=False, ) self.batch_norm = nn.BatchNorm1d(out_conv_dim) if layer_id < config.speech_decoder_postnet_layers - 1: self.activation = nn.Tanh() else: self.activation = None self.dropout = nn.Dropout(config.speech_decoder_postnet_dropout) def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.batch_norm(hidden_states) if self.activation is not None: hidden_states = self.activation(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states
FastSpeech2ConformerBatchNormConvLayer
python
huggingface__transformers
src/transformers/models/dac/modeling_dac.py
{ "start": 18926, "end": 20069 }
class ____(nn.Module): """DAC Encoder""" def __init__(self, config: DacConfig): super().__init__() strides = config.downsampling_ratios # Create first convolution self.conv1 = nn.Conv1d(1, config.encoder_hidden_size, kernel_size=7, padding=3) self.block = [] # Create EncoderBlocks that double channels as they downsample by `stride` for stride_index, stride in enumerate(strides): stride_index = stride_index + 1 self.block += [DacEncoderBlock(config, stride=stride, stride_index=stride_index)] self.block = nn.ModuleList(self.block) d_model = config.encoder_hidden_size * 2**stride_index self.snake1 = Snake1d(d_model) self.conv2 = nn.Conv1d(d_model, config.hidden_size, kernel_size=3, padding=1) def forward(self, hidden_state): hidden_state = self.conv1(hidden_state) for module in self.block: hidden_state = module(hidden_state) hidden_state = self.snake1(hidden_state) hidden_state = self.conv2(hidden_state) return hidden_state @auto_docstring
DacEncoder
python
facebookresearch__faiss
faiss/gpu/test/test_gpu_index.py
{ "start": 459, "end": 3645 }
class ____(unittest.TestCase): def test_ivfflat_search_preassigned(self): res = faiss.StandardGpuResources() d = 50 nb = 50000 nq = 100 nlist = 128 nprobe = 10 k = 50 config = faiss.GpuIndexIVFFlatConfig() config.use_cuvs = False idx_gpu = faiss.GpuIndexIVFFlat(res, d, nlist, faiss.METRIC_L2, config) idx_gpu.nprobe = nprobe rs = np.random.RandomState(567) xb = rs.rand(nb, d).astype('float32') xq = rs.rand(nq, d).astype('float32') idx_gpu.train(xb) idx_gpu.add(xb) # Search separately using the same quantizer q_d, q_i = idx_gpu.quantizer.search(xq, nprobe) preassigned_d, preassigned_i = ivf_tools.search_preassigned( idx_gpu, xq, k, q_i, q_d) # Search using the standard API d, i = idx_gpu.search(xq, k) # The two results should be exactly the same self.assertEqual((d == preassigned_d).sum(), d.size) self.assertEqual((i == preassigned_i).sum(), i.size) def test_ivfpq_search_preassigned(self): res = faiss.StandardGpuResources() d = 64 nb = 50000 nq = 100 nlist = 128 nprobe = 5 k = 50 config = faiss.GpuIndexIVFPQConfig() config.use_cuvs = False idx_gpu = faiss.GpuIndexIVFPQ(res, d, nlist, 4, 8, faiss.METRIC_L2, config) idx_gpu.nprobe = nprobe rs = np.random.RandomState(567) xb = rs.rand(nb, d).astype('float32') xq = rs.rand(nq, d).astype('float32') idx_gpu.train(xb) idx_gpu.add(xb) # Search separately using the same quantizer q_d, q_i = idx_gpu.quantizer.search(xq, nprobe) preassigned_d, preassigned_i = ivf_tools.search_preassigned( idx_gpu, xq, k, q_i, q_d) # Search using the standard API d, i = idx_gpu.search(xq, k) # The two results should be exactly the same self.assertEqual((d == preassigned_d).sum(), d.size) self.assertEqual((i == preassigned_i).sum(), i.size) def test_ivfsq_search_preassigned(self): res = faiss.StandardGpuResources() d = 64 nb = 50000 nq = 100 nlist = 128 nprobe = 5 k = 50 idx_gpu = faiss.GpuIndexIVFScalarQuantizer( res, d, nlist, faiss.ScalarQuantizer.QT_6bit, faiss.METRIC_INNER_PRODUCT) idx_gpu.nprobe = nprobe rs = np.random.RandomState(567) xb = rs.rand(nb, d).astype('float32') xq = rs.rand(nq, d).astype('float32') idx_gpu.train(xb) idx_gpu.add(xb) # Search separately using the same quantizer q_d, q_i = idx_gpu.quantizer.search(xq, nprobe) preassigned_d, preassigned_i = ivf_tools.search_preassigned( idx_gpu, xq, k, q_i, q_d) # Search using the standard API d, i = idx_gpu.search(xq, k) # The two results should be exactly the same self.assertEqual((d == preassigned_d).sum(), d.size) self.assertEqual((i == preassigned_i).sum(), i.size)
TestIVFSearchPreassigned
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 576805, "end": 577191 }
class ____(sgqlc.types.Type):
    """
    See source code for more info.
    """

    __schema__ = graphql_schema
    __field_names__ = ("client_mutation_id", "verification_token")
    client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
    verification_token = sgqlc.types.Field(String, graphql_name="verificationToken")
RegenerateVerifiableDomainTokenPayload
python
oauthlib__oauthlib
oauthlib/oauth2/rfc6749/endpoints/resource.py
{ "start": 315, "end": 3243 }
class ____(BaseEndpoint): """Authorizes access to protected resources. The client accesses protected resources by presenting the access token to the resource server. The resource server MUST validate the access token and ensure that it has not expired and that its scope covers the requested resource. The methods used by the resource server to validate the access token (as well as any error responses) are beyond the scope of this specification but generally involve an interaction or coordination between the resource server and the authorization server:: # For most cases, returning a 403 should suffice. The method in which the client utilizes the access token to authenticate with the resource server depends on the type of access token issued by the authorization server. Typically, it involves using the HTTP "Authorization" request header field [RFC2617] with an authentication scheme defined by the specification of the access token type used, such as [RFC6750]:: # Access tokens may also be provided in query and body https://example.com/protected?access_token=kjfch2345sdf # Query access_token=sdf23409df # Body """ def __init__(self, default_token, token_types): BaseEndpoint.__init__(self) self._tokens = token_types self._default_token = default_token @property def default_token(self): return self._default_token @property def default_token_type_handler(self): return self.tokens.get(self.default_token) @property def tokens(self): return self._tokens @catch_errors_and_unavailability def verify_request(self, uri, http_method='GET', body=None, headers=None, scopes=None): """Validate client, code etc, return body + headers""" request = Request(uri, http_method, body, headers) request.token_type = self.find_token_type(request) request.scopes = scopes token_type_handler = self.tokens.get(request.token_type, self.default_token_type_handler) log.debug('Dispatching token_type %s request to %r.', request.token_type, token_type_handler) return token_type_handler.validate_request(request), request def find_token_type(self, request): """Token type identification. RFC 6749 does not provide a method for easily differentiating between different token types during protected resource access. We estimate the most likely token type (if any) by asking each known token type to give an estimation based on the request. """ estimates = sorted(((t.estimate_type(request), n) for n, t in self.tokens.items()), reverse=True) return estimates[0][1] if estimates else None
ResourceEndpoint
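The token-type estimation described in `find_token_type` above can be illustrated standalone with stand-in handlers; this is not oauthlib API, just the same sorted-estimates idea applied to plain objects:

class _BearerStub:
    def estimate_type(self, request):
        # high confidence when an Authorization: Bearer header is present
        return 9 if "Bearer" in request.get("Authorization", "") else 0

class _MacStub:
    def estimate_type(self, request):
        return 5 if "MAC" in request.get("Authorization", "") else 0

tokens = {"Bearer": _BearerStub(), "MAC": _MacStub()}
request = {"Authorization": "Bearer abc123"}  # stand-in for oauthlib's Request object

estimates = sorted(((t.estimate_type(request), n) for n, t in tokens.items()), reverse=True)
print(estimates[0][1] if estimates else None)  # "Bearer"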
python
readthedocs__readthedocs.org
readthedocs/api/v3/serializers.py
{ "start": 35673, "end": 36368 }
class ____(BaseLinksSerializer):
    _self = serializers.SerializerMethodField()
    project = serializers.SerializerMethodField()

    def get__self(self, obj):
        path = reverse(
            "projects-environmentvariables-detail",
            kwargs={
                "parent_lookup_project__slug": obj.project.slug,
                "environmentvariable_pk": obj.pk,
            },
        )
        return self._absolute_url(path)

    def get_project(self, obj):
        path = reverse(
            "projects-detail",
            kwargs={
                "project_slug": obj.project.slug,
            },
        )
        return self._absolute_url(path)
EnvironmentVariableLinksSerializer
python
getsentry__sentry
src/sentry/core/endpoints/organization_member_team_details.py
{ "start": 2784, "end": 3140 }
class ____(Serializer):
    def serialize(
        self, obj: OrganizationMemberTeam, attrs: Mapping[Any, Any], user: Any, **kwargs: Any
    ) -> OrganizationMemberTeamSerializerResponse:
        return {
            "isActive": obj.is_active,
            "teamRole": obj.role,  # type:ignore[typeddict-item]
        }
OrganizationMemberTeamDetailsSerializer
python
ionelmc__pytest-benchmark
src/pytest_benchmark/stats.py
{ "start": 4449, "end": 7581 }
class ____: cprofile_stats: pstats.Stats def __init__(self, fixture, iterations, options): self.name = fixture.name self.fullname = fixture.fullname self.group = fixture.group self.param = fixture.param self.params = fixture.params self.extra_info = fixture.extra_info self.cprofile_stats = fixture.cprofile_stats self.iterations = iterations self.stats = Stats() self.options = options self.fixture = fixture def __bool__(self): return bool(self.stats) def __nonzero__(self): return bool(self.stats) def get(self, key, default=None): try: return getattr(self.stats, key) except AttributeError: return getattr(self, key, default) def __getitem__(self, key): try: return getattr(self.stats, key) except AttributeError: return getattr(self, key) @property def has_error(self): return self.fixture.has_error def as_dict(self, include_data=True, flat=False, stats=True, cprofile=None): result = { 'group': self.group, 'name': self.name, 'fullname': self.fullname, 'params': self.params, 'param': self.param, 'extra_info': self.extra_info, 'options': {k: funcname(v) if callable(v) else v for k, v in self.options.items()}, } if self.cprofile_stats: cprofile_list = result['cprofile'] = [] cprofile_functions = get_cprofile_functions(self.cprofile_stats) stats_columns = ['cumtime', 'tottime', 'ncalls', 'ncalls_recursion', 'tottime_per', 'cumtime_per', 'function_name'] cprofile_sort_by, cprofile_top = (None, 25) if cprofile is None else cprofile # move column first if cprofile_sort_by is not None: stats_columns.remove(cprofile_sort_by) stats_columns.insert(0, cprofile_sort_by) for column in stats_columns: cprofile_functions.sort(key=operator.itemgetter(column), reverse=True) for cprofile_function in cprofile_functions[:cprofile_top]: if cprofile_function not in cprofile_list: cprofile_list.append(cprofile_function) # if we want only one column, or we already have all available functions if cprofile_sort_by is None or len(cprofile_functions) == len(cprofile_list): break if stats: stats = self.stats.as_dict() if include_data: stats['data'] = self.stats.data stats['iterations'] = self.iterations if flat: result.update(stats) else: result['stats'] = stats return result def update(self, duration): self.stats.update(duration / self.iterations) def normalize_stats(stats): if 'ops' not in stats: # fill field added in 3.1.0 stats['ops'] = 1 / stats['mean'] return stats
Metadata
python
Textualize__textual
docs/examples/styles/outline.py
{ "start": 384, "end": 553 }
class ____(App):
    CSS_PATH = "outline.tcss"

    def compose(self):
        yield Label(TEXT)


if __name__ == "__main__":
    app = OutlineApp()
    app.run()
OutlineApp
python
encode__starlette
starlette/templating.py
{ "start": 987, "end": 2065 }
class ____(HTMLResponse): def __init__( self, template: Any, context: dict[str, Any], status_code: int = 200, headers: Mapping[str, str] | None = None, media_type: str | None = None, background: BackgroundTask | None = None, ): self.template = template self.context = context content = template.render(context) super().__init__(content, status_code, headers, media_type, background) async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: request = self.context.get("request", {}) extensions = request.get("extensions", {}) if "http.response.debug" in extensions: # pragma: no branch await send( { "type": "http.response.debug", "info": { "template": self.template, "context": self.context, }, } ) await super().__call__(scope, receive, send)
_TemplateResponse
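In application code this response class is usually produced by `Jinja2Templates.TemplateResponse` rather than instantiated directly. A minimal sketch, assuming `jinja2` is installed and a `templates/index.html` file exists; passing the request inside the context matches the `self.context.get("request", {})` lookup above.

from starlette.applications import Starlette
from starlette.routing import Route
from starlette.templating import Jinja2Templates

templates = Jinja2Templates(directory="templates")

async def homepage(request):
    return templates.TemplateResponse("index.html", {"request": request})

app = Starlette(routes=[Route("/", homepage)])
# serve with, e.g.: uvicorn module_name:app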
python
django__django
tests/view_tests/models.py
{ "start": 681, "end": 941 }
class ____(BaseArticle):
    """
    An Article class with a get_absolute_url defined.
    """

    date_created = models.DateTimeField()

    def get_absolute_url(self):
        return "/urlarticles/%s/" % self.slug

    get_absolute_url.purge = True
UrlArticle
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 987615, "end": 989329 }
class ____(sgqlc.types.relay.Connection): """The connection type for StatusCheckRollupContext.""" __schema__ = github_schema __field_names__ = ( "check_run_count", "check_run_counts_by_state", "edges", "nodes", "page_info", "status_context_count", "status_context_counts_by_state", "total_count", ) check_run_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="checkRunCount") """The number of check runs in this rollup.""" check_run_counts_by_state = sgqlc.types.Field( sgqlc.types.list_of(sgqlc.types.non_null(CheckRunStateCount)), graphql_name="checkRunCountsByState" ) """Counts of check runs by state.""" edges = sgqlc.types.Field(sgqlc.types.list_of("StatusCheckRollupContextEdge"), graphql_name="edges") """A list of edges.""" nodes = sgqlc.types.Field(sgqlc.types.list_of("StatusCheckRollupContext"), graphql_name="nodes") """A list of nodes.""" page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo") """Information to aid in pagination.""" status_context_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="statusContextCount") """The number of status contexts in this rollup.""" status_context_counts_by_state = sgqlc.types.Field( sgqlc.types.list_of(sgqlc.types.non_null("StatusContextStateCount")), graphql_name="statusContextCountsByState" ) """Counts of status contexts by state.""" total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount") """Identifies the total count of items in the connection."""
StatusCheckRollupContextConnection
python
apache__airflow
providers/google/tests/unit/google/cloud/operators/test_dataflow.py
{ "start": 10332, "end": 13399 }
class ____: @pytest.fixture def sync_operator(self): return DataflowStartFlexTemplateOperator( task_id="start_flex_template_streaming_beam_sql", body={"launchParameter": TEST_FLEX_PARAMETERS}, do_xcom_push=True, project_id=TEST_PROJECT, location=TEST_LOCATION, expected_terminal_state=DataflowJobStatus.JOB_STATE_DONE, ) @pytest.fixture def deferrable_operator(self): return DataflowStartFlexTemplateOperator( task_id="start_flex_template_streaming_beam_sql", body={"launchParameter": TEST_FLEX_PARAMETERS}, do_xcom_push=True, project_id=TEST_PROJECT, location=TEST_LOCATION, deferrable=True, ) @mock.patch(f"{DATAFLOW_PATH}.DataflowHook") def test_execute(self, mock_dataflow, sync_operator): sync_operator.execute(mock.MagicMock()) mock_dataflow.assert_called_once_with( gcp_conn_id="google_cloud_default", drain_pipeline=False, expected_terminal_state=DataflowJobStatus.JOB_STATE_DONE, cancel_timeout=600, wait_until_finished=None, impersonation_chain=None, ) mock_dataflow.return_value.start_flex_template.assert_called_once_with( body={"launchParameter": TEST_FLEX_PARAMETERS}, location=TEST_LOCATION, project_id=TEST_PROJECT, on_new_job_callback=mock.ANY, ) def test_on_kill(self, sync_operator): sync_operator.hook = mock.MagicMock() sync_operator.job = {"id": JOB_ID, "projectId": TEST_PROJECT, "location": TEST_LOCATION} sync_operator.on_kill() sync_operator.hook.cancel_job.assert_called_once_with( job_id="test-dataflow-pipeline-id", project_id=TEST_PROJECT, location=TEST_LOCATION ) def test_validation_deferrable_params_raises_error(self): init_kwargs = { "task_id": "start_flex_template_streaming_beam_sql", "body": {"launchParameter": TEST_FLEX_PARAMETERS}, "do_xcom_push": True, "location": TEST_LOCATION, "project_id": TEST_PROJECT, "wait_until_finished": True, "deferrable": True, } with pytest.raises(ValueError, match=CONFLICTING_DEFERABLE_WAIT_UNTIL_FINISHED): DataflowStartFlexTemplateOperator(**init_kwargs) @mock.patch(f"{DATAFLOW_PATH}.DataflowStartFlexTemplateOperator.defer") @mock.patch(f"{DATAFLOW_PATH}.DataflowHook") def test_execute_with_deferrable_mode(self, mock_hook, mock_defer_method, deferrable_operator): deferrable_operator.execute(mock.MagicMock()) mock_hook.return_value.launch_job_with_flex_template.assert_called_once_with( body={"launchParameter": TEST_FLEX_PARAMETERS}, location=TEST_LOCATION, project_id=TEST_PROJECT, ) mock_defer_method.assert_called_once()
TestDataflowStartFlexTemplateOperator
python
tensorflow__tensorflow
tensorflow/python/keras/losses.py
{ "start": 35337, "end": 37170 }
class ____(LossFunctionWrapper): """Computes the Poisson loss between `y_true` and `y_pred`. `loss = y_pred - y_true * log(y_pred)` Standalone usage: >>> y_true = [[0., 1.], [0., 0.]] >>> y_pred = [[1., 1.], [0., 0.]] >>> # Using 'auto'/'sum_over_batch_size' reduction type. >>> p = tf.keras.losses.Poisson() >>> p(y_true, y_pred).numpy() 0.5 >>> # Calling with 'sample_weight'. >>> p(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy() 0.4 >>> # Using 'sum' reduction type. >>> p = tf.keras.losses.Poisson( ... reduction=tf.keras.losses.Reduction.SUM) >>> p(y_true, y_pred).numpy() 0.999 >>> # Using 'none' reduction type. >>> p = tf.keras.losses.Poisson( ... reduction=tf.keras.losses.Reduction.NONE) >>> p(y_true, y_pred).numpy() array([0.999, 0.], dtype=float32) Usage with the `compile()` API: ```python model.compile(optimizer='sgd', loss=tf.keras.losses.Poisson()) ``` """ def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='poisson'): """Initializes `Poisson` instance. Args: reduction: Type of `tf.keras.losses.Reduction` to apply to loss. Default value is `AUTO`. `AUTO` indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to `SUM_OVER_BATCH_SIZE`. When used with `tf.distribute.Strategy`, outside of built-in training loops such as `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE` will raise an error. Please see this custom training [tutorial]( https://www.tensorflow.org/tutorials/distribute/custom_training) for more details. name: Optional name for the instance. Defaults to 'poisson'. """ super().__init__(poisson, name=name, reduction=reduction)
Poisson
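A plain-NumPy check of the `loss = y_pred - y_true * log(y_pred)` formula against the `0.5` quoted in the docstring above; the small epsilon here is an assumption standing in for the backend's fuzz factor and keeps `log(0)` finite.

import numpy as np

y_true = np.array([[0., 1.], [0., 0.]])
y_pred = np.array([[1., 1.], [0., 0.]])

eps = 1e-7  # illustrative stand-in for the backend epsilon
per_element = y_pred - y_true * np.log(y_pred + eps)
per_sample = per_element.mean(axis=-1)   # reduce over the last axis
print(per_sample.mean())                 # ~0.5, i.e. the SUM_OVER_BATCH_SIZE result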
python
openai__openai-python
src/openai/resources/beta/threads/messages.py
{ "start": 27314, "end": 28524 }
class ____: def __init__(self, messages: AsyncMessages) -> None: self._messages = messages self.create = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( messages.create, # pyright: ignore[reportDeprecated], ) ) self.retrieve = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( messages.retrieve, # pyright: ignore[reportDeprecated], ) ) self.update = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( messages.update, # pyright: ignore[reportDeprecated], ) ) self.list = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( messages.list, # pyright: ignore[reportDeprecated], ) ) self.delete = ( # pyright: ignore[reportDeprecated] _legacy_response.async_to_raw_response_wrapper( messages.delete, # pyright: ignore[reportDeprecated], ) )
AsyncMessagesWithRawResponse
python
airbytehq__airbyte
airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/streams.py
{ "start": 13637, "end": 13751 }
class ____(AdsInsights):
    breakdowns = ["age"]
    action_breakdowns = ["action_type"]
AdsInsightsDemographicsAge
python
great-expectations__great_expectations
contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_gtin_variable_measure_trade_item.py
{ "start": 1031, "end": 2075 }
class ____(ColumnMapMetricProvider): # This is the id string that will be used to reference your metric. condition_metric_name = "column_values.gtin_variable_measure_trade_item" # This method implements the core logic for the PandasExecutionEngine @column_condition_partial(engine=PandasExecutionEngine) def _pandas(cls, column, **kwargs): return column.apply(lambda x: is_gtin_variable_measure_trade_item(x)) # This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine # @column_condition_partial(engine=SqlAlchemyExecutionEngine) # def _sqlalchemy(cls, column, _dialect, **kwargs): # raise NotImplementedError # This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine # @column_condition_partial(engine=SparkDFExecutionEngine) # def _spark(cls, column, **kwargs): # raise NotImplementedError # This class defines the Expectation itself
ColumnValuesToBeGtinVariableMeasureTradeItem
python
tensorflow__tensorflow
tensorflow/python/autograph/impl/api.py
{ "start": 3568, "end": 3690 }
class ____(AutoGraphError):
  """Raised during the staging (i.e. Python execution) of converted code."""
  pass
StagingError
python
allegroai__clearml
clearml/backend_api/services/v2_23/models.py
{ "start": 114840, "end": 117694 }
class ____(Request): """ Move models to a project :param ids: Models to move :type ids: Sequence[str] :param project: Target project ID. If not provided, `project_name` must be provided. Use null for the root project :type project: str :param project_name: Target project name. If provided and a project with this name does not exist, a new project will be created. If not provided, `project` must be provided. :type project_name: str """ _service = "models" _action = "move" _version = "2.23" _schema = { "definitions": {}, "properties": { "ids": { "description": "Models to move", "items": {"type": "string"}, "type": "array", }, "project": { "description": "Target project ID. If not provided, `project_name` must be provided. Use null for the root project", "type": "string", }, "project_name": { "description": "Target project name. If provided and a project with this name does not exist, a new project will be created. If not provided, `project` must be provided.", "type": "string", }, }, "required": ["ids"], "type": "object", } def __init__( self, ids: List[str], project: Optional[str] = None, project_name: Optional[str] = None, **kwargs: Any ) -> None: super(MoveRequest, self).__init__(**kwargs) self.ids = ids self.project = project self.project_name = project_name @schema_property("ids") def ids(self) -> List[str]: return self._property_ids @ids.setter def ids(self, value: List[str]) -> None: if value is None: self._property_ids = None return self.assert_isinstance(value, "ids", (list, tuple)) self.assert_isinstance(value, "ids", six.string_types, is_array=True) self._property_ids = value @schema_property("project") def project(self) -> Optional[str]: return self._property_project @project.setter def project(self, value: Optional[str]) -> None: if value is None: self._property_project = None return self.assert_isinstance(value, "project", six.string_types) self._property_project = value @schema_property("project_name") def project_name(self) -> Optional[str]: return self._property_project_name @project_name.setter def project_name(self, value: Optional[str]) -> None: if value is None: self._property_project_name = None return self.assert_isinstance(value, "project_name", six.string_types) self._property_project_name = value
MoveRequest
python
getsentry__sentry
src/sentry/search/events/builder/spans_indexed.py
{ "start": 1612, "end": 3607 }
class ____(BaseQueryBuilder): requires_organization_condition = True uuid_fields = SPAN_UUID_FIELDS span_id_fields = SPAN_ID_FIELDS duration_fields = DURATION_FIELDS size_fields = SIZE_FIELDS config_class = SpansEAPDatasetConfig def get_field_type(self, field: str) -> str | None: tag_match = constants.TYPED_TAG_KEY_RE.search(field) field_type = tag_match.group("type") if tag_match else None if field_type == "number": return "number" return super().get_field_type(field) def resolve_field(self, raw_field: str, alias: bool = False) -> Column: # try the typed regex first if len(raw_field) > constants.MAX_TAG_KEY_LENGTH: raise InvalidSearchQuery(f"{raw_field} is too long, can be a maximum of 200 characters") tag_match = constants.TYPED_TAG_KEY_RE.search(raw_field) field = tag_match.group("tag") if tag_match else None field_type = tag_match.group("type") if tag_match else None if ( field is None or field_type is None or not constants.VALID_FIELD_PATTERN.match(field) # attr field is less permissive than tags, we can't have - in them or "-" in field ): return super().resolve_field(raw_field, alias) if field_type not in ["number", "string"]: raise InvalidSearchQuery( f"Unknown type for field {raw_field}, only string and number are supported" ) if field_type == "string": field_col = Column(f"attr_str[{field}]") else: field_col = Column(f"attr_num[{field}]") if alias: field_alias = f"tags_{field}@{field_type}" self.typed_tag_to_alias_map[raw_field] = field_alias self.alias_to_typed_tag_map[field_alias] = raw_field field_col = AliasedExpression(field_col, field_alias) return field_col
SpansEAPQueryBuilder
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/dataflow.py
{ "start": 8066, "end": 20959 }
class ____(GoogleCloudBaseOperator): """ Start a Dataflow job with a classic template; the parameters of the operation will be passed to the job. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:DataflowTemplatedJobStartOperator` :param template: The reference to the Dataflow template. :param job_name: The 'jobName' to use when executing the Dataflow template (templated). :param options: Map of job runtime environment options. It will update environment argument if passed. .. seealso:: For more information on possible configurations, look at the API documentation `https://cloud.google.com/dataflow/pipelines/specifying-exec-params <https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment>`__ :param dataflow_default_options: Map of default job environment options. :param parameters: Map of job specific parameters for the template. :param project_id: Optional, the Google Cloud project ID in which to start a job. If set to None or missing, the default project_id from the Google Cloud connection is used. :param location: Job location. :param gcp_conn_id: The connection ID to use connecting to Google Cloud. :param poll_sleep: The time in seconds to sleep between polling Google Cloud Platform for the dataflow job status while the job is in the JOB_STATE_RUNNING state. :param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). :param environment: Optional, Map of job runtime environment options. .. seealso:: For more information on possible configurations, look at the API documentation `https://cloud.google.com/dataflow/pipelines/specifying-exec-params <https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment>`__ :param cancel_timeout: How long (in seconds) operator should wait for the pipeline to be successfully cancelled when task is being killed. :param append_job_name: True if unique suffix has to be appended to job name. :param wait_until_finished: (Optional) If True, wait for the end of pipeline execution before exiting. If False, only submits job. If None, default behavior. The default behavior depends on the type of pipeline: * for the streaming pipeline, wait for jobs to start, * for the batch pipeline, wait for the jobs to complete. .. warning:: You cannot call ``PipelineResult.wait_until_finish`` method in your pipeline code for the operator to work properly. i. e. you must use asynchronous execution. Otherwise, your pipeline will always wait until finished. For more information, look at: `Asynchronous execution <https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#python_10>`__ The process of starting the Dataflow job in Airflow consists of two steps: * running a subprocess and reading the stderr/stderr log for the job id. * loop waiting for the end of the job ID from the previous step. This loop checks the status of the job. 
Step two is started just after step one has finished, so if you have wait_until_finished in your pipeline code, step two will not start until the process stops. When this process stops, steps two will run, but it will only execute one iteration as the job will be in a terminal state. If you in your pipeline do not call the wait_for_pipeline method but pass wait_until_finish=True to the operator, the second loop will wait for the job's terminal state. If you in your pipeline do not call the wait_for_pipeline method, and pass wait_until_finish=False to the operator, the second loop will check once is job not in terminal state and exit the loop. :param expected_terminal_state: The expected terminal state of the operator on which the corresponding Airflow task succeeds. When not specified, it will be determined by the hook. It's a good practice to define dataflow_* parameters in the default_args of the dag like the project, zone and staging location. .. seealso:: https://cloud.google.com/dataflow/docs/reference/rest/v1b3/LaunchTemplateParameters https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment .. code-block:: python default_args = { "dataflow_default_options": { "zone": "europe-west1-d", "tempLocation": "gs://my-staging-bucket/staging/", } } You need to pass the path to your dataflow template as a file reference with the ``template`` parameter. Use ``parameters`` to pass on parameters to your job. Use ``environment`` to pass on runtime environment variables to your job. .. code-block:: python t1 = DataflowTemplatedJobStartOperator( task_id="dataflow_example", template="{{var.value.gcp_dataflow_base}}", parameters={ "inputFile": "gs://bucket/input/my_input.txt", "outputFile": "gs://bucket/output/my_output.txt", }, gcp_conn_id="airflow-conn-id", dag=my_dag, ) ``template``, ``dataflow_default_options``, ``parameters``, and ``job_name`` are templated, so you can use variables in them. Note that ``dataflow_default_options`` is expected to save high-level options for project information, which apply to all dataflow operators in the DAG. .. seealso:: https://cloud.google.com/dataflow/docs/reference/rest/v1b3 /LaunchTemplateParameters https://cloud.google.com/dataflow/docs/reference/rest/v1b3/RuntimeEnvironment For more detail on job template execution have a look at the reference: https://cloud.google.com/dataflow/docs/templates/executing-templates :param deferrable: Run operator in the deferrable mode. 
""" template_fields: Sequence[str] = ( "template", "job_name", "options", "parameters", "project_id", "location", "gcp_conn_id", "impersonation_chain", "environment", "dataflow_default_options", ) ui_color = "#0273d4" operator_extra_links = (DataflowJobLink(),) def __init__( self, *, template: str, project_id: str = PROVIDE_PROJECT_ID, job_name: str = "{{task.task_id}}", options: dict[str, Any] | None = None, dataflow_default_options: dict[str, Any] | None = None, parameters: dict[str, str] | None = None, location: str | None = None, gcp_conn_id: str = "google_cloud_default", poll_sleep: int = 10, impersonation_chain: str | Sequence[str] | None = None, environment: dict | None = None, cancel_timeout: int | None = 10 * 60, wait_until_finished: bool | None = None, append_job_name: bool = True, deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False), expected_terminal_state: str | None = None, **kwargs, ) -> None: super().__init__(**kwargs) self.template = template self.job_name = job_name self.options = options or {} self.dataflow_default_options = dataflow_default_options or {} self.parameters = parameters or {} self.project_id = project_id self.location = location self.gcp_conn_id = gcp_conn_id self.poll_sleep = poll_sleep self.impersonation_chain = impersonation_chain self.environment = environment self.cancel_timeout = cancel_timeout self.wait_until_finished = wait_until_finished self.append_job_name = append_job_name self.deferrable = deferrable self.expected_terminal_state = expected_terminal_state self.job: dict[str, str] | None = None self._validate_deferrable_params() def _validate_deferrable_params(self): if self.deferrable and self.wait_until_finished: raise ValueError( "Conflict between deferrable and wait_until_finished parameters " "because it makes operator as blocking when it requires to be deferred. " "It should be True as deferrable parameter or True as wait_until_finished." 
) if self.deferrable and self.wait_until_finished is None: self.wait_until_finished = False @cached_property def hook(self) -> DataflowHook: hook = DataflowHook( gcp_conn_id=self.gcp_conn_id, poll_sleep=self.poll_sleep, impersonation_chain=self.impersonation_chain, cancel_timeout=self.cancel_timeout, wait_until_finished=self.wait_until_finished, expected_terminal_state=self.expected_terminal_state, ) return hook def execute(self, context: Context): def set_current_job(current_job): self.job = current_job DataflowJobLink.persist( context=context, project_id=self.project_id, region=self.location, job_id=self.job.get("id"), ) options = self.dataflow_default_options options.update(self.options) if not self.location: self.location = DEFAULT_DATAFLOW_LOCATION if not self.deferrable: self.job = self.hook.start_template_dataflow( job_name=self.job_name, variables=options, parameters=self.parameters, dataflow_template=self.template, on_new_job_callback=set_current_job, project_id=self.project_id, location=self.location, environment=self.environment, append_job_name=self.append_job_name, ) job_id = self.hook.extract_job_id(self.job) context["task_instance"].xcom_push(key="job_id", value=job_id) return job_id self.job = self.hook.launch_job_with_template( job_name=self.job_name, variables=options, parameters=self.parameters, dataflow_template=self.template, project_id=self.project_id, append_job_name=self.append_job_name, location=self.location, environment=self.environment, ) job_id = self.hook.extract_job_id(self.job) DataflowJobLink.persist( context=context, project_id=self.project_id, region=self.location, job_id=job_id ) self.defer( trigger=TemplateJobStartTrigger( project_id=self.project_id, job_id=job_id, location=self.location, gcp_conn_id=self.gcp_conn_id, poll_sleep=self.poll_sleep, impersonation_chain=self.impersonation_chain, cancel_timeout=self.cancel_timeout, ), method_name=GOOGLE_DEFAULT_DEFERRABLE_METHOD_NAME, ) def execute_complete(self, context: Context, event: dict[str, Any]) -> str: """Execute after trigger finishes its work.""" if event["status"] in ("error", "stopped"): self.log.info("status: %s, msg: %s", event["status"], event["message"]) raise AirflowException(event["message"]) job_id = event["job_id"] context["task_instance"].xcom_push(key="job_id", value=job_id) self.log.info("Task %s completed with response %s", self.task_id, event["message"]) return job_id def on_kill(self) -> None: self.log.info("On kill.") if self.job is not None: self.log.info("Cancelling job %s", self.job_name) self.hook.cancel_job( job_name=self.job_name, job_id=self.job.get("id"), project_id=self.job.get("projectId"), location=self.job.get("location"), )
DataflowTemplatedJobStartOperator
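A minimal usage sketch (not part of the record above; the project id, template path and bucket URIs are illustrative placeholders) showing how the masked operator is typically wired into a DAG in deferrable mode:

# Hypothetical DAG snippet; all resource names below are placeholders.
from airflow.providers.google.cloud.operators.dataflow import DataflowTemplatedJobStartOperator

start_template_job = DataflowTemplatedJobStartOperator(
    task_id="start_template_job",
    project_id="my-project",                               # assumed project id
    template="gs://dataflow-templates/latest/Word_Count",  # Google-provided sample template
    location="europe-west1",
    parameters={
        "inputFile": "gs://my-bucket/input.txt",
        "output": "gs://my-bucket/output",
    },
    deferrable=True,  # hand polling off to the trigger instead of blocking a worker slot
)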
python
apache__airflow
airflow-ctl/src/airflowctl/api/datamodels/generated.py
{ "start": 48135, "end": 49697 }
class ____(BaseModel): """ DAG Run serializer for responses. """ dag_run_id: Annotated[str, Field(title="Dag Run Id")] dag_id: Annotated[str, Field(title="Dag Id")] logical_date: Annotated[datetime | None, Field(title="Logical Date")] = None queued_at: Annotated[datetime | None, Field(title="Queued At")] = None start_date: Annotated[datetime | None, Field(title="Start Date")] = None end_date: Annotated[datetime | None, Field(title="End Date")] = None duration: Annotated[float | None, Field(title="Duration")] = None data_interval_start: Annotated[datetime | None, Field(title="Data Interval Start")] = None data_interval_end: Annotated[datetime | None, Field(title="Data Interval End")] = None run_after: Annotated[datetime, Field(title="Run After")] last_scheduling_decision: Annotated[datetime | None, Field(title="Last Scheduling Decision")] = None run_type: DagRunType state: DagRunState triggered_by: DagRunTriggeredByType | None = None triggering_user_name: Annotated[str | None, Field(title="Triggering User Name")] = None conf: Annotated[dict[str, Any] | None, Field(title="Conf")] = None note: Annotated[str | None, Field(title="Note")] = None dag_versions: Annotated[list[DagVersionResponse], Field(title="Dag Versions")] bundle_version: Annotated[str | None, Field(title="Bundle Version")] = None dag_display_name: Annotated[str, Field(title="Dag Display Name")] partition_key: Annotated[str | None, Field(title="Partition Key")] = None
DAGRunResponse
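For context, a hedged sketch (not from the record; the payload values are invented) of how such a generated pydantic v2 model is typically populated from an API response:

# Hypothetical payload; the required fields follow the model definition above.
payload = {
    "dag_run_id": "manual__2024-01-01T00:00:00+00:00",
    "dag_id": "example_dag",
    "run_after": "2024-01-01T00:00:00+00:00",
    "run_type": "manual",
    "state": "success",
    "dag_versions": [],
    "dag_display_name": "example_dag",
}
run = DAGRunResponse.model_validate(payload)  # pydantic coerces ISO strings to datetime
print(run.state, run.run_after.isoformat())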
python
huggingface__transformers
tests/quantization/gptq/test_gptq.py
{ "start": 2485, "end": 11828 }
class ____(unittest.TestCase): model_name = "bigscience/bloom-560m" input_text = "Hello my name is" EXPECTED_OUTPUTS = set() # flaky test: gptqmodel and auto-gptq are not output equivalent nor is string compare deterministic even between transformer/torch versions EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I") EXPECTED_OUTPUTS.add("Hello my name is John, I am a professional photographer and I") EXPECTED_OUTPUTS.add("Hello my name is John, I am a student in the University of") EXPECTED_OUTPUTS.add("Hello my name is John and I am a very good looking man.") EXPECTED_OUTPUTS.add("Hello my name is Alyson, I am a student in the") EXPECTED_OUTPUTS.add("Hello my name is Alyson and I am a very sweet,") EXPECTED_OUTPUTS.add("Hello my name is Aiden, I am a student at the University") EXPECTED_OUTPUTS.add("Hello my name is Nate and I am a member of the N") EXPECTED_OUTPUTS.add("Hello my name is Nellie and I am a student at the") EXPECTED_OUTPUTS.add("Hello my name is Nate and I am a new member of the") # this seems a little small considering that we are doing 4bit quant but we have a small model and ww don't quantize the embeddings EXPECTED_RELATIVE_DIFFERENCE = 1.664253062 bits = 4 sym = True group_size = 128 desc_act = False use_exllama = False dataset = [ "auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm." ] device_map = "cpu" if is_gptqmodel_available() else None # called only once for all test in this class @classmethod def setUpClass(cls): """ Setup quantized model """ cls.model_fp16 = AutoModelForCausalLM.from_pretrained( cls.model_name, dtype=torch.float16, device_map=cls.device_map ) cls.mem_fp16 = cls.model_fp16.get_memory_footprint() cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True) cls.config = AutoConfig.from_pretrained(cls.model_name) cls.quantization_config = GPTQConfig( bits=cls.bits, dataset=cls.dataset, tokenizer=cls.tokenizer, group_size=cls.group_size, desc_act=cls.desc_act, sym=cls.sym, use_exllama=cls.use_exllama, ) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, dtype=torch.float16, device_map=cls.device_map, quantization_config=cls.quantization_config, ) def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model """ mem_quantized = self.quantized_model.get_memory_footprint() self.assertAlmostEqual(self.mem_fp16 / mem_quantized, self.EXPECTED_RELATIVE_DIFFERENCE, places=4) def test_device_and_dtype_assignment(self): r""" Test whether trying to cast (or assigning a device to) a model after quantization will throw an error. Checks also if other models are casted correctly. 
""" # This should work if self.device_map in (None, "cpu"): _ = self.quantized_model.to(0) with self.assertRaises(ValueError): # Tries with a `dtype`` self.quantized_model.to(torch.float16) def test_original_dtype(self): r""" A simple test to check if the model successfully stores the original dtype """ self.assertTrue(hasattr(self.quantized_model.config, "_pre_quantization_dtype")) self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype")) self.assertTrue(self.quantized_model.config._pre_quantization_dtype == torch.float16) def test_quantized_layers_class(self): """ Simple test to check if the model conversion has been done correctly by checking on the class type of the linear layers of the converted models """ if is_gptqmodel_available(): from gptqmodel.utils.importer import hf_select_quant_linear if hasattr(self.config, "quantization_config"): checkpoint_format = self.config.quantization_config.get("checkpoint_format") meta = self.config.quantization_config.get("meta") else: checkpoint_format = "gptq" meta = None QuantLinear = hf_select_quant_linear( bits=self.bits, group_size=self.group_size, desc_act=self.desc_act, sym=self.sym, device_map=self.device_map, checkpoint_format=checkpoint_format, meta=meta, backend=self.quantization_config.backend, ) elif is_auto_gptq_available(): from auto_gptq.utils.import_utils import dynamically_import_QuantLinear as hf_select_quant_linear QuantLinear = hf_select_quant_linear( use_triton=False, desc_act=self.desc_act, group_size=self.group_size, bits=self.bits, disable_exllama=not self.use_exllama, disable_exllamav2=True, ) self.assertTrue(self.quantized_model.transformer.h[0].mlp.dense_4h_to_h.__class__ == QuantLinear) def check_inference_correctness(self, model): r""" Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. 
""" # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(model.device), max_new_tokens=10) # Get the generation self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def check_quantized_layers_type(self, model, value): self.assertTrue(model.transformer.h[0].mlp.dense_4h_to_h.QUANT_TYPE == value) def test_generate_quality(self): """ Simple test to check the quality of the model by comparing the generated tokens with the expected tokens """ if self.device_map is None: self.check_inference_correctness(self.quantized_model.to(0)) else: if self.device_map == "cpu" and self.quantized_model.device.type != "cpu": self.quantized_model.to("cpu") self.check_inference_correctness(self.quantized_model) def test_serialization(self): """ Test the serialization of the model and the loading of the quantized weights works """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) if is_auto_gptq_available() and not is_gptqmodel_available(): quant_type = "cuda-old" if not self.use_exllama else "exllama" if not self.use_exllama: quantized_model_from_saved = AutoModelForCausalLM.from_pretrained( tmpdirname, quantization_config=GPTQConfig(use_exllama=False, bits=4) ) if self.device_map != "cpu": quantized_model_from_saved = quantized_model_from_saved.to(0) else: quantized_model_from_saved = AutoModelForCausalLM.from_pretrained( tmpdirname, device_map=self.device_map ) else: if self.device_map == "cpu": quant_type = "ipex" if is_ipex_available() else "torch" else: # We expect tritonv2 to be used here, because exllama backend doesn't support packing https://github.com/ModelCloud/GPTQModel/issues/1354 # TODO: Remove this once GPTQModel exllama kernels supports packing quant_type = "tritonv2" quantized_model_from_saved = AutoModelForCausalLM.from_pretrained( tmpdirname, device_map=self.device_map ) self.check_quantized_layers_type(quantized_model_from_saved, quant_type) self.check_inference_correctness(quantized_model_from_saved) @require_accelerate def test_serialization_big_model_inference(self): """ Test the serialization of the model and the loading of the quantized weights with big model inference """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) device_map = self.device_map or "auto" quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=device_map) self.check_inference_correctness(quantized_model_from_saved) @require_torch_gpu
GPTQTest
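Outside the test harness, the same quantization path can be exercised directly; a sketch under the assumption that a GPTQ backend (auto-gptq or gptqmodel) is installed, with a placeholder output directory:

# Sketch: 4-bit GPTQ quantization of the same small model the tests use.
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

model_id = "bigscience/bloom-560m"
tokenizer = AutoTokenizer.from_pretrained(model_id)
gptq_config = GPTQConfig(bits=4, dataset="c4", tokenizer=tokenizer, group_size=128)
quantized = AutoModelForCausalLM.from_pretrained(
    model_id, device_map="auto", quantization_config=gptq_config
)
quantized.save_pretrained("bloom-560m-gptq")  # reloadable later without re-quantizing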
python
Lightning-AI__lightning
src/lightning/pytorch/loggers/wandb.py
{ "start": 1716, "end": 25507 }
class ____(Logger): r"""Log using `Weights and Biases <https://docs.wandb.ai/guides/integrations/lightning>`_. **Installation and set-up** Install with pip: .. code-block:: bash pip install wandb Create a `WandbLogger` instance: .. code-block:: python from lightning.pytorch.loggers import WandbLogger wandb_logger = WandbLogger(project="MNIST") Pass the logger instance to the `Trainer`: .. code-block:: python trainer = Trainer(logger=wandb_logger) A new W&B run will be created when training starts if you have not created one manually before with `wandb.init()`. **Log metrics** Log from :class:`~lightning.pytorch.core.LightningModule`: .. code-block:: python class LitModule(LightningModule): def training_step(self, batch, batch_idx): self.log("train/loss", loss) Use directly wandb module: .. code-block:: python wandb.log({"train/loss": loss}) **Log hyper-parameters** Save :class:`~lightning.pytorch.core.LightningModule` parameters: .. code-block:: python class LitModule(LightningModule): def __init__(self, *args, **kwarg): self.save_hyperparameters() Add other config parameters: .. code-block:: python # add one parameter wandb_logger.experiment.config["key"] = value # add multiple parameters wandb_logger.experiment.config.update({key1: val1, key2: val2}) # use directly wandb module wandb.config["key"] = value wandb.config.update() **Log gradients, parameters and model topology** Call the `watch` method for automatically tracking gradients: .. code-block:: python # log gradients and model topology wandb_logger.watch(model) # log gradients, parameter histogram and model topology wandb_logger.watch(model, log="all") # change log frequency of gradients and parameters (100 steps by default) wandb_logger.watch(model, log_freq=500) # do not log graph (in case of errors) wandb_logger.watch(model, log_graph=False) The `watch` method adds hooks to the model which can be removed at the end of training: .. code-block:: python wandb_logger.experiment.unwatch(model) **Log model checkpoints** Log model checkpoints at the end of training: .. code-block:: python wandb_logger = WandbLogger(log_model=True) Log model checkpoints as they get created during training: .. code-block:: python wandb_logger = WandbLogger(log_model="all") Custom checkpointing can be set up through :class:`~lightning.pytorch.callbacks.ModelCheckpoint`: .. code-block:: python # log model only if `val_accuracy` increases wandb_logger = WandbLogger(log_model="all") checkpoint_callback = ModelCheckpoint(monitor="val_accuracy", mode="max") trainer = Trainer(logger=wandb_logger, callbacks=[checkpoint_callback]) `latest` and `best` aliases are automatically set to easily retrieve a model checkpoint: .. code-block:: python # reference can be retrieved in artifacts panel # "VERSION" can be a version (ex: "v2") or an alias ("latest or "best") checkpoint_reference = "USER/PROJECT/MODEL-RUN_ID:VERSION" # download checkpoint locally (if not already cached) run = wandb.init(project="MNIST") artifact = run.use_artifact(checkpoint_reference, type="model") artifact_dir = artifact.download() # load checkpoint model = LitModule.load_from_checkpoint(Path(artifact_dir) / "model.ckpt") **Log media** Log text with: .. code-block:: python # using columns and data columns = ["input", "label", "prediction"] data = [["cheese", "english", "english"], ["fromage", "french", "spanish"]] wandb_logger.log_text(key="samples", columns=columns, data=data) # using a pandas DataFrame wandb_logger.log_text(key="samples", dataframe=my_dataframe) Log images with: .. 
code-block:: python # using tensors, numpy arrays or PIL images wandb_logger.log_image(key="samples", images=[img1, img2]) # adding captions wandb_logger.log_image(key="samples", images=[img1, img2], caption=["tree", "person"]) # using file path wandb_logger.log_image(key="samples", images=["img_1.jpg", "img_2.jpg"]) More arguments can be passed for logging segmentation masks and bounding boxes. Refer to `Image Overlays documentation <https://docs.wandb.ai/guides/track/log/media#image-overlays>`_. **Log Tables** `W&B Tables <https://docs.wandb.ai/guides/tables/visualize-tables>`_ can be used to log, query and analyze tabular data. They support any type of media (text, image, video, audio, molecule, html, etc) and are great for storing, understanding and sharing any form of data, from datasets to model predictions. .. code-block:: python columns = ["caption", "image", "sound"] data = [["cheese", wandb.Image(img_1), wandb.Audio(snd_1)], ["wine", wandb.Image(img_2), wandb.Audio(snd_2)]] wandb_logger.log_table(key="samples", columns=columns, data=data) **Downloading and Using Artifacts** To download an artifact without starting a run, call the ``download_artifact`` function on the class: .. code-block:: python from lightning.pytorch.loggers import WandbLogger artifact_dir = WandbLogger.download_artifact(artifact="path/to/artifact") To download an artifact and link it to an ongoing run call the ``download_artifact`` function on the logger instance: .. code-block:: python class MyModule(LightningModule): def any_lightning_module_function_or_hook(self): self.logger.download_artifact(artifact="path/to/artifact") To link an artifact from a previous run you can use ``use_artifact`` function: .. code-block:: python from lightning.pytorch.loggers import WandbLogger wandb_logger = WandbLogger(project="my_project", name="my_run") wandb_logger.use_artifact(artifact="path/to/artifact") See Also: - `Demo in Google Colab <http://wandb.me/lightning>`__ with hyperparameter search and model logging - `W&B Documentation <https://docs.wandb.ai/guides/integrations/lightning>`__ Args: name: Display name for the run. save_dir: Path where data is saved. version: Sets the version, mainly used to resume a previous run. offline: Run offline (data can be streamed later to wandb servers). dir: Same as save_dir. id: Same as version. anonymous: Enables or explicitly disables anonymous logging. project: The name of the project to which this run will belong. If not set, the environment variable `WANDB_PROJECT` will be used as a fallback. If both are not set, it defaults to ``'lightning_logs'``. log_model: Log checkpoints created by :class:`~lightning.pytorch.callbacks.ModelCheckpoint` as W&B artifacts. `latest` and `best` aliases are automatically set. * if ``log_model == 'all'``, checkpoints are logged during training. * if ``log_model == True``, checkpoints are logged at the end of training, except when :paramref:`~lightning.pytorch.callbacks.ModelCheckpoint.save_top_k` ``== -1`` which also logs every checkpoint during training. * if ``log_model == False`` (default), no checkpoint is logged. prefix: A string to put at the beginning of metric keys. experiment: WandB experiment object. Automatically set when creating a run. checkpoint_name: Name of the model checkpoint artifact being logged. add_file_policy: If "mutable", copies file to tempdirectory before upload. \**kwargs: Arguments passed to :func:`wandb.init` like `entity`, `group`, `tags`, etc. 
Raises: ModuleNotFoundError: If required WandB package is not installed on the device. MisconfigurationException: If both ``log_model`` and ``offline`` is set to ``True``. """ LOGGER_JOIN_CHAR = "-" def __init__( self, name: Optional[str] = None, save_dir: _PATH = ".", version: Optional[str] = None, offline: bool = False, dir: Optional[_PATH] = None, id: Optional[str] = None, anonymous: Optional[bool] = None, project: Optional[str] = None, log_model: Union[Literal["all"], bool] = False, experiment: Union["Run", "RunDisabled", None] = None, prefix: str = "", checkpoint_name: Optional[str] = None, add_file_policy: Literal["mutable", "immutable"] = "mutable", **kwargs: Any, ) -> None: if not _WANDB_AVAILABLE: raise ModuleNotFoundError(str(_WANDB_AVAILABLE)) if offline and log_model: raise MisconfigurationException( f"Providing log_model={log_model} and offline={offline} is an invalid configuration" " since model checkpoints cannot be uploaded in offline mode.\n" "Hint: Set `offline=False` to log your model." ) super().__init__() self._offline = offline self._log_model = log_model self._prefix = prefix self._experiment = experiment self._logged_model_time: dict[str, float] = {} self._checkpoint_callbacks: dict[int, ModelCheckpoint] = {} self.add_file_policy = add_file_policy # paths are processed as strings if save_dir is not None: save_dir = os.fspath(save_dir) elif dir is not None: dir = os.fspath(dir) project = project or os.environ.get("WANDB_PROJECT", "lightning_logs") # set wandb init arguments self._wandb_init: dict[str, Any] = { "name": name, "project": project, "dir": save_dir or dir, "id": version or id, "resume": "allow", "anonymous": ("allow" if anonymous else None), } self._wandb_init.update(**kwargs) # extract parameters self._project = self._wandb_init.get("project") self._save_dir = self._wandb_init.get("dir") self._name = self._wandb_init.get("name") self._id = self._wandb_init.get("id") self._checkpoint_name = checkpoint_name def __getstate__(self) -> dict[str, Any]: import wandb # Hack: If the 'spawn' launch method is used, the logger will get pickled and this `__getstate__` gets called. # We create an experiment here in the main process, and attach to it in the worker process. # Using wandb-service, we persist the same experiment even if multiple `Trainer.fit/test/validate` calls # are made. wandb.require("service") _ = self.experiment state = self.__dict__.copy() # args needed to reload correct experiment if self._experiment is not None: state["_id"] = getattr(self._experiment, "id", None) state["_attach_id"] = getattr(self._experiment, "_attach_id", None) state["_name"] = self._experiment.name # cannot be pickled state["_experiment"] = None return state @property @rank_zero_experiment def experiment(self) -> Union["Run", "RunDisabled"]: r"""Actual wandb object. To use wandb features in your :class:`~lightning.pytorch.core.LightningModule` do the following. Example:: .. code-block:: python self.logger.experiment.some_wandb_function() """ import wandb from wandb.sdk.lib import RunDisabled from wandb.wandb_run import Run if self._experiment is None: if self._offline: os.environ["WANDB_MODE"] = "dryrun" attach_id = getattr(self, "_attach_id", None) if wandb.run is not None: # wandb process already created in this instance rank_zero_warn( "There is a wandb run already in progress and newly created instances of `WandbLogger` will reuse" " this run. If this is not desired, call `wandb.finish()` before instantiating `WandbLogger`." 
) self._experiment = wandb.run elif attach_id is not None and hasattr(wandb, "_attach"): # attach to wandb process referenced self._experiment = wandb._attach(attach_id) else: # create new wandb process self._experiment = wandb.init(**self._wandb_init) # define default x-axis if isinstance(self._experiment, (Run, RunDisabled)) and getattr( self._experiment, "define_metric", None ): if self._wandb_init.get("sync_tensorboard"): self._experiment.define_metric("*", step_metric="global_step") else: self._experiment.define_metric("trainer/global_step") self._experiment.define_metric("*", step_metric="trainer/global_step", step_sync=True) return self._experiment def watch( self, model: nn.Module, log: Optional[str] = "gradients", log_freq: int = 100, log_graph: bool = True ) -> None: self.experiment.watch(model, log=log, log_freq=log_freq, log_graph=log_graph) @override @rank_zero_only def log_hyperparams(self, params: Union[dict[str, Any], Namespace]) -> None: params = _convert_params(params) params = _sanitize_callable_params(params) params = _convert_json_serializable(params) self.experiment.config.update(params, allow_val_change=True) @override @rank_zero_only def log_metrics(self, metrics: Mapping[str, float], step: Optional[int] = None) -> None: assert rank_zero_only.rank == 0, "experiment tried to log from global_rank != 0" metrics = _add_prefix(metrics, self._prefix, self.LOGGER_JOIN_CHAR) if step is not None and not self._wandb_init.get("sync_tensorboard"): self.experiment.log(dict(metrics, **{"trainer/global_step": step})) else: self.experiment.log(metrics) @rank_zero_only def log_table( self, key: str, columns: Optional[list[str]] = None, data: Optional[list[list[Any]]] = None, dataframe: Any = None, step: Optional[int] = None, ) -> None: """Log a Table containing any object type (text, image, audio, video, molecule, html, etc). Can be defined either with `columns` and `data` or with `dataframe`. """ import wandb metrics = {key: wandb.Table(columns=columns, data=data, dataframe=dataframe)} self.log_metrics(metrics, step) @rank_zero_only def log_text( self, key: str, columns: Optional[list[str]] = None, data: Optional[list[list[str]]] = None, dataframe: Any = None, step: Optional[int] = None, ) -> None: """Log text as a Table. Can be defined either with `columns` and `data` or with `dataframe`. """ self.log_table(key, columns, data, dataframe, step) @rank_zero_only def log_image(self, key: str, images: list[Any], step: Optional[int] = None, **kwargs: Any) -> None: """Log images (tensors, numpy arrays, PIL Images or file paths). Optional kwargs are lists passed to each image (ex: caption, masks, boxes). """ if not isinstance(images, list): raise TypeError(f'Expected a list as "images", found {type(images)}') n = len(images) for k, v in kwargs.items(): if len(v) != n: raise ValueError(f"Expected {n} items but only found {len(v)} for {k}") kwarg_list = [{k: kwargs[k][i] for k in kwargs} for i in range(n)] import wandb metrics = {key: [wandb.Image(img, **kwarg) for img, kwarg in zip(images, kwarg_list)]} self.log_metrics(metrics, step) # type: ignore[arg-type] @rank_zero_only def log_audio(self, key: str, audios: list[Any], step: Optional[int] = None, **kwargs: Any) -> None: r"""Log audios (numpy arrays, or file paths). 
Args: key: The key to be used for logging the audio files audios: The list of audio file paths, or numpy arrays to be logged step: The step number to be used for logging the audio files \**kwargs: Optional kwargs are lists passed to each ``Wandb.Audio`` instance (ex: caption, sample_rate). Optional kwargs are lists passed to each audio (ex: caption, sample_rate). """ if not isinstance(audios, list): raise TypeError(f'Expected a list as "audios", found {type(audios)}') n = len(audios) for k, v in kwargs.items(): if len(v) != n: raise ValueError(f"Expected {n} items but only found {len(v)} for {k}") kwarg_list = [{k: kwargs[k][i] for k in kwargs} for i in range(n)] import wandb metrics = {key: [wandb.Audio(audio, **kwarg) for audio, kwarg in zip(audios, kwarg_list)]} self.log_metrics(metrics, step) # type: ignore[arg-type] @rank_zero_only def log_video(self, key: str, videos: list[Any], step: Optional[int] = None, **kwargs: Any) -> None: """Log videos (numpy arrays, or file paths). Args: key: The key to be used for logging the video files videos: The list of video file paths, or numpy arrays to be logged step: The step number to be used for logging the video files **kwargs: Optional kwargs are lists passed to each Wandb.Video instance (ex: caption, fps, format). Optional kwargs are lists passed to each video (ex: caption, fps, format). """ if not isinstance(videos, list): raise TypeError(f'Expected a list as "videos", found {type(videos)}') n = len(videos) for k, v in kwargs.items(): if len(v) != n: raise ValueError(f"Expected {n} items but only found {len(v)} for {k}") kwarg_list = [{k: kwargs[k][i] for k in kwargs} for i in range(n)] import wandb metrics = {key: [wandb.Video(video, **kwarg) for video, kwarg in zip(videos, kwarg_list)]} self.log_metrics(metrics, step) # type: ignore[arg-type] @property @override def save_dir(self) -> Optional[str]: """Gets the save directory. Returns: The path to the save directory. """ return self._save_dir @property @override def name(self) -> Optional[str]: """The project name of this experiment. Returns: The name of the project the current experiment belongs to. This name is not the same as `wandb.Run`'s name. To access wandb's internal experiment name, use ``logger.experiment.name`` instead. """ return self._project @property @override def version(self) -> Optional[str]: """Gets the id of the experiment. Returns: The id of the experiment if the experiment exists else the id given to the constructor. """ # don't create an experiment if we don't have one return self._experiment.id if self._experiment else self._id @override def after_save_checkpoint(self, checkpoint_callback: ModelCheckpoint) -> None: # log checkpoints as artifacts if self._log_model == "all" or self._log_model is True and checkpoint_callback.save_top_k == -1: self._scan_and_log_checkpoints(checkpoint_callback) elif self._log_model is True: self._checkpoint_callbacks[id(checkpoint_callback)] = checkpoint_callback @staticmethod @rank_zero_only def download_artifact( artifact: str, save_dir: Optional[_PATH] = None, artifact_type: Optional[str] = None, use_artifact: Optional[bool] = True, ) -> str: """Downloads an artifact from the wandb server. Args: artifact: The path of the artifact to download. save_dir: The directory to save the artifact to. artifact_type: The type of artifact to download. use_artifact: Whether to add an edge between the artifact graph. Returns: The path to the downloaded artifact. 
""" import wandb if wandb.run is not None and use_artifact: artifact = wandb.run.use_artifact(artifact) else: api = wandb.Api() artifact = api.artifact(artifact, type=artifact_type) save_dir = None if save_dir is None else os.fspath(save_dir) return artifact.download(root=save_dir) def use_artifact(self, artifact: str, artifact_type: Optional[str] = None) -> "Artifact": """Logs to the wandb dashboard that the mentioned artifact is used by the run. Args: artifact: The path of the artifact. artifact_type: The type of artifact being used. Returns: wandb Artifact object for the artifact. """ return self.experiment.use_artifact(artifact, type=artifact_type) @override @rank_zero_only def finalize(self, status: str) -> None: if status != "success": # Currently, checkpoints only get logged on success return # log checkpoints as artifacts if self._experiment is not None: for checkpoint_callback in self._checkpoint_callbacks.values(): self._scan_and_log_checkpoints(checkpoint_callback) def _scan_and_log_checkpoints(self, checkpoint_callback: ModelCheckpoint) -> None: import wandb # get checkpoints to be saved with associated score checkpoints = _scan_checkpoints(checkpoint_callback, self._logged_model_time) # log iteratively all new checkpoints for t, p, s, tag in checkpoints: metadata = { "score": s.item() if isinstance(s, Tensor) else s, "original_filename": Path(p).name, checkpoint_callback.__class__.__name__: { k: getattr(checkpoint_callback, k) for k in [ "monitor", "mode", "save_last", "save_top_k", "save_weights_only", "_every_n_train_steps", ] # ensure it does not break if `ModelCheckpoint` args change if hasattr(checkpoint_callback, k) }, } if not self._checkpoint_name: self._checkpoint_name = f"model-{self.experiment.id}" artifact = wandb.Artifact(name=self._checkpoint_name, type="model", metadata=metadata) artifact.add_file(p, name="model.ckpt", policy=self.add_file_policy) aliases = ["latest", "best"] if p == checkpoint_callback.best_model_path else ["latest"] self.experiment.log_artifact(artifact, aliases=aliases) # remember logged models - timestamp needed in case filename didn't change (lastkckpt or custom name) self._logged_model_time[p] = t
WandbLogger
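The docstring above shows examples for log_text, log_image and log_table but not for the audio and video helpers; a small sketch (sample data and file name are invented):

# Sketch: per-item kwargs must be lists of the same length as the media list.
import numpy as np

wandb_logger.log_audio(
    key="samples/audio",
    audios=[np.random.uniform(-1, 1, 16000), np.random.uniform(-1, 1, 16000)],
    sample_rate=[16000, 16000],  # one entry per clip, as the length check requires
)
wandb_logger.log_video(
    key="samples/video",
    videos=["rollout.mp4"],      # placeholder path
    caption=["first rollout"],
)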
python
django__django
tests/auth_tests/test_remote_user.py
{ "start": 16062, "end": 16838 }
class ____(RemoteUserTest): """Backend that allows inactive users.""" backend = "django.contrib.auth.backends.AllowAllUsersRemoteUserBackend" def test_inactive_user(self): user = User.objects.create(username="knownuser", is_active=False) response = self.client.get("/remote_user/", **{self.header: self.known_user}) self.assertEqual(response.context["user"].username, user.username) async def test_inactive_user_async(self): user = await User.objects.acreate(username="knownuser", is_active=False) response = await self.async_client.get( "/remote_user/", **{self.header: self.known_user} ) self.assertEqual(response.context["user"].username, user.username)
AllowAllUsersRemoteUserBackendTest
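For context (standard Django wiring, not taken from the test module), the backend exercised by this test class is enabled with settings along these lines:

# settings.py sketch for REMOTE_USER authentication that allows inactive users.
MIDDLEWARE = [
    # ...
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.auth.middleware.RemoteUserMiddleware",
]
AUTHENTICATION_BACKENDS = [
    "django.contrib.auth.backends.AllowAllUsersRemoteUserBackend",
]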
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/linalg/tridiagonal_solve_op_test.py
{ "start": 2189, "end": 27584 }
class ____(test.TestCase): def _test(self, diags, rhs, expected, diags_format="compact", transpose_rhs=False, conjugate_rhs=False): with self.cached_session(): pivoting = True if hasattr(self, "pivoting"): pivoting = self.pivoting if test_util.is_xla_enabled() and pivoting: # Pivoting is not supported by xla backends. return result = linalg_impl.tridiagonal_solve( diags, rhs, diags_format, transpose_rhs, conjugate_rhs, partial_pivoting=pivoting) result = self.evaluate(result) if expected is None: self.assertAllEqual( np.zeros_like(result, dtype=np.bool_), np.isfinite(result)) else: self.assertAllClose(result, expected) def _testWithLists(self, diags, rhs, expected=None, diags_format="compact", transpose_rhs=False, conjugate_rhs=False): self._test( _tfconst(diags), _tfconst(rhs), _tfconst(expected), diags_format, transpose_rhs, conjugate_rhs) def _assertRaises(self, diags, rhs, diags_format="compact"): pivoting = True if hasattr(self, "pivoting"): pivoting = self.pivoting if test_util.is_xla_enabled() and pivoting: # Pivoting is not supported by xla backends. return with self.assertRaises(ValueError): linalg_impl.tridiagonal_solve( diags, rhs, diags_format, partial_pivoting=pivoting) # Tests with various dtypes def testReal(self): for dtype in dtypes.float32, dtypes.float64: self._test( diags=constant_op.constant(_sample_diags, dtype), rhs=constant_op.constant(_sample_rhs, dtype), expected=constant_op.constant(_sample_result, dtype)) def testComplex(self): for dtype in dtypes.complex64, dtypes.complex128: self._test( diags=constant_op.constant(_sample_diags, dtype) * (1 + 1j), rhs=constant_op.constant(_sample_rhs, dtype) * (1 - 1j), expected=constant_op.constant(_sample_result, dtype) * (1 - 1j) / (1 + 1j)) # Tests with small matrix sizes def test3x3(self): self._testWithLists( diags=[[2, -1, 0], [1, 3, 1], [0, -1, -2]], rhs=[1, 2, 3], expected=[-3, 2, 7]) def test2x2(self): self._testWithLists( diags=[[2, 0], [1, 3], [0, 1]], rhs=[1, 4], expected=[-5, 3]) def test2x2Complex(self): for dtype in dtypes.complex64, dtypes.complex128: self._test( diags=constant_op.constant([[2j, 0j], [1j, 3j], [0j, 1j]], dtype), rhs=constant_op.constant([1 - 1j, 4 - 4j], dtype), expected=constant_op.constant([5 + 5j, -3 - 3j], dtype)) def test1x1(self): self._testWithLists(diags=[[0], [3], [0]], rhs=[6], expected=[2]) def test0x0(self): if test_util.is_xla_enabled(): # The following test crashes with XLA due to slicing 0 length tensors. return self._test( diags=constant_op.constant(0, shape=(3, 0), dtype=dtypes.float32), rhs=constant_op.constant(0, shape=(0, 1), dtype=dtypes.float32), expected=constant_op.constant(0, shape=(0, 1), dtype=dtypes.float32)) def test2x2WithMultipleRhs(self): self._testWithLists( diags=[[2, 0], [1, 3], [0, 1]], rhs=[[1, 2, 3], [4, 8, 12]], expected=[[-5, -10, -15], [3, 6, 9]]) def test1x1WithMultipleRhs(self): self._testWithLists( diags=[[0], [3], [0]], rhs=[[6, 9, 12]], expected=[[2, 3, 4]]) def test1x1NotInvertible(self): if test_util.is_xla_enabled(): # XLA implementation does not check invertibility. return self._testWithLists(diags=[[0], [0], [0]], rhs=[[6, 9, 12]]) def test2x2NotInvertible(self): if test_util.is_xla_enabled(): # XLA implementation does not check invertibility. return self._testWithLists(diags=[[3, 0], [1, 3], [0, 1]], rhs=[1, 4]) # Other edge cases @flags(FLAG_REQUIRES_PIVOTING) def testCaseRequiringPivoting(self): # Without partial pivoting (e.g. Thomas algorithm) this would fail. 
self._testWithLists( diags=[[2, -1, 1, 0], [1, 4, 1, -1], [0, 2, -2, 3]], rhs=[1, 2, 3, 4], expected=[8, -3.5, 0, -4]) @flags(FLAG_REQUIRES_PIVOTING) def testCaseRequiringPivotingLastRows(self): self._testWithLists( diags=[[2, 1, -1, 0], [1, -1, 2, 1], [0, 1, -6, 1]], rhs=[1, 2, -1, -2], expected=[5, -2, -5, 3]) def testNotInvertible(self): if test_util.is_xla_enabled(): return self._testWithLists( diags=[[2, -1, 1, 0], [1, 4, 1, -1], [0, 2, 0, 3]], rhs=[1, 2, 3, 4]) def testDiagonal(self): self._testWithLists( diags=[[0, 0, 0, 0], [1, 2, -1, -2], [0, 0, 0, 0]], rhs=[1, 2, 3, 4], expected=[1, 1, -3, -2]) def testUpperTriangular(self): self._testWithLists( diags=[[2, 4, -1, 0], [1, 3, 1, 2], [0, 0, 0, 0]], rhs=[1, 6, 4, 4], expected=[13, -6, 6, 2]) def testLowerTriangular(self): self._testWithLists( diags=[[0, 0, 0, 0], [2, -1, 3, 1], [0, 1, 4, 2]], rhs=[4, 5, 6, 1], expected=[2, -3, 6, -11]) # Multiple right-hand sides and batching def testWithTwoRightHandSides(self): self._testWithLists( diags=_sample_diags, rhs=np.transpose([_sample_rhs, 2 * _sample_rhs]), expected=np.transpose([_sample_result, 2 * _sample_result])) def testBatching(self): self._testWithLists( diags=np.array([_sample_diags, -_sample_diags]), rhs=np.array([_sample_rhs, 2 * _sample_rhs]), expected=np.array([_sample_result, -2 * _sample_result])) def testWithTwoBatchingDimensions(self): self._testWithLists( diags=np.array([[_sample_diags, -_sample_diags, _sample_diags], [-_sample_diags, _sample_diags, -_sample_diags]]), rhs=np.array([[_sample_rhs, 2 * _sample_rhs, 3 * _sample_rhs], [4 * _sample_rhs, 5 * _sample_rhs, 6 * _sample_rhs]]), expected=np.array( [[_sample_result, -2 * _sample_result, 3 * _sample_result], [-4 * _sample_result, 5 * _sample_result, -6 * _sample_result]])) def testBatchingAndTwoRightHandSides(self): rhs = np.transpose([_sample_rhs, 2 * _sample_rhs]) expected_result = np.transpose([_sample_result, 2 * _sample_result]) self._testWithLists( diags=np.array([_sample_diags, -_sample_diags]), rhs=np.array([rhs, 2 * rhs]), expected=np.array([expected_result, -2 * expected_result])) # Various input formats def testSequenceFormat(self): self._test( diags=(_tfconst([2, 1, 4]), _tfconst([1, 3, 2, 2]), _tfconst([1, -1, 1])), rhs=_tfconst([1, 2, 3, 4]), expected=_tfconst([-9, 5, -4, 4]), diags_format="sequence") def testSequenceFormatWithDummyElements(self): dummy = 20 self._test( diags=(_tfconst([2, 1, 4, dummy]), _tfconst([1, 3, 2, 2]), _tfconst([dummy, 1, -1, 1])), rhs=_tfconst([1, 2, 3, 4]), expected=_tfconst([-9, 5, -4, 4]), diags_format="sequence") def testSequenceFormatWithBatching(self): self._test( diags=(_tfconst([[2, 1, 4], [-2, -1, -4]]), _tfconst([[1, 3, 2, 2], [-1, -3, -2, -2]]), _tfconst([[1, -1, 1], [-1, 1, -1]])), rhs=_tfconst([[1, 2, 3, 4], [1, 2, 3, 4]]), expected=_tfconst([[-9, 5, -4, 4], [9, -5, 4, -4]]), diags_format="sequence") def testMatrixFormat(self): self._testWithLists( diags=[[1, 2, 0, 0], [1, 3, 1, 0], [0, -1, 2, 4], [0, 0, 1, 2]], rhs=[1, 2, 3, 4], expected=[-9, 5, -4, 4], diags_format="matrix") def testMatrixFormatWithMultipleRightHandSides(self): self._testWithLists( diags=[[1, 2, 0, 0], [1, 3, 1, 0], [0, -1, 2, 4], [0, 0, 1, 2]], rhs=[[1, -1], [2, -2], [3, -3], [4, -4]], expected=[[-9, 9], [5, -5], [-4, 4], [4, -4]], diags_format="matrix") def testMatrixFormatWithBatching(self): self._testWithLists( diags=[[[1, 2, 0, 0], [1, 3, 1, 0], [0, -1, 2, 4], [0, 0, 1, 2]], [[-1, -2, 0, 0], [-1, -3, -1, 0], [0, 1, -2, -4], [0, 0, -1, -2]]], rhs=[[1, 2, 3, 4], [1, 2, 3, 4]], expected=[[-9, 5, 
-4, 4], [9, -5, 4, -4]], diags_format="matrix") def testRightHandSideAsColumn(self): self._testWithLists( diags=_sample_diags, rhs=np.transpose([_sample_rhs]), expected=np.transpose([_sample_result]), diags_format="compact") # Tests with transpose and adjoint def testTransposeRhs(self): self._testWithLists( diags=_sample_diags, rhs=np.array([_sample_rhs, 2 * _sample_rhs]), expected=np.array([_sample_result, 2 * _sample_result]).T, transpose_rhs=True) def testConjugateRhs(self): self._testWithLists( diags=_sample_diags, rhs=np.transpose([_sample_rhs * (1 + 1j), _sample_rhs * (1 - 2j)]), expected=np.transpose( [_sample_result * (1 - 1j), _sample_result * (1 + 2j)]), conjugate_rhs=True) def testAdjointRhs(self): self._testWithLists( diags=_sample_diags, rhs=np.array([_sample_rhs * (1 + 1j), _sample_rhs * (1 - 2j)]), expected=np.array( [_sample_result * (1 - 1j), _sample_result * (1 + 2j)]).T, transpose_rhs=True, conjugate_rhs=True) def testTransposeRhsWithBatching(self): self._testWithLists( diags=np.array([_sample_diags, -_sample_diags]), rhs=np.array([[_sample_rhs, 2 * _sample_rhs], [3 * _sample_rhs, 4 * _sample_rhs]]), expected=np.array([[_sample_result, 2 * _sample_result], [-3 * _sample_result, -4 * _sample_result]]).transpose(0, 2, 1), transpose_rhs=True) def testTransposeRhsWithRhsAsVector(self): self._testWithLists( diags=_sample_diags, rhs=_sample_rhs, expected=_sample_result, transpose_rhs=True) def testConjugateRhsWithRhsAsVector(self): self._testWithLists( diags=_sample_diags, rhs=_sample_rhs * (1 + 1j), expected=_sample_result * (1 - 1j), conjugate_rhs=True) def testTransposeRhsWithRhsAsVectorAndBatching(self): self._testWithLists( diags=np.array([_sample_diags, -_sample_diags]), rhs=np.array([_sample_rhs, 2 * _sample_rhs]), expected=np.array([_sample_result, -2 * _sample_result]), transpose_rhs=True) # Gradient tests def _gradientTest( self, diags, rhs, y, # output = reduce_sum(y * tridiag_solve(diags, rhs)) expected_grad_diags, # expected gradient of output w.r.t. diags expected_grad_rhs, # expected gradient of output w.r.t. rhs diags_format="compact", transpose_rhs=False, conjugate_rhs=False, feed_dict=None): expected_grad_diags = _tfconst(expected_grad_diags) expected_grad_rhs = _tfconst(expected_grad_rhs) with backprop.GradientTape() as tape_diags: with backprop.GradientTape() as tape_rhs: tape_diags.watch(diags) tape_rhs.watch(rhs) if test_util.is_xla_enabled(): # Pivoting is not supported by xla backends. 
return x = linalg_impl.tridiagonal_solve( diags, rhs, diagonals_format=diags_format, transpose_rhs=transpose_rhs, conjugate_rhs=conjugate_rhs) res = math_ops.reduce_sum(x * y) with self.cached_session() as sess: actual_grad_diags = sess.run( tape_diags.gradient(res, diags), feed_dict=feed_dict) actual_rhs_diags = sess.run( tape_rhs.gradient(res, rhs), feed_dict=feed_dict) self.assertAllClose(expected_grad_diags, actual_grad_diags) self.assertAllClose(expected_grad_rhs, actual_rhs_diags) def _gradientTestWithLists(self, diags, rhs, y, expected_grad_diags, expected_grad_rhs, diags_format="compact", transpose_rhs=False, conjugate_rhs=False): self._gradientTest( _tfconst(diags), _tfconst(rhs), _tfconst(y), expected_grad_diags, expected_grad_rhs, diags_format, transpose_rhs, conjugate_rhs) def testGradientSimple(self): self._gradientTestWithLists( diags=_sample_diags, rhs=_sample_rhs, y=[1, 3, 2, 4], expected_grad_diags=[[-5, 0, 4, 0], [9, 0, -4, -16], [0, 0, 5, 16]], expected_grad_rhs=[1, 0, -1, 4]) def testGradientWithMultipleRhs(self): self._gradientTestWithLists( diags=_sample_diags, rhs=[[1, 2], [2, 4], [3, 6], [4, 8]], y=[[1, 5], [2, 6], [3, 7], [4, 8]], expected_grad_diags=([[-20, 28, -60, 0], [36, -35, 60, 80], [0, 63, -75, -80]]), expected_grad_rhs=[[0, 2], [1, 3], [1, 7], [0, -10]]) def _makeDataForGradientWithBatching(self): y = np.array([1, 3, 2, 4]) grad_diags = np.array([[-5, 0, 4, 0], [9, 0, -4, -16], [0, 0, 5, 16]]) grad_rhs = np.array([1, 0, -1, 4]) diags_batched = np.array( [[_sample_diags, 2 * _sample_diags, 3 * _sample_diags], [4 * _sample_diags, 5 * _sample_diags, 6 * _sample_diags]]) rhs_batched = np.array([[_sample_rhs, -_sample_rhs, _sample_rhs], [-_sample_rhs, _sample_rhs, -_sample_rhs]]) y_batched = np.array([[y, y, y], [y, y, y]]) expected_grad_diags_batched = np.array( [[grad_diags, -grad_diags / 4, grad_diags / 9], [-grad_diags / 16, grad_diags / 25, -grad_diags / 36]]) expected_grad_rhs_batched = np.array( [[grad_rhs, grad_rhs / 2, grad_rhs / 3], [grad_rhs / 4, grad_rhs / 5, grad_rhs / 6]]) return (y_batched, diags_batched, rhs_batched, expected_grad_diags_batched, expected_grad_rhs_batched) def testGradientWithBatchDims(self): y, diags, rhs, expected_grad_diags, expected_grad_rhs = \ self._makeDataForGradientWithBatching() self._gradientTestWithLists( diags=diags, rhs=rhs, y=y, expected_grad_diags=expected_grad_diags, expected_grad_rhs=expected_grad_rhs) @test_util.run_deprecated_v1 def testGradientWithUnknownShapes(self): def placeholder(rank): return array_ops.placeholder( dtypes.float64, shape=(None for _ in range(rank))) y, diags, rhs, expected_grad_diags, expected_grad_rhs = \ self._makeDataForGradientWithBatching() diags_placeholder = placeholder(rank=4) rhs_placeholder = placeholder(rank=3) y_placeholder = placeholder(rank=3) self._gradientTest( diags=diags_placeholder, rhs=rhs_placeholder, y=y_placeholder, expected_grad_diags=expected_grad_diags, expected_grad_rhs=expected_grad_rhs, feed_dict={ diags_placeholder: diags, rhs_placeholder: rhs, y_placeholder: y }) # Invalid input shapes @flags(FLAG_NO_PARAMETERIZATION) def testInvalidShapesCompactFormat(self): def test_raises(diags_shape, rhs_shape): self._assertRaises(_tf_ones(diags_shape), _tf_ones(rhs_shape), "compact") test_raises((5, 4, 4), (5, 4)) test_raises((5, 3, 4), (4, 5)) test_raises((5, 3, 4), (5)) test_raises((5), (5, 4)) @flags(FLAG_NO_PARAMETERIZATION) def testInvalidShapesSequenceFormat(self): def test_raises(diags_tuple_shapes, rhs_shape): diagonals = tuple(_tf_ones(shape) for shape in 
diags_tuple_shapes) self._assertRaises(diagonals, _tf_ones(rhs_shape), "sequence") test_raises(((5, 4), (5, 4)), (5, 4)) test_raises(((5, 4), (5, 4), (5, 6)), (5, 4)) test_raises(((5, 3), (5, 4), (5, 6)), (5, 4)) test_raises(((5, 6), (5, 4), (5, 3)), (5, 4)) test_raises(((5, 4), (7, 4), (5, 4)), (5, 4)) test_raises(((5, 4), (7, 4), (5, 4)), (3, 4)) @flags(FLAG_NO_PARAMETERIZATION) def testInvalidShapesMatrixFormat(self): def test_raises(diags_shape, rhs_shape): self._assertRaises(_tf_ones(diags_shape), _tf_ones(rhs_shape), "matrix") test_raises((5, 4, 7), (5, 4)) test_raises((5, 4, 4), (3, 4)) test_raises((5, 4, 4), (5, 3)) # Tests with placeholders def _testWithPlaceholders(self, diags_shape, rhs_shape, diags_feed, rhs_feed, expected, diags_format="compact"): if context.executing_eagerly(): return diags = array_ops.placeholder(dtypes.float64, shape=diags_shape) rhs = array_ops.placeholder(dtypes.float64, shape=rhs_shape) if test_util.is_xla_enabled() and self.pivoting: # Pivoting is not supported by xla backends. return x = linalg_impl.tridiagonal_solve( diags, rhs, diags_format, partial_pivoting=self.pivoting) with self.cached_session() as sess: result = sess.run(x, feed_dict={diags: diags_feed, rhs: rhs_feed}) self.assertAllClose(result, expected) @test_util.run_deprecated_v1 def testCompactFormatAllDimsUnknown(self): self._testWithPlaceholders( diags_shape=[None, None], rhs_shape=[None], diags_feed=_sample_diags, rhs_feed=_sample_rhs, expected=_sample_result) @test_util.run_deprecated_v1 def testCompactFormatUnknownMatrixSize(self): self._testWithPlaceholders( diags_shape=[3, None], rhs_shape=[4], diags_feed=_sample_diags, rhs_feed=_sample_rhs, expected=_sample_result) @test_util.run_deprecated_v1 def testCompactFormatUnknownRhsCount(self): self._testWithPlaceholders( diags_shape=[3, 4], rhs_shape=[4, None], diags_feed=_sample_diags, rhs_feed=np.transpose([_sample_rhs, 2 * _sample_rhs]), expected=np.transpose([_sample_result, 2 * _sample_result])) @test_util.run_deprecated_v1 def testCompactFormatUnknownBatchSize(self): self._testWithPlaceholders( diags_shape=[None, 3, 4], rhs_shape=[None, 4], diags_feed=np.array([_sample_diags, -_sample_diags]), rhs_feed=np.array([_sample_rhs, 2 * _sample_rhs]), expected=np.array([_sample_result, -2 * _sample_result])) @test_util.run_deprecated_v1 def testMatrixFormatWithUnknownDims(self): if context.executing_eagerly(): return def test_with_matrix_shapes(matrix_shape, rhs_shape=None): matrix = np.array([[1, 2, 0, 0], [1, 3, 1, 0], [0, -1, 2, 4], [0, 0, 1, 2]]) rhs = np.array([1, 2, 3, 4]) x = np.array([-9, 5, -4, 4]) self._testWithPlaceholders( diags_shape=matrix_shape, rhs_shape=rhs_shape, diags_feed=matrix, rhs_feed=np.transpose([rhs, 2 * rhs]), expected=np.transpose([x, 2 * x]), diags_format="matrix") test_with_matrix_shapes(matrix_shape=[4, 4], rhs_shape=[None, None]) test_with_matrix_shapes(matrix_shape=[None, 4], rhs_shape=[None, None]) test_with_matrix_shapes(matrix_shape=[4, None], rhs_shape=[None, None]) test_with_matrix_shapes(matrix_shape=[None, None], rhs_shape=[None, None]) test_with_matrix_shapes(matrix_shape=[4, 4]) test_with_matrix_shapes(matrix_shape=[None, 4]) test_with_matrix_shapes(matrix_shape=[4, None]) test_with_matrix_shapes(matrix_shape=[None, None]) test_with_matrix_shapes(matrix_shape=None, rhs_shape=[None, None]) test_with_matrix_shapes(matrix_shape=None) @test_util.run_deprecated_v1 def testSequenceFormatWithUnknownDims(self): if context.executing_eagerly(): return if test_util.is_xla_enabled() and self.pivoting: # 
Pivoting is not supported by xla backends. return superdiag = array_ops.placeholder(dtypes.float64, shape=[None]) diag = array_ops.placeholder(dtypes.float64, shape=[None]) subdiag = array_ops.placeholder(dtypes.float64, shape=[None]) rhs = array_ops.placeholder(dtypes.float64, shape=[None]) x = linalg_impl.tridiagonal_solve((superdiag, diag, subdiag), rhs, diagonals_format="sequence", partial_pivoting=self.pivoting) with self.cached_session() as sess: result = sess.run( x, feed_dict={ subdiag: [20, 1, -1, 1], diag: [1, 3, 2, 2], superdiag: [2, 1, 4, 20], rhs: [1, 2, 3, 4] }) self.assertAllClose(result, [-9, 5, -4, 4]) # Benchmark class TridiagonalSolveBenchmark(test.Benchmark): sizes = [(100000, 1, 1), (1000000, 1, 1), (10000000, 1, 1), (100000, 10, 1), (100000, 100, 1), (10000, 1, 10), (10000, 1, 100)] pivoting_options = [(True, "pivoting"), (False, "no_pivoting")] def _generateData(self, matrix_size, batch_size, num_rhs, seed=42): np.random.seed(seed) data = np.random.normal(size=(batch_size, matrix_size, 3 + num_rhs)) diags = np.stack([data[:, :, 0], data[:, :, 1], data[:, :, 2]], axis=-2) rhs = data[:, :, 3:] return (variables.Variable(diags, dtype=dtypes.float64), variables.Variable(rhs, dtype=dtypes.float64)) def _generateMatrixData(self, matrix_size, batch_size, num_rhs, seed=42): np.random.seed(seed) import scipy.sparse as sparse # pylint:disable=g-import-not-at-top # By being strictly diagonally dominant, we guarantee invertibility.d diag = 2 * np.abs(np.random.randn(matrix_size)) + 4.1 subdiag = 2 * np.abs(np.random.randn(matrix_size - 1)) superdiag = 2 * np.abs(np.random.randn(matrix_size - 1)) matrix = sparse.diags([superdiag, diag, subdiag], [1, 0, -1]).toarray() vector = np.random.randn(batch_size, matrix_size, num_rhs) return (variables.Variable(np.tile(matrix, (batch_size, 1, 1))), variables.Variable(vector)) def _benchmark(self, generate_data_fn, test_name_format_string): devices = [("/cpu:0", "cpu")] if test.is_gpu_available(cuda_only=True): devices += [("/gpu:0", "gpu")] for device_option, pivoting_option, size_option in \ itertools.product(devices, self.pivoting_options, self.sizes): device_id, device_name = device_option pivoting, pivoting_name = pivoting_option matrix_size, batch_size, num_rhs = size_option with ops.Graph().as_default(), \ session.Session(config=benchmark.benchmark_config()) as sess, \ ops.device(device_id): diags, rhs = generate_data_fn(matrix_size, batch_size, num_rhs) # Pivoting is not supported by XLA backends. if test.is_xla_enabled() and pivoting: return x = linalg_impl.tridiagonal_solve( diags, rhs, partial_pivoting=pivoting) self.evaluate(variables.global_variables_initializer()) self.run_op_benchmark( sess, control_flow_ops.group(x), min_iters=10, store_memory_usage=False, name=test_name_format_string.format(device_name, matrix_size, batch_size, num_rhs, pivoting_name)) def benchmarkTridiagonalSolveOp_WithMatrixInput(self): self._benchmark( self._generateMatrixData, test_name_format_string=( "tridiagonal_solve_matrix_format_{}_matrix_size_{}_" "batch_size_{}_num_rhs_{}_{}")) def benchmarkTridiagonalSolveOp(self): self._benchmark( self._generateMatrixData, test_name_format_string=("tridiagonal_solve_{}_matrix_size_{}_" "batch_size_{}_num_rhs_{}_{}")) if __name__ == "__main__": for name, fun in dict(TridiagonalSolveOpTest.__dict__).items(): if not name.startswith("test"): continue if hasattr(fun, FLAG_NO_PARAMETERIZATION): continue # Replace testFoo with testFoo_pivoting and testFoo_noPivoting, setting # self.pivoting to corresponding value. 
delattr(TridiagonalSolveOpTest, name) def decor(test_fun, pivoting): def wrapped(instance): instance.pivoting = pivoting test_fun(instance) return wrapped setattr(TridiagonalSolveOpTest, name + "_pivoting", decor(fun, pivoting=True)) if not hasattr(fun, FLAG_REQUIRES_PIVOTING): setattr(TridiagonalSolveOpTest, name + "_noPivoting", decor(fun, pivoting=False)) test.main()
TridiagonalSolveOpTest
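A standalone sketch of the public API the tests above exercise, reusing the 3x3 case from test3x3 (compact diagonals format, partial pivoting left at its default):

# Solve a 3x3 tridiagonal system; the padding entries of the super/sub diagonals are ignored.
import tensorflow as tf

diagonals = tf.constant([[2.0, -1.0, 0.0],    # superdiagonal, padded at the end
                         [1.0,  3.0, 1.0],    # main diagonal
                         [0.0, -1.0, -2.0]])  # subdiagonal, padded at the start
rhs = tf.constant([1.0, 2.0, 3.0])
x = tf.linalg.tridiagonal_solve(diagonals, rhs, diagonals_format="compact",
                                partial_pivoting=True)
print(x.numpy())  # [-3.  2.  7.], matching the expected result in test3x3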
python
getsentry__sentry
src/sentry/types/region.py
{ "start": 4042, "end": 12950 }
class ____: """A set of regions in a Sentry environment. This is a singleton class. It is immutable in a production environment, but affords overrides by the subclass TestEnvRegionDirectory. """ def __init__(self, regions: Collection[Region]) -> None: self._regions = frozenset(regions) self._by_name = {r.name: r for r in self._regions} @property def regions(self) -> frozenset[Region]: return self._regions def get_by_name(self, region_name: str) -> Region | None: return self._by_name.get(region_name) def get_regions(self, category: RegionCategory | None = None) -> Iterable[Region]: return (r for r in self.regions if (category is None or r.category == category)) def get_region_names(self, category: RegionCategory | None = None) -> Iterable[str]: return (r.name for r in self.regions if (category is None or r.category == category)) def validate_all(self) -> None: for region in self.regions: region.validate() def _parse_raw_config(region_config: list[RegionConfig]) -> Iterable[Region]: for config_value in region_config: yield Region( name=config_value["name"], snowflake_id=config_value["snowflake_id"], category=RegionCategory(config_value["category"]), address=config_value["address"], visible=config_value.get("visible", True), ) def _generate_monolith_region_if_needed(regions: Collection[Region]) -> Iterable[Region]: """Check whether a default monolith region must be generated. Check the provided set of regions to see whether a region with the configured name is present. If so, return an empty iterator. Else, yield the newly generated region. """ if not settings.SENTRY_MONOLITH_REGION: raise RegionConfigurationError( "`SENTRY_MONOLITH_REGION` must provide a default region name" ) if not regions: yield Region( name=settings.SENTRY_MONOLITH_REGION, snowflake_id=0, address=options.get("system.url-prefix"), category=RegionCategory.MULTI_TENANT, ) elif not any(r.name == settings.SENTRY_MONOLITH_REGION for r in regions): raise RegionConfigurationError( "The SENTRY_MONOLITH_REGION setting must point to a region name " f"({settings.SENTRY_MONOLITH_REGION=!r}; " f"region names = {[r.name for r in regions]!r})" ) def load_from_config(region_config: list[RegionConfig]) -> RegionDirectory: try: regions = set(_parse_raw_config(region_config)) regions |= set(_generate_monolith_region_if_needed(regions)) return RegionDirectory(regions) except RegionConfigurationError as e: sentry_sdk.capture_exception(e) raise except Exception as e: sentry_sdk.capture_exception(e) raise RegionConfigurationError("Unable to parse region_config.") from e _global_regions: RegionDirectory | None = None def set_global_directory(directory: RegionDirectory) -> None: if not in_test_environment(): raise Exception( "The region directory can be set directly only in a test environment. " "Otherwise, it should be automatically loaded from config when " "get_global_directory is first called." ) global _global_regions _global_regions = directory def get_global_directory() -> RegionDirectory: global _global_regions if _global_regions is not None: return _global_regions from django.conf import settings # For now, assume that all region configs can be taken in through Django # settings. We may investigate other ways of delivering those configs in # production. 
_global_regions = load_from_config(settings.SENTRY_REGION_CONFIG) return _global_regions def get_region_by_name(name: str) -> Region: """Look up a region by name.""" global_regions = get_global_directory() region = global_regions.get_by_name(name) if region is not None: return region else: region_names = list(global_regions.get_region_names(RegionCategory.MULTI_TENANT)) raise RegionResolutionError( f"No region with name: {name!r} " f"(expected one of {region_names!r} or a single-tenant name)" ) def is_region_name(name: str) -> bool: return get_global_directory().get_by_name(name) is not None def subdomain_is_region(request: HttpRequest) -> bool: subdomain = getattr(request, "subdomain", None) if subdomain is None: return False return is_region_name(subdomain) @control_silo_function def get_region_for_organization(organization_id_or_slug: str) -> Region: """Resolve an organization to the region where its data is stored.""" from sentry.models.organizationmapping import OrganizationMapping if organization_id_or_slug.isdecimal(): mapping = OrganizationMapping.objects.filter( organization_id=organization_id_or_slug ).first() else: mapping = OrganizationMapping.objects.filter(slug=organization_id_or_slug).first() if not mapping: raise RegionResolutionError( f"Organization {organization_id_or_slug} has no associated mapping." ) return get_region_by_name(name=mapping.region_name) def get_local_region() -> Region: """Get the region in which this server instance is running. Return the monolith region if this server instance is in monolith mode. Otherwise, it must be a region silo; raise RegionContextError otherwise. """ if SiloMode.get_current_mode() == SiloMode.MONOLITH: return get_region_by_name(settings.SENTRY_MONOLITH_REGION) if SiloMode.get_current_mode() != SiloMode.REGION: raise RegionContextError("Not a region silo") # In our threaded acceptance tests, we need to override the region of the current # context when passing through test rpc calls, but we can't rely on settings because # django settings are not thread safe :'( # We use this thread local instead which is managed by the SiloMode context managers single_process_region = SingleProcessSiloModeState.get_region() if single_process_region is not None: return single_process_region if not settings.SENTRY_REGION: if in_test_environment(): return get_region_by_name(settings.SENTRY_MONOLITH_REGION) else: raise Exception("SENTRY_REGION must be set when server is in REGION silo mode") return get_region_by_name(settings.SENTRY_REGION) @control_silo_function def _find_orgs_for_user(user_id: int) -> set[int]: from sentry.models.organizationmembermapping import OrganizationMemberMapping return { m["organization_id"] for m in OrganizationMemberMapping.objects.filter(user_id=user_id).values("organization_id") } @control_silo_function def find_regions_for_orgs(org_ids: Iterable[int]) -> set[str]: from sentry.models.organizationmapping import OrganizationMapping if SiloMode.get_current_mode() == SiloMode.MONOLITH: return {settings.SENTRY_MONOLITH_REGION} else: return set( OrganizationMapping.objects.filter(organization_id__in=org_ids).values_list( "region_name", flat=True ) ) @control_silo_function def find_regions_for_user(user_id: int) -> set[str]: if SiloMode.get_current_mode() == SiloMode.MONOLITH: return {settings.SENTRY_MONOLITH_REGION} org_ids = _find_orgs_for_user(user_id) return find_regions_for_orgs(org_ids) @control_silo_function def find_regions_for_sentry_app(sentry_app: SentryApp) -> set[str]: from sentry.models.organizationmapping import 
OrganizationMapping from sentry.sentry_apps.models.sentry_app_installation import SentryAppInstallation if SiloMode.get_current_mode() == SiloMode.MONOLITH: return {settings.SENTRY_MONOLITH_REGION} organizations_with_installations = SentryAppInstallation.objects.filter( sentry_app=sentry_app ).values_list("organization_id") regions = ( OrganizationMapping.objects.filter(organization_id__in=organizations_with_installations) .distinct("region_name") .values_list("region_name") ) return {r[0] for r in regions} def find_all_region_names() -> Iterable[str]: return get_global_directory().get_region_names() def find_all_multitenant_region_names() -> list[str]: """ Return all visible multi_tenant regions. """ regions = get_global_directory().get_regions(RegionCategory.MULTI_TENANT) return list([r.name for r in regions if r.visible]) def find_all_region_addresses() -> Iterable[str]: return (r.address for r in get_global_directory().regions)
RegionDirectory
python
great-expectations__great_expectations
contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_present_in_other_table.py
{ "start": 719, "end": 9999 }
class ____(QueryExpectation): """Expect the values in a column to be present in another table. This is an Expectation that allows for the validation of referential integrity, that a foreign key exists in another table. In the following example, order table has a foreign key to customer table, and referential integrity is preserved, because all the values of CUSTOMER_ID in order_table_1 are present in the CUSTOMER_ID column of customer_table. "order_table_1": { "ORDER_ID": ["aaa", "bbb", "ccc"], "CUSTOMER_ID": [1, 1, 3], } "customer_table": { "CUSTOMER_ID": [1, 2, 3], } However, in the second example, referential integrity is not preserved, because there are two values (4 and 5) in the CUSTOMER_ID column of order_table_2 that are not present in the CUSTOMER_ID column of customer_table. "order_table_2": { "ORDER_ID": ["ddd", "eee", "fff"], "CUSTOMER_ID": [1, 4, 5], } "customer_table": { "CUSTOMER_ID": [1, 2, 3], } ExpectColumnValuesToBePresentInAnotherTable will PASS for example 1 and FAIL for example 2. Args: foreign_key_column: foreign key column of current table that we want to validate. foreign_table: foreign table name. foreign_table_key_column: key column in foreign table. """ metric_dependencies = ("query.template_values",) foreign_key_column: str foreign_table: str foreign_table_key_column: str template_dict: dict = {} query: str = """ SELECT a.{foreign_key_column} FROM {batch} a LEFT JOIN {foreign_table} b ON a.{foreign_key_column} = b.{foreign_table_key_column} WHERE b.{foreign_table_key_column} IS NULL """ library_metadata: ClassVar[dict] = { "maturity": "experimental", "tags": ["table expectation", "multi-table expectation", "query-based"], "contributors": [ "@great_expectations", ], "requirements": [ "sqlalchemy", "snowflake-sqlalchemy", "snowflake-connector-python", ], "has_full_test_suite": False, "manually_reviewed_code": True, } success_keys: ClassVar[Tuple[str, ...]] = ( "template_dict", "query", ) domain_keys: ClassVar[Tuple[str, ...]] = ( "query", "batch_id", "row_condition", "condition_parser", ) examples: ClassVar[List[dict]] = [ { "data": [ { "dataset_name": "order_table_1", "data": { "ORDER_ID": ["aaa", "bbb", "ccc"], "CUSTOMER_ID": [1, 1, 3], }, }, { "dataset_name": "customer_table", "data": { "CUSTOMER_ID": [1, 2, 3], }, }, ], "only_for": ["snowflake", "sqlite"], "tests": [ { "title": "basic_positive_test", "exact_match_out": False, "include_in_gallery": True, "in": { "foreign_key_column": "CUSTOMER_ID", "foreign_table": "customer_table", "foreign_table_key_column": "CUSTOMER_ID", }, "out": { "success": True, "result": { "observed_value": "0 missing values.", "unexpected_index_list": [], }, }, }, ], }, { "data": [ { "dataset_name": "order_table_2", "data": { "ORDER_ID": ["aaa", "bbb", "ccc"], "CUSTOMER_ID": [1, 5, 6], }, }, { "dataset_name": "customer_table", "data": { "CUSTOMER_ID": [1, 2, 3], }, }, ], "only_for": ["snowflake", "sqlite"], "tests": [ { "title": "basic_negative_test", "exact_match_out": False, "include_in_gallery": True, "in": { "foreign_key_column": "CUSTOMER_ID", "foreign_table": "customer_table", "foreign_table_key_column": "CUSTOMER_ID", }, "out": { "success": False, "result": { "observed_value": "2 missing values.", "unexpected_count": 2, "unexpected_index_list": [ {"customer_id": "5"}, {"customer_id": "6"}, ], }, }, }, ], }, ] @root_validator def _validate_template_dict(cls, values): template_dict: dict = { "foreign_key_column": values.get("foreign_key_column"), "foreign_table": values.get("foreign_table"), "foreign_table_key_column": 
values.get("foreign_table_key_column"), } values["template_dict"] = template_dict return values @classmethod @override @renderer(renderer_type="renderer.prescriptive") def _prescriptive_renderer( cls, configuration: Optional[ExpectationConfiguration] = None, result: Optional[ExpectationValidationResult] = None, runtime_configuration: Optional[dict] = None, ) -> List[RenderedStringTemplateContent]: runtime_configuration = runtime_configuration or {} styling = runtime_configuration.get("styling") foreign_key_column: str = configuration.kwargs.get("foreign_key_column") foreign_table: str = configuration.kwargs.get("foreign_table") foreign_table_key_column: str = configuration.kwargs.get("foreign_table_key_column") template_str = "All values in column $foreign_key_column are present in column $foreign_table_key_column of table $foreign_table." params = { "foreign_key_column": foreign_key_column, "foreign_table": foreign_table, "foreign_table_key_column": foreign_table_key_column, } return [ RenderedStringTemplateContent( content_block_type="string_template", string_template={ "template": template_str, "params": params, "styling": styling, }, ) ] @classmethod @override @renderer(renderer_type="renderer.diagnostic.unexpected_table") def _diagnostic_unexpected_table_renderer( cls, configuration: ExpectationConfiguration = None, result: ExpectationValidationResult = None, runtime_configuration: Optional[dict] = None, **kwargs, ): if result is None: return None result_dict: Optional[dict] = result.result if result_dict is None: return None unexpected_index_list: Optional[List[dict]] = result_dict.get("unexpected_index_list") # Don't render table if we don't have unexpected_values if not unexpected_index_list: return None unexpected_index_df: pd.DataFrame = pd.DataFrame(unexpected_index_list, dtype="string") # extract column name from unexpected values column_name: str = list(unexpected_index_list[0].keys())[0].upper() header_row = [f"Missing Values for {column_name} Column"] row_list = [] for index, row in unexpected_index_df.iterrows(): unexpected_value = row row_list.append(unexpected_value) unexpected_table_content_block = RenderedTableContent( content_block_type="table", table=row_list, # type: ignore[arg-type] header_row=header_row, # type: ignore[arg-type] styling={"body": {"classes": ["table-bordered", "table-sm", "mt-3"]}}, ) return [unexpected_table_content_block] def _validate( self, metrics: dict, runtime_configuration: Optional[dict] = None, execution_engine: Optional[ExecutionEngine] = None, ) -> Union[ExpectationValidationResult, dict]: unexpected_values = metrics.get("query.template_values") final_value = len(unexpected_values) return ExpectationValidationResult( success=(final_value == 0), result={ "observed_value": f"{final_value} missing value{'s' if final_value != 1 else ''}.", "unexpected_index_list": unexpected_values, }, )
ExpectColumnValuesToBePresentInOtherTable
python
sympy__sympy
sympy/stats/crv.py
{ "start": 1554, "end": 2210 }
class ____(ContinuousDomain, SingleDomain):
    """
    A univariate domain with continuous support

    Represented using a single symbol and interval.
    """
    def compute_expectation(self, expr, variables=None, **kwargs):
        if variables is None:
            variables = self.symbols
        if not variables:
            return expr
        if frozenset(variables) != frozenset(self.symbols):
            raise ValueError("Values should be equal")
        # assumes only intervals
        return Integral(expr, (self.symbol, self.set), **kwargs)

    def as_boolean(self):
        return self.set.as_relational(self.symbol)
SingleContinuousDomain
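The compute_expectation method in the record above reduces an expectation over a univariate continuous domain to a single Integral over the symbol's interval. A minimal sketch of that same reduction in plain SymPy (the Exponential(1) density and interval are chosen only for illustration, not taken from the record):

from sympy import Symbol, exp, integrate, oo

# E[X] for an Exponential(1) density over (0, oo): the same
# Integral(expr, (symbol, set)) form that compute_expectation builds.
x = Symbol("x", positive=True)
assert integrate(x * exp(-x), (x, 0, oo)) == 1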
python
readthedocs__readthedocs.org
readthedocs/analytics/apps.py
{ "start": 84, "end": 280 }
class ____(AppConfig):
    """Analytics app init code."""

    default_auto_field = "django.db.models.BigAutoField"
    name = "readthedocs.analytics"
    verbose_name = "Analytics"
AnalyticsAppConfig
python
huggingface__transformers
src/transformers/models/deberta_v2/modeling_deberta_v2.py
{ "start": 18870, "end": 20465 }
class ____(nn.Module):
    def __init__(self, config):
        super().__init__()
        kernel_size = getattr(config, "conv_kernel_size", 3)
        groups = getattr(config, "conv_groups", 1)
        self.conv_act = getattr(config, "conv_act", "tanh")
        self.conv = nn.Conv1d(
            config.hidden_size, config.hidden_size, kernel_size, padding=(kernel_size - 1) // 2, groups=groups
        )
        self.LayerNorm = LayerNorm(config.hidden_size, config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.config = config

    def forward(self, hidden_states, residual_states, input_mask):
        out = self.conv(hidden_states.permute(0, 2, 1).contiguous()).permute(0, 2, 1).contiguous()
        rmask = (1 - input_mask).bool()
        out.masked_fill_(rmask.unsqueeze(-1).expand(out.size()), 0)
        out = ACT2FN[self.conv_act](self.dropout(out))

        layer_norm_input = residual_states + out
        output = self.LayerNorm(layer_norm_input).to(layer_norm_input)

        if input_mask is None:
            output_states = output
        else:
            if input_mask.dim() != layer_norm_input.dim():
                if input_mask.dim() == 4:
                    input_mask = input_mask.squeeze(1).squeeze(1)
                input_mask = input_mask.unsqueeze(2)

            input_mask = input_mask.to(output.dtype)
            output_states = output * input_mask

        return output_states


# Copied from transformers.models.deberta.modeling_deberta.DebertaEmbeddings with DebertaLayerNorm->LayerNorm,Deberta->DebertaV2
ConvLayer
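The forward pass in the record above hinges on permuting (batch, seq, hidden) to (batch, hidden, seq) for nn.Conv1d and then zeroing padded positions. A minimal sketch of that pattern with made-up shapes (the tensor sizes and mask values are illustrative assumptions, not taken from the record):

import torch
import torch.nn as nn

batch, seq_len, hidden = 2, 5, 8
hidden_states = torch.randn(batch, seq_len, hidden)
input_mask = torch.tensor([[1, 1, 1, 0, 0],
                           [1, 1, 1, 1, 1]])  # 1 = real token, 0 = padding

conv = nn.Conv1d(hidden, hidden, kernel_size=3, padding=1)
out = conv(hidden_states.permute(0, 2, 1)).permute(0, 2, 1)  # back to (B, T, H)
rmask = (1 - input_mask).bool()                              # True at padded positions
out = out.masked_fill(rmask.unsqueeze(-1).expand(out.size()), 0)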
python
keras-team__keras
keras/src/layers/regularization/gaussian_dropout.py
{ "start": 192, "end": 2072 }
class ____(layers.Layer):
    """Apply multiplicative 1-centered Gaussian noise.

    As it is a regularization layer, it is only active at training time.

    Args:
        rate: Float, drop probability (as with `Dropout`).
            The multiplicative noise will have
            standard deviation `sqrt(rate / (1 - rate))`.
        seed: Integer, optional random seed to enable deterministic behavior.

    Call arguments:
        inputs: Input tensor (of any rank).
        training: Python boolean indicating whether the layer should behave in
            training mode (adding dropout) or in inference mode (doing
            nothing).
    """

    def __init__(self, rate, seed=None, **kwargs):
        super().__init__(**kwargs)
        if not 0 <= rate <= 1:
            raise ValueError(
                f"Invalid value received for argument "
                "`rate`. Expected a float value between 0 and 1. "
                f"Received: rate={rate}"
            )
        self.rate = rate
        self.seed = seed
        if rate > 0:
            self.seed_generator = backend.random.SeedGenerator(seed)
        self.supports_masking = True

        self._build_at_init()

    def call(self, inputs, training=False):
        if training and self.rate > 0:
            stddev = math.sqrt(self.rate / (1.0 - self.rate))
            return inputs * backend.random.normal(
                shape=ops.shape(inputs),
                mean=1.0,
                stddev=stddev,
                dtype=self.compute_dtype,
                seed=self.seed_generator,
            )
        return inputs

    def compute_output_shape(self, input_shape):
        return input_shape

    def get_config(self):
        base_config = super().get_config()
        config = {
            "rate": self.rate,
            "seed": self.seed,
        }
        return {**base_config, **config}
GaussianDropout
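A short usage sketch of the layer above through the public Keras API (the rate, seed, and input values are arbitrary): multiplicative noise is applied only when training=True, otherwise the call is the identity.

import numpy as np
import keras

layer = keras.layers.GaussianDropout(rate=0.2, seed=0)
x = np.ones((4, 3), dtype="float32")
y_train = layer(x, training=True)   # each element multiplied by N(1, sqrt(0.2 / 0.8)) noise
y_infer = layer(x, training=False)  # returned unchanged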
python
sympy__sympy
sympy/stats/matrix_distributions.py
{ "start": 4196, "end": 5088 }
class ____: """Returns the sample from numpy of the given distribution""" ### TODO: Add tests after adding matrix distributions in numpy_rv_map def __new__(cls, dist, size, seed=None): return cls._sample_numpy(dist, size, seed) @classmethod def _sample_numpy(cls, dist, size, seed): """Sample from NumPy.""" numpy_rv_map = { } sample_shape = { } dist_list = numpy_rv_map.keys() if dist.__class__.__name__ not in dist_list: return None import numpy if seed is None or isinstance(seed, int): rand_state = numpy.random.default_rng(seed=seed) else: rand_state = seed samp = numpy_rv_map[dist.__class__.__name__](dist, prod(size), rand_state) return samp.reshape(size + sample_shape[dist.__class__.__name__](dist))
SampleMatrixNumpy
python
google__jax
jax/_src/api.py
{ "start": 109631, "end": 142062 }
class ____: fun: Callable in_tree: PyTreeDef out_tree: PyTreeDef args_res: list[Any] opaque_residuals: list[Any] jaxpr = property(lambda self: self.fun.args[2]) # type: ignore def __call__(self, out_ct, *extra_args): if extra_args: name, *_ = self.jaxpr.debug_info.func_src_info.split(' ') raise TypeError(_vjp_too_many_args(name, len(extra_args))) return self.fun(self.in_tree, self.out_tree, self.args_res, self.opaque_residuals)(out_ct) def with_refs(self, *maybe_ct_refs): return self.fun(self.in_tree, self.out_tree, self.args_res, self.opaque_residuals, *maybe_ct_refs) # Only safe to put these in cache keys if residuals aren't mutated. Beware! __hash__ = object.__hash__ __eq__ = object.__eq__ register_pytree_node( VJP, lambda vjp: ((vjp.args_res, vjp.opaque_residuals), (vjp.fun, vjp.in_tree, vjp.out_tree)), lambda meta, args_res: VJP(*meta, *args_res)) @partial(api_boundary, repro_api_name="jax.linear_transpose") def linear_transpose(fun: Callable, *primals, reduce_axes=()) -> Callable: """Transpose a function that is promised to be linear. For linear functions, this transformation is equivalent to :py:func:`vjp`, but avoids the overhead of computing the forward pass. The outputs of the transposed function will always have the exact same dtypes as ``primals``, even if some values are truncated (e.g., from complex to float, or from float64 to float32). To avoid truncation, use dtypes in ``primals`` that match the full range of desired outputs from the transposed function. Integer dtypes are not supported. Args: fun: the linear function to be transposed. *primals: a positional argument tuple of arrays, scalars, or (nested) standard Python containers (tuples, lists, dicts, namedtuples, i.e., pytrees) of those types used for evaluating the shape/dtype of ``fun(*primals)``. These arguments may be real scalars/ndarrays, but that is not required: only the ``shape`` and ``dtype`` attributes are accessed. See below for an example. (Note that the duck-typed objects cannot be namedtuples because those are treated as standard Python containers.) Returns: A callable that calculates the transpose of ``fun``. Valid input into this function must have the same shape/dtypes/structure as the result of ``fun(*primals)``. Output will be a tuple, with the same shape/dtypes/structure as ``primals``. 
>>> import jax >>> >>> f = lambda x, y: 0.5 * x - 0.5 * y >>> scalar = jax.ShapeDtypeStruct(shape=(), dtype=np.dtype(np.float32)) >>> f_transpose = jax.linear_transpose(f, scalar, scalar) >>> f_transpose(1.0) (Array(0.5, dtype=float32), Array(-0.5, dtype=float32)) """ if reduce_axes: raise NotImplementedError("reduce_axes argument to transpose is deprecated") del reduce_axes primals_flat, in_tree = tree_flatten(primals) flat_fun, out_tree = flatten_fun_nokwargs( lu.wrap_init(fun, debug_info=debug_info("linear_transpose", fun, primals, {})), in_tree) in_avals = map(shaped_abstractify, primals_flat) in_dtypes = map(lambda a: a.dtype, in_avals) in_pvals = map(pe.PartialVal.unknown, in_avals) jaxpr, out_pvals, const = pe.trace_to_jaxpr_nounits(flat_fun, in_pvals, instantiate=True) jaxpr, _ = pe.dce_jaxpr(jaxpr, [True] * len(jaxpr.outvars), True) out_avals, _ = unzip2(out_pvals) out_dtypes = map(lambda a: a.dtype, out_avals) if not (all(dtypes.issubdtype(d, np.inexact) for d in in_dtypes + out_dtypes) or all(dtypes.issubdtype(d, np.integer) for d in in_dtypes + out_dtypes)): raise TypeError("linear_transpose only supports [float or complex] -> " "[float or complex], and integer -> integer functions, " f"but got {in_dtypes} -> {out_dtypes}.") @api_boundary def transposed_fun(const, out_cotangent): out_cts, out_tree2 = tree_flatten(out_cotangent) if out_tree() != out_tree2: raise TypeError("cotangent tree does not match function output, " f"expected {out_tree()} but got {out_tree2}") if not all(map(core.typecheck, out_avals, out_cts)): raise TypeError("cotangent type does not match function output, " f"expected {out_avals} but got {out_cts}") dummies = [ad.UndefinedPrimal(a) for a in in_avals] in_cts = ad.backward_pass(jaxpr, True, const, dummies, out_cts) in_cts = map(ad.instantiate_zeros, in_cts) return tree_unflatten(in_tree, in_cts) # Ensure that transposed_fun is a PyTree return Partial(transposed_fun, const) def _flat_axes_specs(abstracted_axes, *args, **kwargs ) -> list[pe.AbstractedAxesSpec]: if kwargs: raise NotImplementedError def ax_leaf(l): return (isinstance(l, dict) and all_leaves(l.values()) or isinstance(l, tuple) and all_leaves(l, lambda x: x is None)) return broadcast_prefix(abstracted_axes, args, ax_leaf) @overload def make_jaxpr( fun: Callable, static_argnums: int | Iterable[int] = (), axis_env: Sequence[tuple[AxisName, int]] | None = None, return_shape: Literal[False] = ..., abstracted_axes: Any | None = None, ) -> Callable[..., core.ClosedJaxpr]: ... @overload def make_jaxpr( fun: Callable, static_argnums: int | Iterable[int] = (), axis_env: Sequence[tuple[AxisName, int]] | None = None, return_shape: Literal[True] = ..., abstracted_axes: Any | None = None, ) -> Callable[..., tuple[core.ClosedJaxpr, Any]]: ... @partial(api_boundary, repro_api_name="jax.make_japr") def make_jaxpr( fun: Callable, static_argnums: int | Iterable[int] = (), axis_env: Sequence[tuple[AxisName, int]] | None = None, return_shape: bool = False, abstracted_axes: Any | None = None, ) -> Callable[..., core.ClosedJaxpr | tuple[core.ClosedJaxpr, Any]]: """Create a function that returns the jaxpr of ``fun`` given example args. Args: fun: The function whose ``jaxpr`` is to be computed. Its positional arguments and return value should be arrays, scalars, or standard Python containers (tuple/list/dict) thereof. static_argnums: See the :py:func:`jax.jit` docstring. 
axis_env: Optional, a sequence of pairs where the first element is an axis name and the second element is a positive integer representing the size of the mapped axis with that name. This parameter is useful when lowering functions that involve parallel communication collectives, and it specifies the axis name/size environment that would be set up by applications of :py:func:`jax.pmap`. return_shape: Optional boolean, defaults to ``False``. If ``True``, the wrapped function returns a pair where the first element is the ``ClosedJaxpr`` representation of ``fun`` and the second element is a pytree with the same structure as the output of ``fun`` and where the leaves are objects with ``shape`` and ``dtype`` attributes representing the corresponding types of the output leaves. Returns: A wrapped version of ``fun`` that when applied to example arguments returns a ``ClosedJaxpr`` representation of ``fun`` on those arguments. If the argument ``return_shape`` is ``True``, then the returned function instead returns a pair where the first element is the ``ClosedJaxpr`` representation of ``fun`` and the second element is a pytree representing the structure, shape, dtypes, and named shapes of the output of ``fun``. A ``jaxpr`` is JAX's intermediate representation for program traces. The ``jaxpr`` language is based on the simply-typed first-order lambda calculus with let-bindings. :py:func:`make_jaxpr` adapts a function to return its ``jaxpr``, which we can inspect to understand what JAX is doing internally. The ``jaxpr`` returned is a trace of ``fun`` abstracted to :py:class:`ShapedArray` level. Other levels of abstraction exist internally. We do not describe the semantics of the ``jaxpr`` language in detail here, but instead give a few examples. >>> import jax >>> >>> def f(x): return jax.numpy.sin(jax.numpy.cos(x)) >>> print(f(3.0)) -0.83602 >>> jax.make_jaxpr(f)(3.0) { lambda ; a:f32[]. let b:f32[] = cos a; c:f32[] = sin b in (c,) } >>> jax.make_jaxpr(jax.grad(f))(3.0) { lambda ; a:f32[]. let b:f32[] = cos a c:f32[] = sin a _:f32[] = sin b d:f32[] = cos b e:f32[] = mul 1.0:f32[] d f:f32[] = neg e g:f32[] = mul f c in (g,) } """ try: hash(fun) weakref.ref(fun) except TypeError: fun = partial(fun) @wraps(fun) @api_boundary def make_jaxpr_f(*args, **kwargs): with core.extend_axis_env_nd(axis_env or []): traced = jit(fun, static_argnums=static_argnums, abstracted_axes=abstracted_axes).trace(*args, **kwargs) # `jit` converts tracers in consts to args but `make_jaxpr` callers expect # consts not to be converted. 
num_consts = traced._num_consts if num_consts: jaxpr_ = pe.convert_invars_to_constvars(traced.jaxpr.jaxpr, num_consts) jaxpr = core.ClosedJaxpr(jaxpr_, traced._consts) else: jaxpr = traced.jaxpr if return_shape: out = [ShapeDtypeStruct(o.shape, o.dtype) for o in jaxpr.out_avals] return jaxpr, tree_unflatten(tree_structure(traced.out_info), out) return jaxpr make_jaxpr_f.__module__ = "jax" if hasattr(fun, "__qualname__"): make_jaxpr_f.__qualname__ = f"make_jaxpr({fun.__qualname__})" if hasattr(fun, "__name__"): make_jaxpr_f.__name__ = f"make_jaxpr({fun.__name__})" return make_jaxpr_f def _infer_src_sharding(src, x) -> Sharding | None: if src is not None: return src # pytype: disable=bad-return-type if isinstance(x, array.ArrayImpl): return x.sharding if isinstance(x, core.Tracer): val = x.to_concrete_value() if val is not None and isinstance(val, array.ArrayImpl): return val.sharding return None @util.cache(max_size=2048, trace_context_in_key=False) def _check_string_compatible_sharding(s): """Checks if target devices are compatible with string arrays.""" if isinstance(s, xc.Device) and s.device_kind == "cpu": return if (isinstance(s, Sharding) and s._internal_device_list[0].device_kind == "cpu"): return raise TypeError( "String arrays can only be sharded to CPU devices. Received" f" unsupported device or sharding: {s}") @util.cache(max_size=2048, trace_context_in_key=False) def _check_sharding(aval, s): if (s is not None and not isinstance(s, (xc.Device, Sharding, Format, core.MemorySpace))): raise ValueError( "`jax.device_put` only accepts `None`, `jax.sharding.Sharding`," " `jax.Device`, `Format`, `jax.memory.Space` or a pytree of these" f" values. Received invalid value: {s}") if isinstance(aval, core.ShapedArray) and dtypes.is_string_dtype(aval.dtype): _check_string_compatible_sharding(s) if isinstance(s, Sharding): if isinstance(aval, core.AbstractToken): aval = core.get_token_aval() if not isinstance(s, PmapSharding): pjit.pjit_check_aval_sharding( (s,), (aval,), ("",), "device_put args", allow_uneven_sharding=False) s.shard_shape(aval.shape) # should raise an Error if incompatible def pspec_to_sharding(name, val): if isinstance(val, P): mesh = get_concrete_mesh() if mesh.empty: raise ValueError( "Please set a mesh via `jax.set_mesh` if a PartitionSpec is" f" passed to {name}") return NamedSharding(mesh, val) return val def device_put( x, device: None | xc.Device | Sharding | P | Format | Any = None, *, src: None | xc.Device | Sharding | P | Format | Any = None, donate: bool | Any = False, may_alias: bool | None | Any = None): """Transfers ``x`` to ``device``. Args: x: An array, scalar, or (nested) standard Python container thereof. device: The (optional) :py:class:`Device`, :py:class:`Sharding`, or a (nested) :py:class:`Sharding` in standard Python container (must be a tree prefix of ``x``), representing the device(s) to which ``x`` should be transferred. If given, then the result is committed to the device(s). src: The (optional) :py:class:`Device`, :py:class:`Sharding`, or a (nested) :py:class:`Sharding` in standard Python container (must be a tree prefix of ``x``), representing the device(s) on which ``x`` belongs. donate: bool or a (nested) bool in standard Python container (must be a tree prefix of ``x``). If True, ``x`` can be overwritten and marked deleted in the caller. This is best effort. JAX will donate if possible, otherwise it won't. The input buffer (in the future) will always be deleted if donated. 
may_alias: bool or None or a (nested) bool in standard Python container (must be a tree prefix of ``x``). If False, `x` will be copied. If true, `x` may be aliased depending on the runtime's implementation. Returns: A copy of ``x`` that resides on ``device``. If the ``device`` parameter is ``None``, then this operation behaves like the identity function if the operand is on any device already, otherwise it transfers the data to the default device, uncommitted. For more details on data placement see the :ref:`FAQ on data placement <faq-data-placement>`. This function is always asynchronous, i.e. returns immediately without blocking the calling Python thread until any transfers are completed. """ with config.explicit_device_put_scope(): x_flat, treedef = tree_flatten(x) if (device is None or isinstance(device, (xc.Device, Sharding, core.MemorySpace))): device_flat = [device] * len(x_flat) else: device_flat = flatten_axes("device_put device", treedef, device) if (src is None or isinstance(src, (xc.Device, Sharding, core.MemorySpace))): src_flat = [_infer_src_sharding(src, xf) for xf in x_flat] else: src_flat = flatten_axes("device_put source", treedef, src) src_flat = list(map(_infer_src_sharding, src_flat, x_flat)) device_flat = map(partial(pspec_to_sharding, 'device_put'), device_flat) src_flat = map(partial(pspec_to_sharding, 'device_put'), src_flat) if isinstance(donate, bool): donate_flat = [donate] * len(x_flat) else: donate_flat = flatten_axes("device_put donate", treedef, donate) if isinstance(may_alias, bool): may_alias_flat = [may_alias] * len(x_flat) else: may_alias_flat = flatten_axes("device_put may_alias", treedef, may_alias) copy_semantics = [] for m, d in zip(may_alias_flat, donate_flat): if m and d: raise ValueError('may_alias and donate cannot be True at the same time.') if m is None: m = not d if m and not d: copy_semantics.append(dispatch.ArrayCopySemantics.REUSE_INPUT) elif not m and d: copy_semantics.append(dispatch.ArrayCopySemantics.DONATE_INPUT) else: assert not m and not d copy_semantics.append(dispatch.ArrayCopySemantics.ALWAYS_COPY) dst_avals = [] for xf, d in zip(x_flat, device_flat): aval = shaped_abstractify(xf) aval = dispatch.update_dp_aval(aval, d) dst_avals.append(aval) _check_sharding(aval, d) if core.trace_state_clean(): out_flat = dispatch._batched_device_put_impl( *x_flat, devices=device_flat, srcs=src_flat, # type: ignore copy_semantics=copy_semantics, dst_avals=dst_avals) else: out_flat = dispatch.device_put_p.bind( *x_flat, devices=tuple(device_flat), srcs=tuple(src_flat), copy_semantics=tuple(copy_semantics)) return tree_unflatten(treedef, out_flat) def device_put_sharded(shards: Sequence[Any], devices: Sequence[xc.Device]): # noqa: F811 """Transfer array shards to specified devices and form Array(s). Args: shards: A sequence of arrays, scalars, or (nested) standard Python containers thereof representing the shards to be stacked together to form the output. The length of ``shards`` must equal the length of ``devices``. devices: A sequence of :py:class:`Device` instances representing the devices to which corresponding shards in ``shards`` will be transferred. This function is always asynchronous, i.e. returns immediately. Returns: A Array or (nested) Python container thereof representing the elements of ``shards`` stacked together, with each shard backed by physical device memory specified by the corresponding entry in ``devices``. 
Examples: Passing a list of arrays for ``shards`` results in a sharded array containing a stacked version of the inputs: >>> import jax >>> devices = jax.local_devices() >>> x = [jax.numpy.ones(5) for device in devices] >>> y = jax.device_put_sharded(x, devices) # doctest: +SKIP >>> np.allclose(y, jax.numpy.stack(x)) # doctest: +SKIP True Passing a list of nested container objects with arrays at the leaves for ``shards`` corresponds to stacking the shards at each leaf. This requires all entries in the list to have the same tree structure: >>> x = [(i, jax.numpy.arange(i, i + 4)) for i in range(len(devices))] >>> y = jax.device_put_sharded(x, devices) # doctest: +SKIP >>> type(y) # doctest: +SKIP <class 'tuple'> >>> y0 = jax.device_put_sharded([a for a, b in x], devices) # doctest: +SKIP >>> y1 = jax.device_put_sharded([b for a, b in x], devices) # doctest: +SKIP >>> np.allclose(y[0], y0) # doctest: +SKIP True >>> np.allclose(y[1], y1) # doctest: +SKIP True See Also: - device_put - device_put_replicated """ # TODO(jakevdp): provide a default for devices that considers both local # devices and pods if not isinstance(shards, Sequence): raise TypeError("device_put_sharded `shards` input must be a sequence; " f"got {type(shards)}") if len(shards) != len(devices): raise ValueError(f"len(shards) = {len(shards)} must equal " f"len(devices) = {len(devices)}.") def _device_put_sharded(*xs): avals = [core.get_aval(x) for x in xs] if not all(a1 == a2 for a1, a2 in zip(avals[:-1], avals[1:])): a1, a2 = next((a1, a2) for a1, a2 in zip(avals[:-1], avals[1:]) if a1 != a2) raise ValueError("the shards passed to device_put_sharded must have " f"consistent shape and dtype, but got {a1} and {a2}.") stacked_aval = avals[0].update(shape=(len(devices),) + avals[0].shape) sharding_spec = sharding_specs.create_pmap_sharding_spec(stacked_aval.shape) sharding = PmapSharding(np.array(devices), sharding_spec) if dtypes.issubdtype(stacked_aval.dtype, dtypes.extended): return stacked_aval.dtype._rules.device_put_sharded(xs, stacked_aval, sharding, devices) if config.pmap_no_rank_reduction.value: ys = [] for x in xs: if not isinstance(x, (np.ndarray, basearray.Array)): x = np.asarray(x) ys.append(x[None]) else: ys = xs return pxla.batched_device_put(stacked_aval, sharding, ys, list(devices)) with config.explicit_device_put_scope(): return tree_map(_device_put_sharded, *shards) def device_put_replicated(x: Any, devices: Sequence[xc.Device]): # noqa: F811 """Transfer array(s) to each specified device and form Array(s). Args: x: an array, scalar, or (nested) standard Python container thereof representing the array to be replicated to form the output. devices: A sequence of :py:class:`Device` instances representing the devices to which ``x`` will be transferred. This function is always asynchronous, i.e. returns immediately. Returns: An Array or (nested) Python container thereof representing the value of ``x`` broadcasted along a new leading axis of size ``len(devices)``, with each slice along that new leading axis backed by memory on the device specified by the corresponding entry in ``devices``. 
Examples: Passing an array: >>> import jax >>> devices = jax.local_devices() >>> x = jax.numpy.array([1., 2., 3.]) >>> y = jax.device_put_replicated(x, devices) # doctest: +SKIP >>> np.allclose(y, jax.numpy.stack([x for _ in devices])) # doctest: +SKIP True See Also: - device_put - device_put_sharded """ if not isinstance(devices, Sequence) or not devices: raise ValueError("`devices` argument to `device_put_replicated must be " "a non-empty sequence.") def _device_put_replicated(x): aval = core.unmapped_aval(len(devices), 0, core.get_aval(x)) assert isinstance(aval, ShapedArray) sharding_spec = sharding_specs.create_pmap_sharding_spec(aval.shape) if config.pmap_no_rank_reduction.value: if isinstance(x, (np.ndarray, basearray.Array)): buf = device_put(x[None], devices[0]) else: buf = device_put(x, devices[0])[None] else: buf = device_put(x, devices[0]) sharding = PmapSharding(np.array(devices), sharding_spec) if dtypes.issubdtype(aval.dtype, dtypes.extended): return aval.dtype._rules.device_put_replicated(buf, aval, sharding, devices) return pxla.batched_device_put(aval, sharding, [buf] * len(devices), devices) with config.explicit_device_put_scope(): return tree_map(_device_put_replicated, x) # TODO(mattjj): consider revising def _device_get(x): if isinstance(x, core.Tracer): return x # Extended dtypes dispatch via their device_get rule. if isinstance(x, basearray.Array) and dtypes.issubdtype(x.dtype, dtypes.extended): bufs, tree = tree_util.dispatch_registry.flatten(x) return tree.unflatten(device_get(bufs)) # Other types dispatch via their __array__ method. try: toarray = x.__array__ except AttributeError: return x else: return toarray() def device_get(x: Any): """Transfer ``x`` to host. If ``x`` is a pytree, then the individual buffers are copied in parallel. Args: x: An array, scalar, Array or (nested) standard Python container thereof representing the array to be transferred to host. Returns: An array or (nested) Python container thereof representing the value of ``x``. Examples: Passing a Array: >>> import jax >>> x = jax.numpy.array([1., 2., 3.]) >>> jax.device_get(x) array([1., 2., 3.], dtype=float32) Passing a scalar (has no effect): >>> jax.device_get(1) 1 See Also: - device_put - device_put_sharded - device_put_replicated """ with config.explicit_device_get_scope(): for y in tree_leaves(x): try: y.copy_to_host_async() except AttributeError: pass return tree_map(_device_get, x) @partial(api_boundary, repro_api_name="jax.eval_shape") def eval_shape(fun: Callable, *args, **kwargs): """Compute the shape/dtype of ``fun`` without any FLOPs. This utility function is useful for performing shape inference. Its input/output behavior is defined by:: def eval_shape(fun, *args, **kwargs): out = fun(*args, **kwargs) shape_dtype_struct = lambda x: jax.ShapeDtypeStruct(x.shape, x.dtype) return jax.tree_util.tree_map(shape_dtype_struct, out) But instead of applying ``fun`` directly, which might be expensive, it uses JAX's abstract interpretation machinery to evaluate the shapes without doing any FLOPs. Using :py:func:`eval_shape` can also catch shape errors, and will raise same shape errors as evaluating ``fun(*args, **kwargs)``. Args: fun: The function whose output shape should be evaluated. *args: a positional argument tuple of arrays, scalars, or (nested) standard Python containers (tuples, lists, dicts, namedtuples, i.e. pytrees) of those types. 
Since only the ``shape`` and ``dtype`` attributes are accessed, one can use :class:`jax.ShapeDtypeStruct` or another container that duck-types as ndarrays (note however that duck-typed objects cannot be namedtuples because those are treated as standard Python containers). **kwargs: a keyword argument dict of arrays, scalars, or (nested) standard Python containers (pytrees) of those types. As in ``args``, array values need only be duck-typed to have ``shape`` and ``dtype`` attributes. Returns: out: a nested PyTree containing :class:`jax.ShapeDtypeStruct` objects as leaves. For example: >>> import jax >>> import jax.numpy as jnp >>> >>> f = lambda A, x: jnp.tanh(jnp.dot(A, x)) >>> A = jax.ShapeDtypeStruct((2000, 3000), jnp.float32) >>> x = jax.ShapeDtypeStruct((3000, 1000), jnp.float32) >>> out = jax.eval_shape(f, A, x) # no FLOPs performed >>> print(out.shape) (2000, 1000) >>> print(out.dtype) float32 All arguments passed via :func:`eval_shape` will be treated as dynamic; static arguments can be included via closure, for example using :func:`functools.partial`: >>> import jax >>> from jax import lax >>> from functools import partial >>> import jax.numpy as jnp >>> >>> x = jax.ShapeDtypeStruct((1, 1, 28, 28), jnp.float32) >>> kernel = jax.ShapeDtypeStruct((32, 1, 3, 3), jnp.float32) >>> >>> conv_same = partial(lax.conv_general_dilated, window_strides=(1, 1), padding="SAME") >>> out = jax.eval_shape(conv_same, x, kernel) >>> print(out.shape) (1, 32, 28, 28) >>> print(out.dtype) float32 """ if type(fun) is xc._xla.PjitFunction: return fun.trace(*args, **kwargs).out_info # type: ignore try: hash(fun) except TypeError: fun = partial(fun) return jit(fun).trace(*args, **kwargs).out_info @partial(api_boundary, repro_api_name="jax.named_call") def named_call( fun: F, *, name: str | None = None, ) -> F: """Adds a user specified name to a function when staging out JAX computations. When staging out computations for just-in-time compilation to XLA (or other backends such as TensorFlow) JAX runs your Python program but by default does not preserve any of the function names or other metadata associated with it. This can make debugging the staged out (and/or compiled) representation of your program complicated because there is limited context information for each operation being executed. `named_call` tells JAX to stage the given function out as a subcomputation with a specific name. When the staged out program is compiled with XLA these named subcomputations are preserved and show up in debugging utilities like the TensorFlow Profiler in TensorBoard. Names are also preserved when staging out JAX programs to TensorFlow using :func:`experimental.jax2tf.convert`. Args: fun: Function to be wrapped. This can be any Callable. name: Optional. The prefix to use to name all sub computations created within the name scope. Use the fun.__name__ if not specified. Returns: A version of ``fun`` that is wrapped in a ``named_scope``. """ if name is None: name = fun.__name__ return source_info_util.extend_name_stack(name)(fun) def named_scope( name: str, ) -> source_info_util.ExtendNameStackContextManager: """A context manager that adds a user specified name to the JAX name stack. When staging out computations for just-in-time compilation to XLA (or other backends such as TensorFlow) JAX does not, by default, preserve the names (or other source metadata) of Python functions it encounters. 
This can make debugging the staged out (and/or compiled) representation of your program complicated because there is limited context information for each operation being executed. ``named_scope`` tells JAX to stage the given function with additional annotations on the underlying operations. JAX internally keeps track of these annotations in a name stack. When the staged out program is compiled with XLA these annotations are preserved and show up in debugging utilities like the TensorFlow Profiler in TensorBoard. Names are also preserved when staging out JAX programs to TensorFlow using :func:`experimental.jax2tf.convert`. Args: name: The prefix to use to name all operations created within the name scope. Yields: Yields ``None``, but enters a context in which `name` will be appended to the active name stack. Examples: ``named_scope`` can be used as a context manager inside compiled functions: >>> import jax >>> >>> @jax.jit ... def layer(w, x): ... with jax.named_scope("dot_product"): ... logits = w.dot(x) ... with jax.named_scope("activation"): ... return jax.nn.relu(logits) It can also be used as a decorator: >>> @jax.jit ... @jax.named_scope("layer") ... def layer(w, x): ... logits = w.dot(x) ... return jax.nn.relu(logits) """ if not isinstance(name, str): raise TypeError("named_scope name argument must be a string.") return source_info_util.extend_name_stack(name) def effects_barrier(): """Waits until existing functions have completed any side-effects.""" dispatch.runtime_tokens.block_until_ready() def block_until_ready(x): """ Tries to call a ``block_until_ready`` method on pytree leaves. Args: x: a pytree, usually with at least some JAX array instances at its leaves. Returns: A pytree with the same structure and values of the input, where the values of all JAX array leaves are ready. """ def try_to_block(x): try: return x.block_until_ready() except AttributeError: return x arrays = [] for leaf in tree_leaves(x): if isinstance(leaf, array.ArrayImpl): arrays.append(leaf) else: try_to_block(leaf) if not arrays: # `arrays` will be empty if tree_leaves(x) is empty or all leaves are not # jax.Array. pass elif len(arrays) == 1: # Fast path for single array. try_to_block(arrays[0]) else: # Optimized for multiple arrays. xc.batched_block_until_ready(arrays) return x def copy_to_host_async(x): """ Tries to call a ``copy_to_host_async`` method on pytree leaves. For each leaf this method will try to call the ``copy_to_host_async`` method on the leaf. If the leaf is not a JAX array, or if the leaf does not have a ``copy_to_host_async`` method, then this method will do nothing to the leaf. Args: x: a pytree, usually with at least some JAX array instances at its leaves. Returns: A pytree with the same structure and values of the input, where the host copy of the values of all JAX array leaves are started. """ for leaf in tree_leaves(x): try: copy_fn = leaf.copy_to_host_async except AttributeError: pass else: copy_fn() return x def clear_backends(): """ Clear all backend clients so that new backend clients can be created later. """ xb._clear_backends() util.clear_all_caches() pjit._cpp_pjit_cache_fun_only.clear() pjit._cpp_pjit_cache_explicit_attributes.clear() xc._xla.PjitFunctionCache.clear_all() @atexit.register def clean_up(): if xb._default_backend is not None: clear_backends() clear_caches() # Shut down distributed system if it exists. Otherwise, this is a no-op. distributed.shutdown() def live_arrays(platform=None): """Return all live arrays in the backend for `platform`. 
If platform is None, it is the default backend. """ return xb.get_backend(platform).live_arrays() def clear_caches(): """Clear all compilation and staging caches. This doesn't clear the persistent cache; to disable it (e.g. for benchmarks), set the jax_enable_compilation_cache config option to False. """ # Clear all lu.cache, util.cache and util.weakref_lru_cache instances # (used for staging and Python-dispatch compiled executable caches). util.clear_all_caches() # Clear all C++ compiled executable caches for pjit pjit._cpp_pjit_cache_fun_only.clear() pjit._cpp_pjit_cache_explicit_attributes.clear() pjit._infer_params_cached.cache_clear() xc._xla.PjitFunctionCache.clear_all() # Clear all C++ compiled executable caches for pmap for fun in _pmap_cache_clears: fun._cache_clear()
VJP
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/backfill.py
{ "start": 29463, "end": 29722 }
class ____(graphene.Union):
    class Meta:
        types = (GraphenePartitionBackfills, GraphenePythonError)
        name = "PartitionBackfillsOrError"


GrapheneBackfillPolicyType = graphene.Enum.from_enum(BackfillPolicyType)
GraphenePartitionBackfillsOrError
python
pytorch__pytorch
torch/_dynamo/source.py
{ "start": 13349, "end": 13788 }
class ____(ChainedSource):
    def reconstruct(self, codegen: "PyCodegen") -> None:
        codegen(self.base)
        codegen.extend_output(codegen.create_load_attrs("__code__"))

    def guard_source(self) -> GuardSource:
        return self.base.guard_source()

    def name(self) -> str:
        return f"{self.base.name()}.__code__"


# Represents obj.__closure__ where object is type object
@dataclasses.dataclass(frozen=True)
CodeSource
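For readers unfamiliar with the attribute the source above resolves, a trivial illustration of what obj.__code__ points at (the function name is arbitrary):

def f(x):
    return x + 1

code_obj = f.__code__      # the object the source node above refers to
print(code_obj.co_name)    # prints: f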
python
huggingface__transformers
src/transformers/models/bloom/modeling_bloom.py
{ "start": 37979, "end": 43425 }
class ____(BloomPreTrainedModel): def __init__(self, config: BloomConfig): super().__init__(config) self.num_labels = config.num_labels self.transformer = BloomModel(config) self.score = nn.Linear(config.hidden_size, config.num_labels, bias=False) # Initialize weights and apply final processing self.post_init() @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Cache] = None, attention_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[tuple[torch.Tensor], SequenceClassifierOutputWithPast]: r""" input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] if self.config.pad_token_id is None and batch_size != 1: raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") if self.config.pad_token_id is None: last_non_pad_token = -1 elif input_ids is not None: # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32) token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32) last_non_pad_token = (token_indices * non_pad_mask).argmax(-1) else: last_non_pad_token = -1 logger.warning_once( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token] loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) else: loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @auto_docstring
BloomForSequenceClassification
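The pooling step in the record above selects the rightmost non-pad token per row so that both left- and right-padded batches are handled. A self-contained sketch of that index computation (pad_token_id=0 and the token ids are assumptions made for illustration):

import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 7, 0, 0],    # right-padded row
                          [0, 0, 9, 2]])   # left-padded row
non_pad_mask = (input_ids != pad_token_id).int()
token_indices = torch.arange(input_ids.shape[-1])
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
# tensor([1, 3]): position of the last real token in each row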