language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
huggingface__transformers
src/transformers/models/flava/configuration_flava.py
{ "start": 17720, "end": 32812 }
class FlavaConfig(PreTrainedConfig):
    r"""
    [`FlavaConfig`] is the configuration class to store the configuration of a [`FlavaModel`]. It is used to
    instantiate FLAVA model according to the specified arguments, defining the text model, image model, image codebook
    and multimodal model configs. Instantiating a configuration with the defaults will yield a similar configuration
    to that of the FLAVA [facebook/flava-full](https://huggingface.co/facebook/flava-full) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        text_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`FlavaTextConfig`].
        image_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`FlavaImageConfig`].
        multimodal_config (`dict`, *optional*):
            Dictionary of configuration options used to initialize [`FlavaMultimodalConfig`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        projection_dim (`int`, *optional*, defaults to 512):
            Dimensionality of text and image projection layers.
        logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
            The initial value of the *logit_scale* parameter. Default is used as per the original FLAVA/CLIP
            implementation.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        ce_ignore_index (`int`, *optional*, defaults to -100):
            Cross entropy index to ignore.
        mim_weight (`float`, *optional*, defaults to 1.0):
            Weight to be assigned to MIM (Masked Image Modeling) unimodal loss
        mlm_weight (`float`, *optional*, defaults to 1.0):
            Weight to be assigned to MLM (Masked Language Modeling) unimodal loss
        global_contrastive_weight (`float`, *optional*, defaults to 1.0):
            Weight to be assigned to global contrastive cross-alignment loss.
        itm_weight (`float`, *optional*, defaults to 1.0):
            Weight to be assigned to image-text matching multimodal loss.
        mmm_image_weight (`float`, *optional*, defaults to 1.0):
            Weight to be assigned to MMM loss's image part.
        mmm_text_weight (`float`, *optional*, defaults to 1.0):
            Weight to be assigned to MMM loss's text part.
        global_backprop_contrastive (`bool`, *optional*, defaults to `True`):
            Whether to use global backpropgation through all workers in contrastive loss.
        skip_unmasked_multimodal_encoder (`bool`, *optional*, defaults to `True`):
            Whether to skip running unmasked multimodal encoder whose outputs are not used by FLAVA losses.
        return_loss (`bool`, *optional*, defaults to `True`):
            Whether to return loss or not
        kwargs (*optional*):
            Dictionary of keyword arguments.

    Example:

    ```python
    >>> from transformers import FlavaConfig, FlavaModel, FlavaForPreTraining

    >>> # Initializing a FlavaConfig with style configuration
    >>> configuration = FlavaConfig()

    >>> # Initializing a FlavaModel and FlavaForPreTraining model (with random weights) from the style configuration
    >>> model = FlavaModel(configuration)
    >>> model_pre = FlavaForPreTraining(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    >>> configuration_pre = model_pre.config
    ```
    """

    model_type = "flava"
    # Sub-model configs that compose a full FLAVA configuration.
    sub_configs = {
        "text_config": FlavaTextConfig,
        "image_config": FlavaImageConfig,
        "multimodal_config": FlavaMultimodalConfig,
        "image_codebook_config": FlavaImageCodebookConfig,
    }

    def __init__(
        self,
        image_config: Optional[dict[str, Any]] = None,
        text_config: Optional[dict[str, Any]] = None,
        multimodal_config: Optional[dict[str, Any]] = None,
        image_codebook_config: Optional[dict[str, Any]] = None,
        hidden_size: int = 768,
        layer_norm_eps: float = 1e-12,
        projection_dim: int = 768,
        init_codebook: bool = True,
        logit_scale_init_value: float = 2.6592,
        initializer_range: float = 0.02,
        ce_ignore_index: int = -100,
        mim_weight: float = 1.0,
        mlm_weight: float = 1.0,
        global_contrastive_weight: float = 1.0,
        itm_weight: float = 1.0,
        mmm_image_weight: float = 1.0,
        mmm_text_weight: float = 1.0,
        global_backprop_contrastive: bool = True,
        skip_unmasked_multimodal_encoder: bool = True,
        return_loss: bool = True,
        **kwargs,
    ):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        image_config_dict = kwargs.pop("image_config_dict", None)
        multimodal_config_dict = kwargs.pop("multimodal_config_dict", None)
        image_codebook_config_dict = kwargs.pop("image_codebook_config_dict", None)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = FlavaTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key != "transformers_version":
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `FlavaTextConfig`. The "
                            f'value `text_config["{key}"]` will be overridden.'
                        )
                    logger.info(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if image_config_dict is not None:
            if image_config is None:
                image_config = {}

            # This is the complete result when using `image_config_dict`.
            _image_config_dict = FlavaImageConfig(**image_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _image_config_dict:
                _image_config_dict["id2label"] = {
                    str(key): value for key, value in _image_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_image_config_dict` and `image_config` but being different.
            for key, value in _image_config_dict.items():
                if key in image_config and value != image_config[key] and key != "transformers_version":
                    # If specified in `image_config_dict`
                    if key in image_config_dict:
                        message = (
                            f"`{key}` is found in both `image_config_dict` and `image_config` but with different "
                            f'values. The value `image_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`image_config_dict` is provided which will be used to initialize `FlavaImageConfig`. "
                            f'The value `image_config["{key}"]` will be overridden.'
                        )
                    logger.info(message)

            # Update all values in `image_config` with the ones in `_image_config_dict`.
            image_config.update(_image_config_dict)

        if multimodal_config_dict is not None:
            if multimodal_config is None:
                multimodal_config = {}

            # This is the complete result when using `multimodal_config_dict`.
            _multimodal_config_dict = FlavaMultimodalConfig(**multimodal_config_dict).to_dict()

            # Give a warning if the values exist in both `_multimodal_config_dict` and `multimodal_config` but being
            # different.
            for key, value in _multimodal_config_dict.items():
                if key in multimodal_config and value != multimodal_config[key] and key != "transformers_version":
                    # If specified in `multimodal_config_dict`
                    if key in multimodal_config_dict:
                        message = (
                            f"`{key}` is found in both `multimodal_config_dict` and `multimodal_config` but with "
                            f'different values. The value `multimodal_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`multimodal_config_dict` is provided which will be used to initialize "
                            f'`FlavaMultimodalConfig`. The value `multimodal_config["{key}"]` will be overridden.'
                        )
                    logger.info(message)

            # Update all values in `multimodal_config` with the ones in `_multimodal_config_dict`.
            multimodal_config.update(_multimodal_config_dict)

        if image_codebook_config_dict is not None:
            if image_codebook_config is None:
                image_codebook_config = {}

            # This is the complete result when using `image_codebook_config_dict`.
            _image_codebook_config_dict = FlavaImageCodebookConfig(**image_codebook_config_dict).to_dict()

            # Give a warning if the values exist in both `_image_codebook_config_dict` and `image_codebook_config` but
            # being different.
            for key, value in _image_codebook_config_dict.items():
                if (
                    key in image_codebook_config
                    and value != image_codebook_config[key]
                    and key != "transformers_version"
                ):
                    # If specified in `image_codebook_config_dict`
                    if key in image_codebook_config_dict:
                        message = (
                            f"`{key}` is found in both `image_codebook_config_dict` and `image_codebook_config` but "
                            f'with different values. The value `image_codebook_config_dict["{key}"]` will be used '
                            "instead."
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`image_codebook_config_dict` is provided which will be used to initialize "
                            f'`FlavaImageCodebookConfig`. The value `image_codebook_config["{key}"]` will be overridden.'
                        )
                    logger.info(message)

            # Update all values in `image_codebook_config` with the ones in `_image_codebook_config_dict`.
            image_codebook_config.update(_image_codebook_config_dict)

        # Fall back to default sub-configs, or build the typed config from a plain dict.
        if text_config is None:
            text_config = FlavaTextConfig()
            logger.info("`text_config` is `None`. initializing the `FlavaTextConfig` with default values.")
        elif isinstance(text_config, dict):
            text_config = FlavaTextConfig(**text_config)

        if image_config is None:
            image_config = FlavaImageConfig()
            logger.info("`image_config` is `None`. initializing the `FlavaImageConfig` with default values.")
        elif isinstance(image_config, dict):
            image_config = FlavaImageConfig(**image_config)

        if multimodal_config is None:
            multimodal_config = FlavaMultimodalConfig()
            logger.info("`image_config` is `None`. initializing the `FlavaMultimodalConfig` with default values.")
        elif isinstance(multimodal_config, dict):
            multimodal_config = FlavaMultimodalConfig(**multimodal_config)

        if image_codebook_config is None:
            image_codebook_config = FlavaImageCodebookConfig()
            logger.info("`image_config` is `None`. initializing the `FlavaImageCodebookConfig` with default values.")
        elif isinstance(image_codebook_config, dict):
            image_codebook_config = FlavaImageCodebookConfig(**image_codebook_config)

        self.text_config = text_config
        self.image_config = image_config
        self.multimodal_config = multimodal_config
        self.image_codebook_config = image_codebook_config
        self.projection_dim = projection_dim
        self.init_codebook = init_codebook

        self.hidden_size = hidden_size
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
        self.ce_ignore_index = ce_ignore_index
        self.mim_weight = mim_weight
        self.mlm_weight = mlm_weight
        self.global_contrastive_weight = global_contrastive_weight
        self.itm_weight = itm_weight
        self.mmm_image_weight = mmm_image_weight
        self.mmm_text_weight = mmm_text_weight
        self.global_backprop_contrastive = global_backprop_contrastive
        self.skip_unmasked_multimodal_encoder = skip_unmasked_multimodal_encoder
        self.return_loss = return_loss
        super().__init__(**kwargs)


__all__ = ["FlavaConfig", "FlavaImageCodebookConfig", "FlavaImageConfig", "FlavaMultimodalConfig", "FlavaTextConfig"]
FlavaConfig
python
walkccc__LeetCode
solutions/1030. Matrix Cells in Distance Order/1030.py
{ "start": 0, "end": 567 }
class Solution:
    def allCellsDistOrder(
        self, rows: int, cols: int, rCenter: int, cCenter: int
    ) -> list[list[int]]:
        """Return every cell of a rows x cols grid, ordered by Manhattan
        distance from (rCenter, cCenter).

        A BFS from the center visits cells in non-decreasing distance order,
        which is exactly the ordering required.
        """
        moves = ((0, 1), (1, 0), (0, -1), (-1, 0))
        visited = {(rCenter, cCenter)}
        frontier = collections.deque([(rCenter, cCenter)])
        ordered = []
        while frontier:
            r, c = frontier.popleft()
            ordered.append([r, c])
            for dr, dc in moves:
                nr, nc = r + dr, c + dc
                if 0 <= nr < rows and 0 <= nc < cols and (nr, nc) not in visited:
                    visited.add((nr, nc))
                    frontier.append((nr, nc))
        return ordered
Solution
python
django__django
tests/prefetch_related/tests.py
{ "start": 45257, "end": 53245 }
class GenericRelationTests(TestCase):
    """prefetch_related() behavior over GenericForeignKey / GenericRelation fields."""

    @classmethod
    def setUpTestData(cls):
        book1 = Book.objects.create(title="Winnie the Pooh")
        book2 = Book.objects.create(title="Do you like green eggs and spam?")
        book3 = Book.objects.create(title="Three Men In A Boat")
        reader1 = Reader.objects.create(name="me")
        reader2 = Reader.objects.create(name="you")
        reader3 = Reader.objects.create(name="someone")
        book1.read_by.add(reader1, reader2)
        book2.read_by.add(reader2)
        book3.read_by.add(reader3)
        cls.book1, cls.book2, cls.book3 = book1, book2, book3
        cls.reader1, cls.reader2, cls.reader3 = reader1, reader2, reader3

    def test_prefetch_GFK(self):
        TaggedItem.objects.create(tag="awesome", content_object=self.book1)
        TaggedItem.objects.create(tag="great", content_object=self.reader1)
        TaggedItem.objects.create(tag="outstanding", content_object=self.book2)
        TaggedItem.objects.create(tag="amazing", content_object=self.reader3)

        # 1 for TaggedItem table, 1 for Book table, 1 for Reader table
        with self.assertNumQueries(3):
            qs = TaggedItem.objects.prefetch_related("content_object")
            list(qs)

    def test_prefetch_GFK_nonint_pk(self):
        Comment.objects.create(comment="awesome", content_object=self.book1)

        # 1 for Comment table, 1 for Book table
        with self.assertNumQueries(2):
            qs = Comment.objects.prefetch_related("content_object")
            [c.content_object for c in qs]

    def test_prefetch_GFK_uuid_pk(self):
        article = Article.objects.create(name="Django")
        Comment.objects.create(comment="awesome", content_object_uuid=article)
        qs = Comment.objects.prefetch_related("content_object_uuid")
        self.assertEqual([c.content_object_uuid for c in qs], [article])

    def test_prefetch_GFK_uses_prepped_primary_key(self):
        article = ArticleCustomUUID.objects.create(name="Blanche")
        Comment.objects.create(comment="Enchantment", content_object_uuid=article)
        obj = Comment.objects.prefetch_related("content_object_uuid").get(
            comment="Enchantment"
        )
        self.assertEqual(obj.content_object_uuid, article)

    def test_prefetch_GFK_fk_pk(self):
        book = Book.objects.create(title="Poems")
        book_with_year = BookWithYear.objects.create(book=book, published_year=2019)
        Comment.objects.create(comment="awesome", content_object=book_with_year)
        qs = Comment.objects.prefetch_related("content_object")
        self.assertEqual([c.content_object for c in qs], [book_with_year])

    def test_traverse_GFK(self):
        """
        A 'content_object' can be traversed with prefetch_related() and
        get to related objects on the other side (assuming it is suitably
        filtered)
        """
        TaggedItem.objects.create(tag="awesome", content_object=self.book1)
        TaggedItem.objects.create(tag="awesome", content_object=self.book2)
        TaggedItem.objects.create(tag="awesome", content_object=self.book3)
        TaggedItem.objects.create(tag="awesome", content_object=self.reader1)
        TaggedItem.objects.create(tag="awesome", content_object=self.reader2)

        ct = ContentType.objects.get_for_model(Book)

        # We get 3 queries - 1 for main query, 1 for content_objects since they
        # all use the same table, and 1 for the 'read_by' relation.
        with self.assertNumQueries(3):
            # If we limit to books, we know that they will have 'read_by'
            # attributes, so the following makes sense:
            qs = TaggedItem.objects.filter(
                content_type=ct, tag="awesome"
            ).prefetch_related("content_object__read_by")
            readers_of_awesome_books = {
                r.name for tag in qs for r in tag.content_object.read_by.all()
            }
            self.assertEqual(readers_of_awesome_books, {"me", "you", "someone"})

    def test_nullable_GFK(self):
        TaggedItem.objects.create(
            tag="awesome", content_object=self.book1, created_by=self.reader1
        )
        TaggedItem.objects.create(tag="great", content_object=self.book2)
        TaggedItem.objects.create(tag="rubbish", content_object=self.book3)

        with self.assertNumQueries(2):
            result = [
                t.created_by for t in TaggedItem.objects.prefetch_related("created_by")
            ]

        # NOTE: the comparison query below runs outside the assertNumQueries block.
        self.assertEqual(result, [t.created_by for t in TaggedItem.objects.all()])

    def test_generic_relation(self):
        bookmark = Bookmark.objects.create(url="http://www.djangoproject.com/")
        TaggedItem.objects.create(content_object=bookmark, tag="django")
        TaggedItem.objects.create(content_object=bookmark, tag="python")

        with self.assertNumQueries(2):
            tags = [
                t.tag
                for b in Bookmark.objects.prefetch_related("tags")
                for t in b.tags.all()
            ]
            self.assertEqual(sorted(tags), ["django", "python"])

    def test_charfield_GFK(self):
        b = Bookmark.objects.create(url="http://www.djangoproject.com/")
        TaggedItem.objects.create(content_object=b, tag="django")
        TaggedItem.objects.create(content_object=b, favorite=b, tag="python")

        with self.assertNumQueries(3):
            bookmark = Bookmark.objects.filter(pk=b.pk).prefetch_related(
                "tags", "favorite_tags"
            )[0]
            self.assertEqual(
                sorted(i.tag for i in bookmark.tags.all()), ["django", "python"]
            )
            self.assertEqual([i.tag for i in bookmark.favorite_tags.all()], ["python"])

    def test_custom_queryset(self):
        bookmark = Bookmark.objects.create(url="http://www.djangoproject.com/")
        django_tag = TaggedItem.objects.create(content_object=bookmark, tag="django")
        TaggedItem.objects.create(content_object=bookmark, tag="python")

        with self.assertNumQueries(2):
            bookmark = Bookmark.objects.prefetch_related(
                Prefetch("tags", TaggedItem.objects.filter(tag="django")),
            ).get()

        with self.assertNumQueries(0):
            self.assertEqual(list(bookmark.tags.all()), [django_tag])

        # The custom queryset filters should be applied to the queryset
        # instance returned by the manager.
        self.assertEqual(list(bookmark.tags.all()), list(bookmark.tags.all().all()))

    def test_deleted_GFK(self):
        TaggedItem.objects.create(tag="awesome", content_object=self.book1)
        TaggedItem.objects.create(tag="awesome", content_object=self.book2)
        ct = ContentType.objects.get_for_model(Book)

        book1_pk = self.book1.pk
        self.book1.delete()

        with self.assertNumQueries(2):
            qs = TaggedItem.objects.filter(tag="awesome").prefetch_related(
                "content_object"
            )
            result = [
                (tag.object_id, tag.content_type_id, tag.content_object) for tag in qs
            ]
            self.assertEqual(
                result,
                [
                    (book1_pk, ct.pk, None),
                    (self.book2.pk, ct.pk, self.book2),
                ],
            )

    def test_reverse_generic_relation(self):
        # Create two distinct bookmarks to ensure the bookmark and
        # tagged item models primary are offset.
        first_bookmark = Bookmark.objects.create()
        second_bookmark = Bookmark.objects.create()
        TaggedItem.objects.create(
            content_object=first_bookmark, favorite=second_bookmark
        )
        with self.assertNumQueries(2):
            obj = TaggedItem.objects.prefetch_related("favorite_bookmarks").get()
        with self.assertNumQueries(0):
            prefetched_bookmarks = obj.favorite_bookmarks.all()
        self.assertQuerySetEqual(prefetched_bookmarks, [second_bookmark])
GenericRelationTests
python
h5py__h5py
h5py/tests/test_h5f.py
{ "start": 3008, "end": 3929 }
class TestVlenData(TestCase):
    """Round-trips variable-length (vlen) string data through an HDF5 file."""

    def test_vlen_strings(self):
        # Write a dataset holding vlen arrays of vlen strings, reopen the
        # file, and check the data comes back intact (as bytes).
        tmp_dir = tempfile.mkdtemp('h5py.lowtest.test_h5f.TestVlenStrings.test_vlen_strings')
        h5_path = os.path.join(tmp_dir, 'test.h5')
        try:
            str_dt = special_dtype(vlen=str)
            nested_dt = special_dtype(vlen=str_dt)
            with File(h5_path, mode='w') as handle:
                dset = handle.create_dataset('/com', (2,), dtype=nested_dt)
                dset[0] = np.array(["a", "b", "c"], dtype=nested_dt)
                dset[1] = np.array(["d", "e", "f", "g"], dtype=nested_dt)

            with File(h5_path, "r") as handle:
                dset = handle["com"]
                assert dset[0].tolist() == [b'a', b'b', b'c']
                assert dset[1].tolist() == [b'd', b'e', b'f', b'g']
        finally:
            shutil.rmtree(tmp_dir)
TestVlenData
python
pypa__setuptools
setuptools/command/editable_wheel.py
{ "start": 31465, "end": 32746 }
class _EditableFinder:  # MetaPathFinder
    """Meta path finder that resolves top-level names via the on-disk
    locations recorded in ``MAPPING`` (editable installs)."""

    @classmethod
    def find_spec(cls, fullname: str, path=None, target=None) -> ModuleSpec | None:  # type: ignore
        # Top-level packages and modules: MAPPING records where they live.
        if fullname in MAPPING:
            return cls._find_spec(fullname, Path(MAPPING[fullname]))

        # Immediate children of a mapped parent (required for namespaces to
        # work): delegate to importlib's PathFinder, which also deals with
        # file-system case sensitivity for us.
        parent = fullname.rpartition(".")[0]
        if parent and parent in MAPPING:
            return PathFinder.find_spec(fullname, path=[MAPPING[parent]])

        # Anything nested deeper is handled automatically by importlib using
        # the parent path.
        return None

    @classmethod
    def _find_spec(cls, fullname: str, candidate_path: Path) -> ModuleSpec | None:
        """Build a spec for *fullname* at *candidate_path*, preferring the
        package form (``__init__.py``) over the per-suffix module files."""
        package_init = candidate_path / "__init__.py"
        suffixed = (candidate_path.with_suffix(ext) for ext in module_suffixes())
        for location in chain([package_init], suffixed):
            if location.exists():
                return spec_from_file_location(fullname, location)
        return None
_EditableFinder
python
kamyu104__LeetCode-Solutions
Python/maximum-number-of-potholes-that-can-be-fixed.py
{ "start": 53, "end": 1450 }
class Solution(object):
    def maxPotholes(self, road, budget):
        """
        :type road: str
        :type budget: int
        :rtype: int

        Greedy: repairing a maximal run of l potholes costs l+1 and fixes l,
        so longer runs give the best potholes-per-unit-budget; spend the
        budget on the longest runs first. Run lengths are sorted with an
        O(n) in-place counting sort.
        """
        def inplace_counting_sort(nums, reverse=False):  # Time: O(n)
            if not nums:
                return
            count = [0]*(max(nums)+1)
            for num in nums:
                count[num] += 1
            for i in range(1, len(count)):
                count[i] += count[i-1]
            for i in reversed(range(len(nums))):  # inplace but unstable sort
                while nums[i] >= 0:
                    count[nums[i]] -= 1
                    j = count[nums[i]]
                    # Store the placed value as its bitwise complement so a
                    # negative sign marks "already in final position".
                    nums[i], nums[j] = nums[j], ~nums[i]
            for i in range(len(nums)):
                nums[i] = ~nums[i]  # restore values
            if reverse:  # unstable sort
                nums.reverse()

        # Collect the lengths of maximal runs of 'x'.
        ls = []
        l = 0
        for i in range(len(road)):  # range (not xrange) works on Python 2 and 3
            l += 1
            if i+1 == len(road) or road[i+1] != road[i]:
                if road[i] == 'x':
                    ls.append(l)
                l = 0
        inplace_counting_sort(ls)
        # Spend the budget on the longest runs first; a run of length l
        # costs min(l+1, budget) and fixes one pothole less than it costs.
        result = 0
        for l in reversed(ls):
            c = min(l+1, budget)
            if c-1 <= 0:
                break
            result += c-1
            budget -= c
        return result


# Time:  O(nlogn)
# Space: O(n)
# sort, greedy
Solution
python
getsentry__sentry
src/sentry/workflow_engine/migrations/0068_migrate_anomaly_detection_alerts.py
{ "start": 2630, "end": 2833 }
class ActionType(StrEnum):
    """Action provider identifiers (string-valued, so members compare equal
    to their plain-string form)."""

    SLACK = "slack"
    MSTEAMS = "msteams"
    DISCORD = "discord"
    PAGERDUTY = "pagerduty"
    OPSGENIE = "opsgenie"
    EMAIL = "email"
    SENTRY_APP = "sentry_app"
ActionType
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/stackdriver.py
{ "start": 6600, "end": 10100 }
class StackdriverEnableAlertPoliciesOperator(GoogleCloudBaseOperator):
    """
    Enables one or more disabled alerting policies identified by filter parameter.

    Inoperative in case the policy is already enabled.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:StackdriverEnableAlertPoliciesOperator`

    :param filter_: If provided, this field specifies the criteria that must be met by alert policies
        to be enabled. For more details, see
        https://cloud.google.com/monitoring/api/v3/sorting-and-filtering.
    :param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
        retried using a default configuration.
    :param timeout: The amount of time, in seconds, to wait for the request to complete.
        Note that if ``retry`` is specified, the timeout applies to each individual attempt.
    :param metadata: Additional metadata that is provided to the method.
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
    :param project_id: The project in which alert needs to be enabled.
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    """

    ui_color = "#e5ffcc"
    template_fields: Sequence[str] = (
        "filter_",
        "impersonation_chain",
    )
    operator_extra_links = (StackdriverPoliciesLink(),)

    def __init__(
        self,
        *,
        filter_: str | None = None,
        retry: Retry | _MethodDefault = DEFAULT,
        timeout: float | None = None,
        metadata: Sequence[tuple[str, str]] = (),
        gcp_conn_id: str = "google_cloud_default",
        project_id: str = PROVIDE_PROJECT_ID,
        impersonation_chain: str | Sequence[str] | None = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.gcp_conn_id = gcp_conn_id
        self.project_id = project_id
        self.filter_ = filter_
        self.retry = retry
        self.timeout = timeout
        self.metadata = metadata
        self.impersonation_chain = impersonation_chain
        self.hook: StackdriverHook | None = None

    def execute(self, context: Context):
        self.log.info("Enable Alert Policies: Project id: %s Filter: %s", self.project_id, self.filter_)
        if self.hook is None:
            # Create the hook lazily so operator construction has no side effects.
            self.hook = StackdriverHook(
                gcp_conn_id=self.gcp_conn_id,
                impersonation_chain=self.impersonation_chain,
            )

        self.hook.enable_alert_policies(
            filter_=self.filter_,
            project_id=self.project_id,
            retry=self.retry,
            timeout=self.timeout,
            metadata=self.metadata,
        )
        StackdriverPoliciesLink.persist(
            context=context,
            # Fall back to the hook's default project when none was given.
            project_id=self.project_id or self.hook.project_id,
        )


# Disable Alert Operator
StackdriverEnableAlertPoliciesOperator
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 291045, "end": 291823 }
class RevokeEnterpriseOrganizationsMigratorRoleInput(sgqlc.types.Input):
    """Autogenerated input type of
    RevokeEnterpriseOrganizationsMigratorRole
    """

    __schema__ = github_schema
    __field_names__ = ("enterprise_id", "login", "client_mutation_id")
    enterprise_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="enterpriseId")
    """The ID of the enterprise to which all organizations managed by it
    will be granted the migrator role.
    """

    login = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="login")
    """The login of the user to revoke the migrator role"""

    client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
    """A unique identifier for the client performing the mutation."""
RevokeEnterpriseOrganizationsMigratorRoleInput
python
airbytehq__airbyte
airbyte-integrations/connectors/source-zendesk-support/unit_tests/integrations/test_articles.py
{ "start": 865, "end": 4837 }
class TestArticlesStream(TestCase):
    """Integration tests for the `articles` stream against mocked HTTP responses."""

    def _config(self) -> ConfigBuilder:
        # Baseline connector config; tests override the start date as needed.
        return (
            ConfigBuilder()
            .with_basic_auth_credentials("user@example.com", "password")
            .with_subdomain("d3v-airbyte")
            .with_start_date(ab_datetime_now().subtract(timedelta(hours=1)))
        )

    def _get_authenticator(self, config):
        return ApiTokenAuthenticator(email=config["credentials"]["email"], password=config["credentials"]["api_token"])

    @HttpMocker()
    def test_given_one_page_when_read_then_return_records(self, http_mocker):
        config = self._config().with_start_date(_START_DATE).build()
        api_token_authenticator = self._get_authenticator(config)

        http_mocker.get(
            ArticlesRequestBuilder.articles_endpoint(api_token_authenticator).with_start_time(_START_DATE).build(),
            ArticlesResponseBuilder.response()
            .with_record(ArticlesRecordBuilder.record())
            .with_record(ArticlesRecordBuilder.record())
            .build(),
        )

        output = read_stream("articles", SyncMode.full_refresh, config)
        assert len(output.records) == 2

    @HttpMocker()
    def test_given_next_page_when_read_then_paginate(self, http_mocker):
        config = self._config().with_start_date(_START_DATE).build()
        api_token_authenticator = self._get_authenticator(config)

        # First page advertises this request as its next page.
        next_page_http_request = (
            ArticlesRequestBuilder.articles_endpoint(api_token_authenticator).with_start_time(_START_DATE.add(timedelta(days=10))).build()
        )
        http_mocker.get(
            ArticlesRequestBuilder.articles_endpoint(api_token_authenticator).with_start_time(_START_DATE).build(),
            ArticlesResponseBuilder.response(next_page_http_request)
            .with_record(ArticlesRecordBuilder.record())
            .with_record(ArticlesRecordBuilder.record())
            .with_pagination()
            .build(),
        )
        http_mocker.get(
            next_page_http_request,
            ArticlesResponseBuilder.response().with_record(ArticlesRecordBuilder.record()).build(),
        )

        output = read_stream("articles", SyncMode.full_refresh, config)
        assert len(output.records) == 3

    @HttpMocker()
    def test_when_read_then_set_state_value_to_most_recent_cursor_value(self, http_mocker):
        config = self._config().with_start_date(_START_DATE).build()
        api_token_authenticator = self._get_authenticator(config)

        most_recent_cursor_value = _START_DATE.add(timedelta(days=2))
        http_mocker.get(
            ArticlesRequestBuilder.articles_endpoint(api_token_authenticator).with_start_time(_START_DATE).build(),
            ArticlesResponseBuilder.response()
            .with_record(ArticlesRecordBuilder.record().with_cursor(datetime_to_string(most_recent_cursor_value)))
            .build(),
        )

        output = read_stream("articles", SyncMode.full_refresh, config)
        # State is emitted as an integer unix timestamp string.
        assert output.most_recent_state.stream_state.__dict__ == {"updated_at": str(int(most_recent_cursor_value.timestamp()))}

    @HttpMocker()
    def test_given_input_state_when_read_then_set_state_value_to_most_recent_cursor_value(self, http_mocker):
        config = self._config().with_start_date(_START_DATE).build()
        api_token_authenticator = self._get_authenticator(config)

        state_cursor_value = _START_DATE.add(timedelta(days=2))
        http_mocker.get(
            ArticlesRequestBuilder.articles_endpoint(api_token_authenticator).with_start_time(state_cursor_value).build(),
            ArticlesResponseBuilder.response().with_record(ArticlesRecordBuilder.record()).build(),
        )

        output = read_stream(
            "articles",
            SyncMode.full_refresh,
            config,
            StateBuilder().with_stream_state("articles", {"updated_at": datetime_to_string(state_cursor_value)}).build(),
        )
        assert len(output.records) == 1
TestArticlesStream
python
getsentry__sentry
src/sentry/api/endpoints/organization_auth_token_details.py
{ "start": 834, "end": 3065 }
class OrganizationAuthTokenDetailsEndpoint(ControlSiloOrganizationEndpoint):
    """GET / PUT / DELETE a single org auth token, looked up by ``token_id``."""

    publish_status = {
        "DELETE": ApiPublishStatus.PRIVATE,
        "GET": ApiPublishStatus.PRIVATE,
        "PUT": ApiPublishStatus.PRIVATE,
    }
    owner = ApiOwner.ENTERPRISE
    authentication_classes = (SessionNoAuthTokenAuthentication,)
    permission_classes = (OrgAuthTokenPermission,)

    def convert_args(self, request: Request, token_id, *args, **kwargs):
        # Resolve `token_id` into an OrgAuthToken instance; only tokens that
        # have not been deactivated are addressable.
        args, kwargs = super().convert_args(request, *args, **kwargs)
        organization = kwargs["organization"]

        try:
            kwargs["instance"] = OrgAuthToken.objects.get(
                organization_id=organization.id, id=token_id, date_deactivated__isnull=True
            )
        except OrgAuthToken.DoesNotExist:
            raise ResourceDoesNotExist

        return (args, kwargs)

    def get(
        self,
        request: Request,
        instance: OrgAuthToken,
        **kwargs,
    ) -> Response:
        # token=None is passed to the serializer, so the raw token value is
        # not included in this response.
        return Response(serialize(instance, request.user, token=None))

    def put(
        self,
        request: Request,
        instance: OrgAuthToken,
        **kwargs,
    ):
        # Only the token's display name can be updated.
        name = request.data.get("name")

        if not name:
            return Response({"detail": "The name cannot be blank."}, status=400)

        if len(name) > MAX_NAME_LENGTH:
            return Response(
                {"detail": "The name cannot be longer than 255 characters."}, status=400
            )

        instance.update(name=name)
        return Response(status=204)

    def delete(
        self,
        request: Request,
        organization: RpcOrganization,
        instance: OrgAuthToken,
        **kwargs,
    ):
        # Soft delete: the token is deactivated rather than removed, and the
        # removal is recorded in the audit log and analytics.
        instance.update(date_deactivated=timezone.now())

        self.create_audit_entry(
            request,
            organization=organization,
            target_object=instance.id,
            event=audit_log.get_event_id("ORGAUTHTOKEN_REMOVE"),
            data=instance.get_audit_log_data(),
        )

        analytics.record(
            OrgAuthTokenDeleted(
                user_id=request.user.id,
                organization_id=organization.id,
            )
        )

        return Response(status=204)
OrganizationAuthTokenDetailsEndpoint
python
airbytehq__airbyte
airbyte-integrations/connectors/source-zendesk-support/unit_tests/integrations/zs_responses/records/posts_records_builder.py
{ "start": 196, "end": 487 }
class ____(ZendeskSupportRecordBuilder): @classmethod def posts_record(cls) -> "PostsRecordBuilder": record_template = cls.extract_record("posts", __file__, NestedPath(["posts", 0])) return cls(record_template, FieldPath("id"), FieldPath("updated_at"))
PostsRecordBuilder
python
sympy__sympy
sympy/functions/elementary/trigonometric.py
{ "start": 18450, "end": 30742 }
class ____(TrigonometricFunction): """ The cosine function. Returns the cosine of x (measured in radians). Explanation =========== See :func:`sin` for notes about automatic evaluation. Examples ======== >>> from sympy import cos, pi >>> from sympy.abc import x >>> cos(x**2).diff(x) -2*x*sin(x**2) >>> cos(1).diff(x) 0 >>> cos(pi) -1 >>> cos(pi/2) 0 >>> cos(2*pi/3) -1/2 >>> cos(pi/12) sqrt(2)/4 + sqrt(6)/4 See Also ======== sympy.functions.elementary.trigonometric.sin sympy.functions.elementary.trigonometric.csc sympy.functions.elementary.trigonometric.sec sympy.functions.elementary.trigonometric.tan sympy.functions.elementary.trigonometric.cot sympy.functions.elementary.trigonometric.asin sympy.functions.elementary.trigonometric.acsc sympy.functions.elementary.trigonometric.acos sympy.functions.elementary.trigonometric.asec sympy.functions.elementary.trigonometric.atan sympy.functions.elementary.trigonometric.acot sympy.functions.elementary.trigonometric.atan2 References ========== .. [1] https://en.wikipedia.org/wiki/Trigonometric_functions .. [2] https://dlmf.nist.gov/4.14 .. [3] https://functions.wolfram.com/ElementaryFunctions/Cos """ def period(self, symbol=None): return self._period(2*pi, symbol) def fdiff(self, argindex=1): if argindex == 1: return -sin(self.args[0]) else: raise ArgumentIndexError(self, argindex) @classmethod def eval(cls, arg): from sympy.functions.special.polynomials import chebyshevt from sympy.calculus.accumulationbounds import AccumBounds from sympy.sets.setexpr import SetExpr if arg.is_Number: if arg is S.NaN: return S.NaN elif arg.is_zero: return S.One elif arg in (S.Infinity, S.NegativeInfinity): # In this case it is better to return AccumBounds(-1, 1) # rather than returning S.NaN, since AccumBounds(-1, 1) # preserves the information that sin(oo) is between # -1 and 1, where S.NaN does not do that. 
return AccumBounds(-1, 1) if arg is S.ComplexInfinity: return S.NaN if isinstance(arg, AccumBounds): return sin(arg + pi/2) elif isinstance(arg, SetExpr): return arg._eval_func(cls) if arg.is_extended_real and arg.is_finite is False: return AccumBounds(-1, 1) if arg.could_extract_minus_sign(): return cls(-arg) i_coeff = _imaginary_unit_as_coefficient(arg) if i_coeff is not None: from sympy.functions.elementary.hyperbolic import cosh return cosh(i_coeff) pi_coeff = _pi_coeff(arg) if pi_coeff is not None: if pi_coeff.is_integer: return (S.NegativeOne)**pi_coeff if (2*pi_coeff).is_integer: # is_even-case handled above as then pi_coeff.is_integer, # so check if known to be not even if pi_coeff.is_even is False: return S.Zero if not pi_coeff.is_Rational: narg = pi_coeff*pi if narg != arg: return cls(narg) return None # cosine formula ##################### # https://github.com/sympy/sympy/issues/6048 # explicit calculations are performed for # cos(k pi/n) for n = 8,10,12,15,20,24,30,40,60,120 # Some other exact values like cos(k pi/240) can be # calculated using a partial-fraction decomposition # by calling cos( X ).rewrite(sqrt) if pi_coeff.is_Rational: q = pi_coeff.q p = pi_coeff.p % (2*q) if p > q: narg = (pi_coeff - 1)*pi return -cls(narg) if 2*p > q: narg = (1 - pi_coeff)*pi return -cls(narg) # If nested sqrt's are worse than un-evaluation # you can require q to be in (1, 2, 3, 4, 6, 12) # q <= 12, q=15, q=20, q=24, q=30, q=40, q=60, q=120 return # expressions with 2 or fewer sqrt nestings. 
table2 = _table2() if q in table2: a, b = table2[q] a, b = p*pi/a, p*pi/b nvala, nvalb = cls(a), cls(b) if None in (nvala, nvalb): return None return nvala*nvalb + cls(pi/2 - a)*cls(pi/2 - b) if q > 12: return None cst_table_some = { 3: S.Half, 5: (sqrt(5) + 1) / 4, } if q in cst_table_some: cts = cst_table_some[pi_coeff.q] return chebyshevt(pi_coeff.p, cts).expand() if 0 == q % 2: narg = (pi_coeff*2)*pi nval = cls(narg) if None == nval: return None x = (2*pi_coeff + 1)/2 sign_cos = (-1)**((-1 if x < 0 else 1)*int(abs(x))) return sign_cos*sqrt( (1 + nval)/2 ) return None if arg.is_Add: x, m = _peeloff_pi(arg) if m: m = m*pi return cos(m)*cos(x) - sin(m)*sin(x) if arg.is_zero: return S.One if isinstance(arg, acos): return arg.args[0] if isinstance(arg, atan): x = arg.args[0] return 1/sqrt(1 + x**2) if isinstance(arg, atan2): y, x = arg.args return x/sqrt(x**2 + y**2) if isinstance(arg, asin): x = arg.args[0] return sqrt(1 - x ** 2) if isinstance(arg, acot): x = arg.args[0] return 1/sqrt(1 + 1/x**2) if isinstance(arg, acsc): x = arg.args[0] return sqrt(1 - 1/x**2) if isinstance(arg, asec): x = arg.args[0] return 1/x @staticmethod @cacheit def taylor_term(n, x, *previous_terms): if n < 0 or n % 2 == 1: return S.Zero else: x = sympify(x) if len(previous_terms) > 2: p = previous_terms[-2] return -p*x**2/(n*(n - 1)) else: return S.NegativeOne**(n//2)*x**n/factorial(n) def _eval_nseries(self, x, n, logx, cdir=0): arg = self.args[0] if logx is not None: arg = arg.subs(log(x), logx) if arg.subs(x, 0).has(S.NaN, S.ComplexInfinity): raise PoleError("Cannot expand %s around 0" % (self)) return super()._eval_nseries(x, n=n, logx=logx, cdir=cdir) def _eval_rewrite_as_exp(self, arg, **kwargs): I = S.ImaginaryUnit from sympy.functions.elementary.hyperbolic import HyperbolicFunction if isinstance(arg, (TrigonometricFunction, HyperbolicFunction)): arg = arg.func(arg.args[0]).rewrite(exp, **kwargs) return (exp(arg*I) + exp(-arg*I))/2 def _eval_rewrite_as_Pow(self, arg, **kwargs): if 
isinstance(arg, log): I = S.ImaginaryUnit x = arg.args[0] return x**I/2 + x**-I/2 def _eval_rewrite_as_sin(self, arg, **kwargs): return sin(arg + pi/2, evaluate=False) def _eval_rewrite_as_tan(self, arg, **kwargs): tan_half = tan(S.Half*arg)**2 return (1 - tan_half)/(1 + tan_half) def _eval_rewrite_as_sincos(self, arg, **kwargs): return sin(arg)*cos(arg)/sin(arg) def _eval_rewrite_as_cot(self, arg, **kwargs): cot_half = cot(S.Half*arg)**2 return Piecewise((1, And(Eq(im(arg), 0), Eq(Mod(arg, 2*pi), 0))), ((cot_half - 1)/(cot_half + 1), True)) def _eval_rewrite_as_pow(self, arg, **kwargs): return self._eval_rewrite_as_sqrt(arg, **kwargs) def _eval_rewrite_as_sqrt(self, arg: Expr, **kwargs): from sympy.functions.special.polynomials import chebyshevt pi_coeff = _pi_coeff(arg) if pi_coeff is None: return None if isinstance(pi_coeff, Integer): return None if not isinstance(pi_coeff, Rational): return None cst_table_some = cos_table() if pi_coeff.q in cst_table_some: rv = chebyshevt(pi_coeff.p, cst_table_some[pi_coeff.q]()) if pi_coeff.q < 257: rv = rv.expand() return rv if not pi_coeff.q % 2: # recursively remove factors of 2 pico2 = pi_coeff * 2 nval = cos(pico2 * pi).rewrite(sqrt, **kwargs) x = (pico2 + 1) / 2 sign_cos = -1 if int(x) % 2 else 1 return sign_cos * sqrt((1 + nval) / 2) FC = fermat_coords(pi_coeff.q) if FC: denoms = FC else: denoms = [b**e for b, e in factorint(pi_coeff.q).items()] apart = ipartfrac(*denoms) decomp = (pi_coeff.p * Rational(n, d) for n, d in zip(apart, denoms)) X = [(x[1], x[0]*pi) for x in zip(decomp, numbered_symbols('z'))] pcls = cos(sum(x[0] for x in X))._eval_expand_trig().subs(X) if not FC or len(FC) == 1: return pcls return pcls.rewrite(sqrt, **kwargs) def _eval_rewrite_as_sec(self, arg, **kwargs): return 1/sec(arg) def _eval_rewrite_as_csc(self, arg, **kwargs): return 1/sec(arg).rewrite(csc, **kwargs) def _eval_rewrite_as_besselj(self, arg, **kwargs): from sympy.functions.special.bessel import besselj return Piecewise( 
(sqrt(pi*arg/2)*besselj(-S.Half, arg), Ne(arg, 0)), (1, True) ) def _eval_conjugate(self): return self.func(self.args[0].conjugate()) def as_real_imag(self, deep=True, **hints): from sympy.functions.elementary.hyperbolic import cosh, sinh re, im = self._as_real_imag(deep=deep, **hints) return (cos(re)*cosh(im), -sin(re)*sinh(im)) def _eval_expand_trig(self, **hints): from sympy.functions.special.polynomials import chebyshevt arg = self.args[0] x = None if arg.is_Add: # TODO: Do this more efficiently for more than two terms x, y = arg.as_two_terms() sx = sin(x, evaluate=False)._eval_expand_trig() sy = sin(y, evaluate=False)._eval_expand_trig() cx = cos(x, evaluate=False)._eval_expand_trig() cy = cos(y, evaluate=False)._eval_expand_trig() return cx*cy - sx*sy elif arg.is_Mul: coeff, terms = arg.as_coeff_Mul(rational=True) if coeff.is_Integer: return chebyshevt(coeff, cos(terms)) return cos(arg) def _eval_as_leading_term(self, x, logx, cdir): from sympy.calculus.accumulationbounds import AccumBounds arg = self.args[0] x0 = arg.subs(x, 0).cancel() n = (x0 + pi/2)/pi if n.is_integer: lt = (arg - n*pi + pi/2).as_leading_term(x) return (S.NegativeOne**n)*lt if x0 is S.ComplexInfinity: x0 = arg.limit(x, 0, dir='-' if re(cdir).is_negative else '+') if x0 in [S.Infinity, S.NegativeInfinity]: return AccumBounds(-1, 1) return self.func(x0) if x0.is_finite else self def _eval_is_extended_real(self): if self.args[0].is_extended_real: return True def _eval_is_finite(self): arg = self.args[0] if arg.is_extended_real: return True def _eval_is_complex(self): if self.args[0].is_extended_real \ or self.args[0].is_complex: return True def _eval_is_zero(self): rest, pi_mult = _peeloff_pi(self.args[0]) if rest.is_zero and pi_mult: return (pi_mult - S.Half).is_integer
cos
python
readthedocs__readthedocs.org
readthedocs/proxito/views/hosting.py
{ "start": 8825, "end": 9564 }
class ____: """Mixin to remove fields from serializers.""" FIELDS_TO_REMOVE = [] def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) for field in self.FIELDS_TO_REMOVE: if field in self.fields: del self.fields[field] if field in self.Meta.fields: del self.Meta.fields[self.Meta.fields.index(field)] # NOTE: the following serializers are required only to remove some fields we # can't expose yet in this API endpoint because it's running under El Proxito # which cannot resolve URLs pointing to the APIv3 because they are not defined # on El Proxito. # # See https://github.com/readthedocs/readthedocs-ops/issues/1323
RemoveFieldsMixin
python
astropy__astropy
astropy/__init__.py
{ "start": 3589, "end": 4094 }
class ____(base_constants_version): """ The version of physical constants to use. """ # Maintainers: update when new constants are added _value = "codata2022" _versions = dict( codata2022="codata2022", codata2018="codata2018", codata2014="codata2014", codata2010="codata2010", astropyconst80="codata2022", astropyconst40="codata2018", astropyconst20="codata2014", astropyconst13="codata2010", )
physical_constants
python
getsentry__sentry
src/sentry/new_migrations/migrations.py
{ "start": 234, "end": 1731 }
class ____(Migration): """ Migrations subclassing this will perform safety checks to help ensure that they won't cause production issues during deploy. """ # This flag is used to decide whether to run this migration in a transaction or not. Generally # we don't want to run in a transaction here, since for long running operations like data # back-fills this results in us locking an increasing number of rows until we finally commit. atomic = False # This can be set to `False` to disable safety checks. Don't do this without approval from # the `owners-migrations` team. checked = True def apply(self, project_state, schema_editor, collect_sql=False): if self.checked: schema_editor.safe = True for op in self.operations: validate_operation(op) return super().apply(project_state, schema_editor, collect_sql) def validate_operation(op): if isinstance(op, RunSQL) and not isinstance(op, SafeRunSQL): raise UnsafeOperationException( "Using `RunSQL` is unsafe because our migrations safety framework can't detect problems with the " "migration, and doesn't apply timeout and statement locks. Use `SafeRunSQL` instead, and get " "approval from `owners-migrations` to make sure that it's safe." ) if isinstance(op, SeparateDatabaseAndState): for db_op in op.database_operations: validate_operation(db_op)
CheckedMigration
python
getsentry__sentry-python
sentry_sdk/integrations/anthropic.py
{ "start": 1279, "end": 15640 }
class ____(Integration): identifier = "anthropic" origin = f"auto.ai.{identifier}" def __init__(self, include_prompts=True): # type: (AnthropicIntegration, bool) -> None self.include_prompts = include_prompts @staticmethod def setup_once(): # type: () -> None version = package_version("anthropic") _check_minimum_version(AnthropicIntegration, version) Messages.create = _wrap_message_create(Messages.create) AsyncMessages.create = _wrap_message_create_async(AsyncMessages.create) def _capture_exception(exc): # type: (Any) -> None set_span_errored() event, hint = event_from_exception( exc, client_options=sentry_sdk.get_client().options, mechanism={"type": "anthropic", "handled": False}, ) sentry_sdk.capture_event(event, hint=hint) def _get_token_usage(result): # type: (Messages) -> tuple[int, int] """ Get token usage from the Anthropic response. """ input_tokens = 0 output_tokens = 0 if hasattr(result, "usage"): usage = result.usage if hasattr(usage, "input_tokens") and isinstance(usage.input_tokens, int): input_tokens = usage.input_tokens if hasattr(usage, "output_tokens") and isinstance(usage.output_tokens, int): output_tokens = usage.output_tokens return input_tokens, output_tokens def _collect_ai_data(event, model, input_tokens, output_tokens, content_blocks): # type: (MessageStreamEvent, str | None, int, int, list[str]) -> tuple[str | None, int, int, list[str]] """ Collect model information, token usage, and collect content blocks from the AI streaming response. 
""" with capture_internal_exceptions(): if hasattr(event, "type"): if event.type == "message_start": usage = event.message.usage input_tokens += usage.input_tokens output_tokens += usage.output_tokens model = event.message.model or model elif event.type == "content_block_start": pass elif event.type == "content_block_delta": if hasattr(event.delta, "text"): content_blocks.append(event.delta.text) elif hasattr(event.delta, "partial_json"): content_blocks.append(event.delta.partial_json) elif event.type == "content_block_stop": pass elif event.type == "message_delta": output_tokens += event.usage.output_tokens return model, input_tokens, output_tokens, content_blocks def _set_input_data(span, kwargs, integration): # type: (Span, dict[str, Any], AnthropicIntegration) -> None """ Set input data for the span based on the provided keyword arguments for the anthropic message creation. """ system_prompt = kwargs.get("system") messages = kwargs.get("messages") if ( messages is not None and len(messages) > 0 and should_send_default_pii() and integration.include_prompts ): normalized_messages = [] if system_prompt: system_prompt_content = None # type: Optional[Union[str, List[dict[str, Any]]]] if isinstance(system_prompt, str): system_prompt_content = system_prompt elif isinstance(system_prompt, Iterable): system_prompt_content = [] for item in system_prompt: if ( isinstance(item, dict) and item.get("type") == "text" and item.get("text") ): system_prompt_content.append(item.copy()) if system_prompt_content: normalized_messages.append( { "role": GEN_AI_ALLOWED_MESSAGE_ROLES.SYSTEM, "content": system_prompt_content, } ) for message in messages: if ( message.get("role") == GEN_AI_ALLOWED_MESSAGE_ROLES.USER and "content" in message and isinstance(message["content"], (list, tuple)) ): for item in message["content"]: if item.get("type") == "tool_result": normalized_messages.append( { "role": GEN_AI_ALLOWED_MESSAGE_ROLES.TOOL, "content": { # type: ignore[dict-item] "tool_use_id": 
item.get("tool_use_id"), "output": item.get("content"), }, } ) else: normalized_messages.append(message) role_normalized_messages = normalize_message_roles(normalized_messages) scope = sentry_sdk.get_current_scope() messages_data = truncate_and_annotate_messages( role_normalized_messages, span, scope ) if messages_data is not None: set_data_normalized( span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages_data, unpack=False ) set_data_normalized( span, SPANDATA.GEN_AI_RESPONSE_STREAMING, kwargs.get("stream", False) ) kwargs_keys_to_attributes = { "max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS, "model": SPANDATA.GEN_AI_REQUEST_MODEL, "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE, "top_k": SPANDATA.GEN_AI_REQUEST_TOP_K, "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P, } for key, attribute in kwargs_keys_to_attributes.items(): value = kwargs.get(key) if value is not None and _is_given(value): set_data_normalized(span, attribute, value) # Input attributes: Tools tools = kwargs.get("tools") if tools is not None and _is_given(tools) and len(tools) > 0: set_data_normalized( span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, safe_serialize(tools) ) def _set_output_data( span, integration, model, input_tokens, output_tokens, content_blocks, finish_span=False, ): # type: (Span, AnthropicIntegration, str | None, int | None, int | None, list[Any], bool) -> None """ Set output data for the span based on the AI response.""" span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, model) if should_send_default_pii() and integration.include_prompts: output_messages = { "response": [], "tool": [], } # type: (dict[str, list[Any]]) for output in content_blocks: if output["type"] == "text": output_messages["response"].append(output["text"]) elif output["type"] == "tool_use": output_messages["tool"].append(output) if len(output_messages["tool"]) > 0: set_data_normalized( span, SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, output_messages["tool"], unpack=False, ) if len(output_messages["response"]) > 0: 
set_data_normalized( span, SPANDATA.GEN_AI_RESPONSE_TEXT, output_messages["response"] ) record_token_usage( span, input_tokens=input_tokens, output_tokens=output_tokens, ) if finish_span: span.__exit__(None, None, None) def _sentry_patched_create_common(f, *args, **kwargs): # type: (Any, *Any, **Any) -> Any integration = kwargs.pop("integration") if integration is None: return f(*args, **kwargs) if "messages" not in kwargs: return f(*args, **kwargs) try: iter(kwargs["messages"]) except TypeError: return f(*args, **kwargs) model = kwargs.get("model", "") span = get_start_span_function()( op=OP.GEN_AI_CHAT, name=f"chat {model}".strip(), origin=AnthropicIntegration.origin, ) span.__enter__() _set_input_data(span, kwargs, integration) result = yield f, args, kwargs with capture_internal_exceptions(): if hasattr(result, "content"): input_tokens, output_tokens = _get_token_usage(result) content_blocks = [] for content_block in result.content: if hasattr(content_block, "to_dict"): content_blocks.append(content_block.to_dict()) elif hasattr(content_block, "model_dump"): content_blocks.append(content_block.model_dump()) elif hasattr(content_block, "text"): content_blocks.append({"type": "text", "text": content_block.text}) _set_output_data( span=span, integration=integration, model=getattr(result, "model", None), input_tokens=input_tokens, output_tokens=output_tokens, content_blocks=content_blocks, finish_span=True, ) # Streaming response elif hasattr(result, "_iterator"): old_iterator = result._iterator def new_iterator(): # type: () -> Iterator[MessageStreamEvent] model = None input_tokens = 0 output_tokens = 0 content_blocks = [] # type: list[str] for event in old_iterator: model, input_tokens, output_tokens, content_blocks = ( _collect_ai_data( event, model, input_tokens, output_tokens, content_blocks ) ) yield event _set_output_data( span=span, integration=integration, model=model, input_tokens=input_tokens, output_tokens=output_tokens, content_blocks=[{"text": 
"".join(content_blocks), "type": "text"}], finish_span=True, ) async def new_iterator_async(): # type: () -> AsyncIterator[MessageStreamEvent] model = None input_tokens = 0 output_tokens = 0 content_blocks = [] # type: list[str] async for event in old_iterator: model, input_tokens, output_tokens, content_blocks = ( _collect_ai_data( event, model, input_tokens, output_tokens, content_blocks ) ) yield event _set_output_data( span=span, integration=integration, model=model, input_tokens=input_tokens, output_tokens=output_tokens, content_blocks=[{"text": "".join(content_blocks), "type": "text"}], finish_span=True, ) if str(type(result._iterator)) == "<class 'async_generator'>": result._iterator = new_iterator_async() else: result._iterator = new_iterator() else: span.set_data("unknown_response", True) span.__exit__(None, None, None) return result def _wrap_message_create(f): # type: (Any) -> Any def _execute_sync(f, *args, **kwargs): # type: (Any, *Any, **Any) -> Any gen = _sentry_patched_create_common(f, *args, **kwargs) try: f, args, kwargs = next(gen) except StopIteration as e: return e.value try: try: result = f(*args, **kwargs) except Exception as exc: _capture_exception(exc) raise exc from None return gen.send(result) except StopIteration as e: return e.value @wraps(f) def _sentry_patched_create_sync(*args, **kwargs): # type: (*Any, **Any) -> Any integration = sentry_sdk.get_client().get_integration(AnthropicIntegration) kwargs["integration"] = integration try: return _execute_sync(f, *args, **kwargs) finally: span = sentry_sdk.get_current_span() if span is not None and span.status == SPANSTATUS.INTERNAL_ERROR: with capture_internal_exceptions(): span.__exit__(None, None, None) return _sentry_patched_create_sync def _wrap_message_create_async(f): # type: (Any) -> Any async def _execute_async(f, *args, **kwargs): # type: (Any, *Any, **Any) -> Any gen = _sentry_patched_create_common(f, *args, **kwargs) try: f, args, kwargs = next(gen) except StopIteration as e: 
return await e.value try: try: result = await f(*args, **kwargs) except Exception as exc: _capture_exception(exc) raise exc from None return gen.send(result) except StopIteration as e: return e.value @wraps(f) async def _sentry_patched_create_async(*args, **kwargs): # type: (*Any, **Any) -> Any integration = sentry_sdk.get_client().get_integration(AnthropicIntegration) kwargs["integration"] = integration try: return await _execute_async(f, *args, **kwargs) finally: span = sentry_sdk.get_current_span() if span is not None and span.status == SPANSTATUS.INTERNAL_ERROR: with capture_internal_exceptions(): span.__exit__(None, None, None) return _sentry_patched_create_async def _is_given(obj): # type: (Any) -> bool """ Check for givenness safely across different anthropic versions. """ if NotGiven is not None and isinstance(obj, NotGiven): return False if Omit is not None and isinstance(obj, Omit): return False return True
AnthropicIntegration
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_hyperlink33.py
{ "start": 315, "end": 950 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("hyperlink33.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file with image(s).""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() worksheet.insert_image( "E9", self.image_dir + "red.png", {"url": "https://github.com/jmcnamara", "tip": "GitHub"}, ) workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
spack__spack
lib/spack/spack/vendor/ruamel/yaml/events.py
{ "start": 2690, "end": 3269 }
class ____(NodeEvent): __slots__ = 'tag', 'implicit', 'flow_style', 'nr_items' def __init__( self, anchor, tag, implicit, start_mark=None, end_mark=None, flow_style=None, comment=None, nr_items=None, ): # type: (Any, Any, Any, Any, Any, Any, Any, Optional[int]) -> None NodeEvent.__init__(self, anchor, start_mark, end_mark, comment) self.tag = tag self.implicit = implicit self.flow_style = flow_style self.nr_items = nr_items
CollectionStartEvent
python
walkccc__LeetCode
solutions/945. Minimum Increment to Make Array Unique/945.py
{ "start": 0, "end": 242 }
class ____: def minIncrementForUnique(self, nums: list[int]) -> int: ans = 0 minAvailable = 0 for num in sorted(nums): ans += max(minAvailable - num, 0) minAvailable = max(minAvailable, num) + 1 return ans
Solution
python
airbytehq__airbyte
airbyte-integrations/connectors/source-shopify/source_shopify/utils.py
{ "start": 2151, "end": 2741 }
class ____(AirbyteTracedException): """Raises the error if authenticated user doesn't have access to verify the grantted scopes.""" help_url = "https://shopify.dev/docs/api/usage/access-scopes#authenticated-access-scopes" def __init__(self, response, **kwargs) -> None: self.message = f"Reason: Scopes are not available, make sure you're using the correct `Shopify Store` name. Actual response: {response}. More info about: {self.help_url}" super().__init__(internal_message=self.message, failure_type=FailureType.config_error, **kwargs)
ShopifyAccessScopesError
python
mwaskom__seaborn
tests/_marks/test_line.py
{ "start": 5188, "end": 5716 }
class ____: # Most behaviors shared with Path and covered by above tests def test_xy_data(self): x = [1, 5, 3, np.nan, 2] y = [1, 4, 2, 5, 3] g = [1, 2, 1, 1, 2] p = Plot(x=x, y=y, group=g).add(Line()).plot() line1, line2 = p._figure.axes[0].get_lines() assert_array_equal(line1.get_xdata(), [1, 3]) assert_array_equal(line1.get_ydata(), [1, 2]) assert_array_equal(line2.get_xdata(), [2, 5]) assert_array_equal(line2.get_ydata(), [3, 4])
TestLine
python
jazzband__prettytable
tests/test_html.py
{ "start": 869, "end": 21839 }
class ____: def test_html_output(self, helper_table: PrettyTable) -> None: result = helper_table.get_html_string() assert ( result.strip() == """ <table> <thead> <tr> <th></th> <th>Field 1</th> <th>Field 2</th> <th>Field 3</th> </tr> </thead> <tbody> <tr> <td>1</td> <td>value 1</td> <td>value2</td> <td>value3</td> </tr> <tr> <td>4</td> <td>value 4</td> <td>value5</td> <td>value6</td> </tr> <tr> <td>7</td> <td>value 7</td> <td>value8</td> <td>value9</td> </tr> </tbody> </table> """.strip() ) def test_html_output_formatted(self, helper_table: PrettyTable) -> None: result = helper_table.get_html_string(format=True) assert ( result.strip() == """ <table frame="box" rules="cols"> <thead> <tr> <th style="padding-left: 1em; padding-right: 1em; text-align: center"></th> <th style="padding-left: 1em; padding-right: 1em; text-align: center">Field 1</th> <th style="padding-left: 1em; padding-right: 1em; text-align: center">Field 2</th> <th style="padding-left: 1em; padding-right: 1em; text-align: center">Field 3</th> </tr> </thead> <tbody> <tr> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">1</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value 1</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value2</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value3</td> </tr> <tr> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">4</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value 4</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value5</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value6</td> </tr> <tr> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">7</td> <td 
style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value 7</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value8</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value9</td> </tr> </tbody> </table> """.strip() # noqa: E501 ) def test_html_output_with_title(self, helper_table: PrettyTable) -> None: helper_table.title = "Title & Title" result = helper_table.get_html_string( attributes={"bgcolor": "red", "a<b": "1<2"} ) assert ( result.strip() == """ <table bgcolor="red" a&lt;b="1&lt;2"> <caption>Title &amp; Title</caption> <thead> <tr> <th></th> <th>Field 1</th> <th>Field 2</th> <th>Field 3</th> </tr> </thead> <tbody> <tr> <td>1</td> <td>value 1</td> <td>value2</td> <td>value3</td> </tr> <tr> <td>4</td> <td>value 4</td> <td>value5</td> <td>value6</td> </tr> <tr> <td>7</td> <td>value 7</td> <td>value8</td> <td>value9</td> </tr> </tbody> </table> """.strip() ) def test_html_output_formatted_with_title(self, helper_table: PrettyTable) -> None: helper_table.title = "Title & Title" result = helper_table.get_html_string( attributes={"bgcolor": "red", "a<b": "1<2"}, format=True ) assert ( result.strip() == """ <table frame="box" rules="cols" bgcolor="red" a&lt;b="1&lt;2"> <caption>Title &amp; Title</caption> <thead> <tr> <th style="padding-left: 1em; padding-right: 1em; text-align: center"></th> <th style="padding-left: 1em; padding-right: 1em; text-align: center">Field 1</th> <th style="padding-left: 1em; padding-right: 1em; text-align: center">Field 2</th> <th style="padding-left: 1em; padding-right: 1em; text-align: center">Field 3</th> </tr> </thead> <tbody> <tr> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">1</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value 1</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; 
vertical-align: top">value2</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value3</td> </tr> <tr> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">4</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value 4</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value5</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value6</td> </tr> <tr> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">7</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value 7</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value8</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value9</td> </tr> </tbody> </table> """.strip() # noqa: E501 ) def test_html_output_without_escaped_header( self, empty_helper_table: PrettyTable ) -> None: empty_helper_table.field_names = [ "", "Field 1", "<em>Field 2</em>", "<a href='#'>Field 3</a>", ] result = empty_helper_table.get_html_string(escape_header=False) assert ( result.strip() == """ <table> <thead> <tr> <th></th> <th>Field 1</th> <th><em>Field 2</em></th> <th><a href='#'>Field 3</a></th> </tr> </thead> <tbody> </tbody> </table> """.strip() ) def test_html_output_without_escaped_data( self, empty_helper_table: PrettyTable ) -> None: empty_helper_table.add_row( [ 1, "<b>value 1</b>", "<span style='text-decoration: underline;'>value2</span>", "<a href='#'>value3</a>", ] ) result = empty_helper_table.get_html_string(escape_data=False) assert ( result.strip() == """ <table> <thead> <tr> <th></th> <th>Field 1</th> <th>Field 2</th> <th>Field 3</th> </tr> </thead> <tbody> <tr> <td>1</td> <td><b>value 1</b></td> <td><span style='text-decoration: 
underline;'>value2</span></td> <td><a href='#'>value3</a></td> </tr> </tbody> </table> """.strip() ) def test_html_output_with_escaped_header( self, empty_helper_table: PrettyTable ) -> None: empty_helper_table.field_names = [ "", "Field 1", "<em>Field 2</em>", "<a href='#'>Field 3</a>", ] result = empty_helper_table.get_html_string(escape_header=True) assert ( result.strip() == """ <table> <thead> <tr> <th></th> <th>Field 1</th> <th>&lt;em&gt;Field 2&lt;/em&gt;</th> <th>&lt;a href=&#x27;#&#x27;&gt;Field 3&lt;/a&gt;</th> </tr> </thead> <tbody> </tbody> </table> """.strip() ) def test_html_output_with_escaped_data( self, empty_helper_table: PrettyTable ) -> None: empty_helper_table.add_row( [ 1, "<b>value 1</b>", "<span style='text-decoration: underline;'>value2</span>", "<a href='#'>value3</a>", ] ) result = empty_helper_table.get_html_string(escape_data=True) assert ( result.strip() == """ <table> <thead> <tr> <th></th> <th>Field 1</th> <th>Field 2</th> <th>Field 3</th> </tr> </thead> <tbody> <tr> <td>1</td> <td>&lt;b&gt;value 1&lt;/b&gt;</td> <td>&lt;span style=&#x27;text-decoration: underline;&#x27;&gt;value2&lt;/span&gt;</td> <td>&lt;a href=&#x27;#&#x27;&gt;value3&lt;/a&gt;</td> </tr> </tbody> </table> """.strip() # noqa: E501 ) def test_html_output_formatted_without_escaped_header( self, empty_helper_table: PrettyTable ) -> None: empty_helper_table.field_names = [ "", "Field 1", "<em>Field 2</em>", "<a href='#'>Field 3</a>", ] result = empty_helper_table.get_html_string(escape_header=False, format=True) assert ( result.strip() == """ <table frame="box" rules="cols"> <thead> <tr> <th style="padding-left: 1em; padding-right: 1em; text-align: center"></th> <th style="padding-left: 1em; padding-right: 1em; text-align: center">Field 1</th> <th style="padding-left: 1em; padding-right: 1em; text-align: center"><em>Field 2</em></th> <th style="padding-left: 1em; padding-right: 1em; text-align: center"><a href='#'>Field 3</a></th> </tr> </thead> <tbody> </tbody> 
</table> """.strip() # noqa: E501 ) def test_html_output_formatted_without_escaped_data( self, empty_helper_table: PrettyTable ) -> None: empty_helper_table.add_row( [ 1, "<b>value 1</b>", "<span style='text-decoration: underline;'>value2</span>", "<a href='#'>value3</a>", ] ) result = empty_helper_table.get_html_string(escape_data=False, format=True) assert ( result.strip() == """ <table frame="box" rules="cols"> <thead> <tr> <th style="padding-left: 1em; padding-right: 1em; text-align: center"></th> <th style="padding-left: 1em; padding-right: 1em; text-align: center">Field 1</th> <th style="padding-left: 1em; padding-right: 1em; text-align: center">Field 2</th> <th style="padding-left: 1em; padding-right: 1em; text-align: center">Field 3</th> </tr> </thead> <tbody> <tr> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">1</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top"><b>value 1</b></td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top"><span style='text-decoration: underline;'>value2</span></td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top"><a href='#'>value3</a></td> </tr> </tbody> </table> """.strip() # noqa: E501 ) def test_html_output_formatted_with_escaped_header( self, empty_helper_table: PrettyTable ) -> None: empty_helper_table.field_names = [ "", "Field 1", "<em>Field 2</em>", "<a href='#'>Field 3</a>", ] result = empty_helper_table.get_html_string(escape_header=True, format=True) assert ( result.strip() == """ <table frame="box" rules="cols"> <thead> <tr> <th style="padding-left: 1em; padding-right: 1em; text-align: center"></th> <th style="padding-left: 1em; padding-right: 1em; text-align: center">Field 1</th> <th style="padding-left: 1em; padding-right: 1em; text-align: center">&lt;em&gt;Field 2&lt;/em&gt;</th> <th style="padding-left: 1em; padding-right: 1em; text-align: 
center">&lt;a href=&#x27;#&#x27;&gt;Field 3&lt;/a&gt;</th> </tr> </thead> <tbody> </tbody> </table> """.strip() # noqa: E501 ) def test_html_output_formatted_with_escaped_data( self, empty_helper_table: PrettyTable ) -> None: empty_helper_table.add_row( [ 1, "<b>value 1</b>", "<span style='text-decoration: underline;'>value2</span>", "<a href='#'>value3</a>", ] ) result = empty_helper_table.get_html_string(escape_data=True, format=True) assert ( result.strip() == """ <table frame="box" rules="cols"> <thead> <tr> <th style="padding-left: 1em; padding-right: 1em; text-align: center"></th> <th style="padding-left: 1em; padding-right: 1em; text-align: center">Field 1</th> <th style="padding-left: 1em; padding-right: 1em; text-align: center">Field 2</th> <th style="padding-left: 1em; padding-right: 1em; text-align: center">Field 3</th> </tr> </thead> <tbody> <tr> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">1</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">&lt;b&gt;value 1&lt;/b&gt;</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">&lt;span style=&#x27;text-decoration: underline;&#x27;&gt;value2&lt;/span&gt;</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">&lt;a href=&#x27;#&#x27;&gt;value3&lt;/a&gt;</td> </tr> </tbody> </table> """.strip() # noqa: E501 ) def test_table_formatted_html_autoindex(self) -> None: """See also #199""" table = PrettyTable(["Field 1", "Field 2", "Field 3"]) for row in range(1, 3 * 3, 3): table.add_row( [f"value {row*100}", f"value {row+1*100}", f"value {row+2*100}"] ) table.format = True table.add_autoindex("I") assert ( table.get_html_string().strip() == """ <table frame="box" rules="cols"> <thead> <tr> <th style="padding-left: 1em; padding-right: 1em; text-align: center">I</th> <th style="padding-left: 1em; padding-right: 1em; text-align: center">Field 1</th> 
<th style="padding-left: 1em; padding-right: 1em; text-align: center">Field 2</th> <th style="padding-left: 1em; padding-right: 1em; text-align: center">Field 3</th> </tr> </thead> <tbody> <tr> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">1</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value 100</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value 101</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value 201</td> </tr> <tr> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">2</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value 400</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value 104</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value 204</td> </tr> <tr> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">3</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value 700</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value 107</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value 207</td> </tr> </tbody> </table>""".strip() # noqa: E501 ) def test_internal_border_preserved_html(self, helper_table: PrettyTable) -> None: helper_table.format = True helper_table.border = False helper_table.preserve_internal_border = True assert ( helper_table.get_html_string().strip() == """ <table rules="cols"> <thead> <tr> <th style="padding-left: 1em; padding-right: 1em; text-align: center"></th> <th style="padding-left: 1em; padding-right: 1em; text-align: center">Field 1</th> <th style="padding-left: 1em; padding-right: 1em; 
text-align: center">Field 2</th> <th style="padding-left: 1em; padding-right: 1em; text-align: center">Field 3</th> </tr> </thead> <tbody> <tr> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">1</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value 1</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value2</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value3</td> </tr> <tr> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">4</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value 4</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value5</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value6</td> </tr> <tr> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">7</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value 7</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value8</td> <td style="padding-left: 1em; padding-right: 1em; text-align: center; vertical-align: top">value9</td> </tr> </tbody> </table> """.strip() # noqa: E501 ) def test_break_line_html(self) -> None: table = PrettyTable(["Field 1", "Field 2"]) table.add_row(["value 1", "value2\nsecond line"]) table.add_row(["value 3", "value4"]) result = table.get_html_string(hrules=HRuleStyle.ALL) assert ( result.strip() == """ <table> <thead> <tr> <th>Field 1</th> <th>Field 2</th> </tr> </thead> <tbody> <tr> <td>value 1</td> <td>value2<br>second line</td> </tr> <tr> <td>value 3</td> <td>value4</td> </tr> </tbody> </table> """.strip() ) def test_break_line_xhtml(self) -> None: table = PrettyTable(["Field 1", "Field 2"]) 
table.add_row(["value 1", "value2\nsecond line"]) table.add_row(["value 3", "value4"]) result = table.get_html_string(hrules=HRuleStyle.ALL, xhtml=True) assert ( result.strip() == """ <table> <thead> <tr> <th>Field 1</th> <th>Field 2</th> </tr> </thead> <tbody> <tr> <td>value 1</td> <td>value2<br/>second line</td> </tr> <tr> <td>value 3</td> <td>value4</td> </tr> </tbody> </table> """.strip() )
TestHtmlOutput
python
django-debug-toolbar__django-debug-toolbar
tests/base.py
{ "start": 3740, "end": 3840 }
class ____(BaseMixin, TransactionTestCase): databases = {"default", "replica"}
BaseMultiDBTestCase
python
django__django
tests/admin_registration/models.py
{ "start": 180, "end": 215 }
class ____(Person): pass
Traveler
python
huggingface__transformers
src/transformers/models/sam3_tracker_video/modular_sam3_tracker_video.py
{ "start": 19020, "end": 19088 }
class ____(Sam2VideoMemoryFuser): pass
Sam3TrackerVideoMemoryFuser
python
pandas-dev__pandas
pandas/tests/api/test_api.py
{ "start": 11091, "end": 11539 }
class ____(Base): def test_util(self): self.check( pd.util, ["hash_array", "hash_pandas_object"], ignored=[ "_decorators", "_test_decorators", "_exceptions", "_validators", "capitalize_first_letter", "version", "_print_versions", "_tester", ], )
TestUtil
python
encode__django-rest-framework
tests/test_fields.py
{ "start": 57100, "end": 57387 }
class ____(FieldValues): """ Values for `TimeField` with a custom output format. """ valid_inputs = {} invalid_inputs = {} outputs = { datetime.time(13, 00): '01:00PM' } field = serializers.TimeField(format='%I:%M%p')
TestCustomOutputFormatTimeField
python
anthropics__anthropic-sdk-python
src/anthropic/types/message_param.py
{ "start": 871, "end": 1570 }
class ____(TypedDict, total=False): content: Required[ Union[ str, Iterable[ Union[ TextBlockParam, ImageBlockParam, DocumentBlockParam, SearchResultBlockParam, ThinkingBlockParam, RedactedThinkingBlockParam, ToolUseBlockParam, ToolResultBlockParam, ServerToolUseBlockParam, WebSearchToolResultBlockParam, ContentBlock, ] ], ] ] role: Required[Literal["user", "assistant"]]
MessageParam
python
wandb__wandb
wandb/sdk/artifacts/_generated/enums.py
{ "start": 356, "end": 442 }
class ____(str, Enum): READY = "READY" DELETED = "DELETED"
ArtifactCollectionState
python
pytorch__pytorch
torch/_strobelight/cli_function_profiler.py
{ "start": 696, "end": 1389 }
class ____(Exception): """ Raised when an error happens during strobelight profiling """ def _pid_namespace_link(pid: Optional[int] = None) -> str: """Returns the link to the process's namespace, example: pid:[4026531836]""" PID_NAMESPACE_PATH = "/proc/{}/ns/pid" pid = pid or os.getpid() return os.readlink(PID_NAMESPACE_PATH.format(pid)) def _pid_namespace(pid: Optional[int] = None) -> int: """Returns the process's namespace id""" pid = pid or os.getpid() link = _pid_namespace_link(pid) return int(link[link.find("[") + 1 : -1]) def _command_to_string(command: Sequence[str]) -> str: return " ".join(command)
StrobelightCLIProfilerError
python
sphinx-doc__sphinx
tests/roots/test-root/parsermod.py
{ "start": 65, "end": 346 }
class ____(Parser): supported = ('foo',) def parse(self, input, document): section = nodes.section(ids=['id1']) section += nodes.title('Generated section', 'Generated section') document += section def get_transforms(self): return []
Parser
python
pytorch__pytorch
torch/_inductor/fuzzer.py
{ "start": 1642, "end": 1878 }
class ____(CustomGraphPass): """ A Dummy pass to be used by ConfigFuzzer """ def __call__(self, graph: torch.fx.graph.Graph) -> None: return None def uuid(self) -> Optional[Any]: return None
DummyPass
python
astropy__astropy
astropy/units/tests/test_quantity_typing.py
{ "start": 234, "end": 2573 }
class ____: """Test Quantity Typing Annotations.""" def test_quantity_typing(self): """Test type hint creation from Quantity.""" annot = u.Quantity[u.m] assert get_origin(annot) is Annotated assert get_args(annot) == (u.Quantity, u.m) # test usage def func(x: annot, y: str) -> u.Quantity[u.s]: return x, y annots = get_type_hints(func, include_extras=True) assert annots["x"] is annot assert annots["return"].__metadata__[0] == u.s def test_metadata_in_annotation(self): """Test Quantity annotation with added metadata.""" multi_annot = u.Quantity[u.m, Any, np.dtype] def multi_func(x: multi_annot, y: str): return x, y annots = get_type_hints(multi_func, include_extras=True) assert annots["x"] == multi_annot def test_optional_and_annotated(self): """Test Quantity annotation in an Optional.""" opt_annot = u.Quantity[u.m] | None def opt_func(x: opt_annot, y: str): return x, y annots = get_type_hints(opt_func, include_extras=True) assert annots["x"] == opt_annot def test_union_and_annotated(self): """Test Quantity annotation in a Union.""" # double Quantity[] union_annot1 = u.Quantity[u.m] | u.Quantity[u.s] # one Quantity, one physical-type union_annot2 = u.Quantity[u.m] | u.Quantity["time"] # one Quantity, one general type union_annot3 = u.Quantity[u.m / u.s] | float def union_func(x: union_annot1, y: union_annot2) -> union_annot3: if isinstance(y, str): # value = time return x.value # returns <float> else: return x / y # returns Quantity[m / s] annots = get_type_hints(union_func, include_extras=True) assert annots["x"] == union_annot1 assert annots["y"] == union_annot2 assert annots["return"] == union_annot3 def test_quantity_subclass_typing(self): """Test type hint creation from a Quantity subclasses.""" class Length(u.SpecificTypeQuantity): _equivalent_unit = u.m annot = Length[u.km] assert get_origin(annot) is Annotated assert get_args(annot) == (Length, u.km)
TestQuantityTyping
python
pytorch__pytorch
test/distributed/test_c10d_nccl.py
{ "start": 158699, "end": 179443 }
class ____(test_c10d_common.AbstractCommTest, MultiProcessTestCase): @property def device(self): return f"cuda:{self.rank}" def setUp(self): super().setUp() # TORCH_NCCL_BLOCKING_WAIT overrides TORCH_NCCL_ASYNC_ERROR_HANDLING hence tests # that use TORCH_NCCL_BLOCKING_WAIT will test it as expected. os.environ["TORCH_NCCL_ASYNC_ERROR_HANDLING"] = "1" self._spawn_processes() def tearDown(self): super().tearDown() try: os.remove(self.file_name) except OSError: pass def _test_broadcast_coalesced(self, process_group, device, root_rank): half = torch.float16 # No support for float16 for CPU tensors if device == torch.device("cpu"): half = torch.float32 target = torch.arange(60, dtype=half, device=device).chunk(5) target += torch.arange(60, dtype=torch.float32, device=device).chunk(5) target += torch.arange(60, dtype=half, device=device).chunk(5) target += torch.arange(60, dtype=torch.float64, device=device).chunk(5) target += torch.arange(60, dtype=half, device=device).chunk(5) target += torch.arange(60, dtype=torch.float32, device=device).chunk(5) # The tensors to pass to broadcast are identical to the target # only on the process that is the root of the broadcast. 
if self.rank == root_rank: tensors = [tensor.clone() for tensor in target] else: tensors = [torch.zeros_like(tensor) for tensor in target] if self.rank != root_rank: self.assertNotEqual(tensors, target) c10d._broadcast_coalesced( process_group, tensors, buffer_size=256, src=root_rank ) if self.rank != root_rank: self.assertEqual(tensors, target) @requires_nccl() @skip_if_lt_x_gpu(2) def test_broadcast_coalesced_nccl(self): store = c10d.FileStore(self.file_name, self.world_size) c10d.init_process_group( backend="nccl", store=store, rank=self.rank, world_size=self.world_size ) process_group = c10d.distributed_c10d._get_default_group() device = torch.device(f"cuda:{self.rank:d}") ranks = [0, 1] for root_rank in ranks: self._test_broadcast_coalesced(process_group, device, root_rank) @requires_nccl() @skip_if_lt_x_gpu(2) def test_all_reduce_coalesced_nccl(self): store = c10d.FileStore(self.file_name, self.world_size) c10d.init_process_group( backend="nccl", store=store, rank=self.rank, world_size=self.world_size ) process_group = c10d.distributed_c10d._get_default_group() device = torch.device(f"cuda:{self.rank:d}") tensors = [ torch.full((60 + i,), self.rank + 1 + i, device=device, dtype=torch.float) for i in range(5) ] torch.distributed.all_reduce_coalesced(tensors, group=process_group) for i, t in enumerate(tensors): self.assertEqual( t, torch.full_like( t, self.world_size * (i + (self.world_size + 1.0) / 2.0) ), ) @requires_nccl() @skip_if_lt_x_gpu(2) def test_all_reduce_coalesced_manager_nccl(self): store = c10d.FileStore(self.file_name, self.world_size) c10d.init_process_group( backend="nccl", store=store, rank=self.rank, world_size=self.world_size ) process_group = c10d.distributed_c10d._get_default_group() device = torch.device(f"cuda:{self.rank:d}") tensors = [ torch.full((60 + i,), self.rank + 1 + i, device=device, dtype=torch.float) for i in range(5) ] with torch.distributed._coalescing_manager( group=process_group, device=device, async_ops=True ) as cm: for 
tensor in tensors: torch.distributed.all_reduce(tensor) self.assertEqual(len(cm.works), 1) cm.wait() for i, t in enumerate(tensors): self.assertEqual( t, torch.full_like( t, self.world_size * (i + (self.world_size + 1.0) / 2.0) ), ) @requires_nccl() @skip_if_lt_x_gpu(2) @runOnRocmArch(MI300_ARCH) def test_intra_node_comm_all_reduce(self): from torch._C._distributed_c10d import _get_intra_node_comm_usage_counter from torch.testing._internal.common_cuda import SM80OrLater for peer in range(self.world_size): if peer == self.rank: continue if not torch._C._cuda_canDeviceAccessPeer(self.rank, peer): raise SkipTest("Test requires p2p access") if not SM80OrLater: raise SkipTest("Test requires sm>=80") store = c10d.FileStore(self.file_name, self.world_size) os.environ["ENABLE_INTRA_NODE_COMM"] = "1" os.environ["TEST_INTRA_NODE_COMM"] = "1" torch.cuda.set_device(self.rank) c10d.init_process_group( backend="nccl", rank=self.rank, world_size=self.world_size, store=store ) expect = self.world_size * (self.world_size - 1) // 2 # IntraNodeComm currently only supports sum and bf16. # Verify that it is not used in the next two configurations. 
t = torch.full((4 * 1024 // 2,), self.rank).cuda() c10d.all_reduce(t, c10d.ReduceOp.SUM) self.assertTrue(t.eq(expect).all()) self.assertEqual(_get_intra_node_comm_usage_counter(), 0) t = torch.full((4 * 1024 // 2,), self.rank, dtype=torch.bfloat16).cuda() c10d.all_reduce(t, c10d.ReduceOp.AVG) self.assertEqual(_get_intra_node_comm_usage_counter(), 0) # Verify that IntraNodeComm is used up to 10MB t = torch.full((4 * 1024 // 2,), self.rank, dtype=torch.bfloat16).cuda() c10d.all_reduce(t, c10d.ReduceOp.SUM) self.assertTrue(t.eq(expect).all()) self.assertEqual(_get_intra_node_comm_usage_counter(), 1) t = torch.full((512 * 1024 // 2,), self.rank, dtype=torch.bfloat16).cuda() c10d.all_reduce(t, c10d.ReduceOp.SUM) self.assertTrue(t.eq(expect).all()) self.assertEqual(_get_intra_node_comm_usage_counter(), 2) t = torch.full((10 * 1024**2 // 2,), self.rank, dtype=torch.bfloat16).cuda() c10d.all_reduce(t, c10d.ReduceOp.SUM) self.assertTrue(t.eq(expect).all()) self.assertEqual(_get_intra_node_comm_usage_counter(), 3) # Verify that IntraNodeComm is not used beyond 10MB t = torch.full((10 * 1024**2 // 2 + 1,), self.rank, dtype=torch.bfloat16).cuda() c10d.all_reduce(t, c10d.ReduceOp.SUM) self.assertTrue(t.eq(expect).all()) self.assertEqual(_get_intra_node_comm_usage_counter(), 3) c10d.destroy_process_group() @requires_nccl() @requires_nccl_version( (2, 22), "Need NCCL 2.22+ for configuring estimate comm time" ) @skip_if_lt_x_gpu(2) def test_time_estimate_nccl(self): store = c10d.FileStore(self.file_name, self.world_size) torch.cuda.set_device(self.rank) c10d.init_process_group( backend="nccl", store=store, rank=self.rank, world_size=self.world_size ) process_group = c10d.distributed_c10d._get_default_group() device = torch.device(f"cuda:{self.rank:d}") t = torch.full( (1024,), self.rank, ).cuda() with dist._time_estimator(group=process_group, device=device) as cm: c10d.all_reduce(t, c10d.ReduceOp.SUM) self.assertTrue(cm.estimated_time is not None) self.assertTrue(cm.estimated_time 
> 0) @requires_nccl() @skip_if_lt_x_gpu(2) def test_sequence_num_set_default_pg_nccl(self): torch.cuda.set_device(self.rank) self._test_sequence_num_set_default_pg(backend="nccl") @skip_if_lt_x_gpu(2) @requires_nccl() def test_sequence_num_incremented_nccl_default(self): self._test_sequence_num_incremented_default_group("nccl") @skip_if_lt_x_gpu(4) @requires_nccl() def test_sequence_num_incremented_nccl_subgroup(self): if self.world_size < 4: return skip_but_pass_in_sandcastle("Test requires world_size of at least 4") self._test_sequence_num_incremented_subgroup("nccl") @requires_nccl() @skip_if_lt_x_gpu(2) def test_sequence_num_set_nccl_new_group(self): torch.cuda.set_device(self.rank) self._test_sequence_num_set_new_group(backend="nccl") def _test_pass_nccl_options(self, pg_opts): store = c10d.FileStore(self.file_name, self.world_size) # Test init_process_group accepts options dist.init_process_group( "nccl", world_size=self.world_size, rank=self.rank, store=store, pg_options=pg_opts, ) # Test with new_group pg = c10d.new_group([0, 1], pg_options=pg_opts) # test the process group works as expected t = torch.tensor([self.rank + 1] * 10).cuda(self.rank) pg.allreduce(t).wait() expected_tensor = torch.tensor([3] * 10).cuda(self.rank) self.assertEqual(expected_tensor, t) @requires_nccl() @skip_if_lt_x_gpu(2) def test_pass_nccl_options_high_priority_stream(self): pg_opts = c10d.ProcessGroupNCCL.Options() pg_opts.is_high_priority_stream = True self._test_pass_nccl_options(pg_opts) @requires_nccl() @requires_nccl_version( (2, 18), "Need NCCL 2.17+ for configuring NCCL communicators" ) @skip_if_lt_x_gpu(2) def test_pass_nccl_options_config(self): pg_opts = c10d.ProcessGroupNCCL.Options() pg_opts.config.max_ctas = 4 pg_opts.config.min_ctas = 2 pg_opts.config.cga_cluster_size = 2 pg_opts.config.net_name = "Socket" pg_opts.config.split_share = 1 nccl_debug_file = tempfile.NamedTemporaryFile() os.environ["NCCL_DEBUG"] = "INFO" os.environ["NCCL_DEBUG_FILE"] = 
nccl_debug_file.name # Tests functionality when passing nccl config self._test_pass_nccl_options(pg_opts) # Tests if comms were configured nccl_debug_file_content = nccl_debug_file.read() max_ctas = re.search(rb"Max CTAs.*(\d+)|$", nccl_debug_file_content).group(1) min_ctas = re.search(rb"Min CTAs.*(\d+)|$", nccl_debug_file_content).group(1) split_share = re.search( rb"Split share.*(\d+)|$", nccl_debug_file_content ).group(1) cga_cluster_size = re.search( rb"CGA cluster.*(\d+)|$", nccl_debug_file_content ).group(1) net_name = re.search( rb"Using network.([a-zA-z]+)|$", nccl_debug_file_content ).group(1) self.assertEqual(pg_opts.config.max_ctas, int(max_ctas)) self.assertEqual(pg_opts.config.min_ctas, int(min_ctas)) self.assertEqual(pg_opts.config.cga_cluster_size, int(cga_cluster_size)) self.assertEqual(pg_opts.config.net_name, net_name.decode()) self.assertEqual(pg_opts.config.split_share, int(split_share)) # Tests that config is inited correctly pg_opts = c10d.ProcessGroupNCCL.Options() nccl_cfg = c10d.ProcessGroupNCCL.NCCLConfig() self.assertEqual(pg_opts.config.min_ctas, -2147483648) self.assertEqual(nccl_cfg.min_ctas, -2147483648) # Tests that opts and config can be copied pg_opts_2 = copy.deepcopy(pg_opts) nccl_cfg_2 = copy.copy(pg_opts_2.config) pg_opts_2.config.min_ctas = 2 nccl_cfg_2.min_ctas = 4 self.assertEqual(pg_opts.config.min_ctas, -2147483648) self.assertEqual(pg_opts_2.config.min_ctas, 2) self.assertEqual(nccl_cfg_2.min_ctas, 4) @requires_nccl() @skip_if_lt_x_gpu(4) def test_nccl_barrier(self): store = c10d.FileStore(self.file_name, self.world_size) c10d.init_process_group( backend="nccl", rank=self.rank, world_size=self.world_size, store=store ) t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank) c10d.all_reduce(t) expected_tensor = torch.tensor([3] * 10).cuda(2 * self.rank) self.assertEqual(expected_tensor, t) # Test with new_group pg = c10d.new_group([0, 1]) t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank) pg.allreduce(t).wait() 
self.assertEqual(expected_tensor, t) pg = c10d.new_group([0]) if self.rank == 0: t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank) expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank) pg.allreduce(t).wait() self.assertEqual(expected_tensor, t) pg = c10d.new_group([1]) if self.rank == 1: t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank) expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank) pg.allreduce(t).wait() self.assertEqual(expected_tensor, t) @requires_nccl() @skip_if_lt_x_gpu(2) def test_nccl_barrier_device_ids(self): store = c10d.FileStore(self.file_name, self.world_size) c10d.init_process_group( backend="nccl", rank=self.rank, world_size=self.world_size, store=store ) c10d.barrier(device_ids=[self.rank]) @requires_nccl() @skip_if_lt_x_gpu(2) def test_unwaited(self) -> None: # Verify that the process can terminate gracefully # even with unwaited tensors store = c10d.FileStore(self.file_name, self.world_size) c10d.init_process_group( backend="nccl", rank=self.rank, world_size=self.world_size, store=store ) # Case 1: Run collectives under context manager, and don't call wait on them. with _functional_collectives.allow_inflight_collective_as_graph_input_ctx(): self.assertEqual(torch._C._distributed_c10d._get_work_registry_size(), 0) input = torch.full( (10240, 10240), float(self.rank), device=f"cuda:{self.rank}" ) dist.all_reduce(input, op=dist.ReduceOp.SUM, async_op=True) # Non-functional collectives run under the context manager is registered in the work registry. self.assertEqual(torch._C._distributed_c10d._get_work_registry_size(), 1) # Running another collective on the same tensor should still work dist.all_reduce(input, op=dist.ReduceOp.SUM, async_op=True) self.assertEqual(torch._C._distributed_c10d._get_work_registry_size(), 2) # Case 2: Run collectives not under context manager, and don't call wait on them. # NOTE: Here we intentionally test memory-stressed case. 
self.assertEqual(torch._C._distributed_c10d._get_work_registry_size(), 2) for _ in range(50000): input = torch.full( (1024, 1024), float(self.rank), device=f"cuda:{self.rank}" ) dist.all_reduce(input, op=dist.ReduceOp.SUM, async_op=True) # Work registry size is unchanged, since non-functional collectives not run under # the context manager is not registered in the work registry. self.assertEqual(torch._C._distributed_c10d._get_work_registry_size(), 2) @requires_nccl() @skip_if_lt_x_gpu(2) def test_wait_tensor(self) -> None: # Verify that c10d_functional.wait_tensor() can be invoked on # output tensor of non-functional collective store = c10d.FileStore(self.file_name, self.world_size) c10d.init_process_group( backend="nccl", rank=self.rank, world_size=self.world_size, store=store ) # Case 1: under context manager (i.e. work is registered in registry) with _functional_collectives.allow_inflight_collective_as_graph_input_ctx(): input1 = torch.full((10, 10), float(self.rank), device=f"cuda:{self.rank}") self.assertEqual(torch._C._distributed_c10d._get_work_registry_size(), 0) dist.all_reduce(input1, op=dist.ReduceOp.SUM, async_op=True) self.assertEqual(torch._C._distributed_c10d._get_work_registry_size(), 1) torch.ops.c10d_functional.wait_tensor(input1) self.assertEqual(torch._C._distributed_c10d._get_work_registry_size(), 0) input2 = torch.full((10, 10), float(self.rank), device=f"cuda:{self.rank}") self.assertEqual(torch._C._distributed_c10d._get_work_registry_size(), 0) work = dist.all_reduce(input2, op=dist.ReduceOp.SUM, async_op=True) self.assertEqual(torch._C._distributed_c10d._get_work_registry_size(), 1) work.wait() self.assertEqual(torch._C._distributed_c10d._get_work_registry_size(), 0) self.assertEqual(input1, input2) # Case 2: not under context manager (i.e. 
work is not registered in registry) input1 = torch.full((10, 10), float(self.rank), device=f"cuda:{self.rank}") self.assertEqual(torch._C._distributed_c10d._get_work_registry_size(), 0) dist.all_reduce(input1, op=dist.ReduceOp.SUM, async_op=True) self.assertEqual(torch._C._distributed_c10d._get_work_registry_size(), 0) # this does not take effect, since the underlying wait_tensor() logic would not # be able to find the corresponding work object (because it's not registered in registry) torch.ops.c10d_functional.wait_tensor(input1) self.assertEqual(torch._C._distributed_c10d._get_work_registry_size(), 0) input2 = torch.full((10, 10), float(self.rank), device=f"cuda:{self.rank}") self.assertEqual(torch._C._distributed_c10d._get_work_registry_size(), 0) work = dist.all_reduce(input2, op=dist.ReduceOp.SUM, async_op=True) self.assertEqual(torch._C._distributed_c10d._get_work_registry_size(), 0) work.wait() self.assertEqual(torch._C._distributed_c10d._get_work_registry_size(), 0) self.assertEqual(input1, input2) @requires_nccl() @skip_if_lt_x_gpu(2) @with_dist_debug_levels(levels=["DETAIL"]) def test_nccl_warn_not_in_group_debug_detail(self): self._test_warn_not_in_group(backend="nccl") @requires_nccl() @skip_if_lt_x_gpu(2) @with_dist_debug_levels(levels=["INFO"]) def test_nccl_warn_not_in_group_debug_info(self): self._test_warn_not_in_group(backend="nccl") @requires_nccl() @skip_if_lt_x_gpu(2) @with_dist_debug_levels(levels=["OFF"]) def test_nccl_warn_not_in_group_debug_off(self): self._test_warn_not_in_group(backend="nccl") @requires_nccl() @skip_if_lt_x_gpu(2) def test_nncl_rank_membership(self): self._test_rank_membership(backend="nccl") @requires_nccl() @skip_if_lt_x_gpu(2) def test_tensor_dtype_mismatch(self): self._test_tensor_dtype_mismatch(backend="nccl") @requires_nccl() @skip_if_lt_x_gpu(2) def test_tensor_dtype_complex(self): self._test_tensor_dtype_complex(backend="nccl") @requires_nccl() @skip_if_lt_x_gpu(2) def test_reduce_scatter_base_k(self): store = 
dist.FileStore(self.file_name, self.world_size) dist.init_process_group( "nccl", world_size=self.world_size, rank=self.rank, store=store, ) output_tensor = torch.zeros(2, dtype=torch.int64).to(self.rank) input_tensors = torch.arange(self.world_size * 2, dtype=torch.int64).to( self.rank ) input_tensors = torch.reshape(input_tensors, (self.world_size, 2)) dist.reduce_scatter_tensor(output_tensor, input_tensors) self.assertEqual(output_tensor, input_tensors[self.rank] * self.world_size) @requires_nccl() @skip_if_lt_x_gpu(2) def test_reduce_scatter_tensor_coalesced(self): store = dist.FileStore(self.file_name, self.world_size) dist.init_process_group( "nccl", world_size=self.world_size, rank=self.rank, store=store, ) output_tensors = torch.zeros(2, 2).to(self.rank) input_tensors = [torch.ones(2, 2).to(self.rank) for _ in range(self.world_size)] with dist._coalescing_manager(): for i in range(self.world_size): dist.reduce_scatter_tensor(output_tensors[i], input_tensors[i]) self.assertEqual(output_tensors, input_tensors[self.rank] * self.world_size)
CommTest
python
sanic-org__sanic
guide/webapp/display/layouts/models.py
{ "start": 72, "end": 260 }
class ____(Struct, kw_only=False, omit_defaults=True): label: str path: str | None = None href: str | None = None items: list[MenuItem] = field(default_factory=list)
MenuItem
python
pytorch__pytorch
.github/scripts/test_delete_old_branches.py
{ "start": 274, "end": 1759 }
class ____(unittest.TestCase): def test_delete_tag( self, mock_run_git: "MagicMock", mock_delete_tag: "MagicMock" ) -> None: for tag in [ "ciflow/branch/12345", "ciflow/commitsha/1234567890abcdef1234567890abcdef12345678", "trunk/1234567890abcdef1234567890abcdef12345678", ]: mock_run_git.side_effect = [ tag, str(int(datetime.now().timestamp() - 8 * 24 * 60 * 60)), # 8 days ago ] delete_old_tags() mock_delete_tag.assert_called_once() mock_delete_tag.reset_mock() # Don't delete if the tag is not old enough mock_run_git.side_effect = [ tag, str(int(datetime.now().timestamp() - 6 * 24 * 60 * 60)), # 6 days ago ] delete_old_tags() mock_delete_tag.assert_not_called() def test_do_not_delete_tag( self, mock_run_git: "MagicMock", mock_delete_tag: "MagicMock" ) -> None: for tag in [ "ciflow/doesntseemtomatch", "trunk/doesntseemtomatch", "doesntseemtomatch", ]: mock_run_git.side_effect = [ tag, str(int(datetime.now().timestamp() - 8 * 24 * 60 * 60)), # 8 days ago ] delete_old_tags() mock_delete_tag.assert_not_called() if __name__ == "__main__": unittest.main()
TestDeleteTag
python
langchain-ai__langchain
libs/standard-tests/tests/unit_tests/test_in_memory_base_store.py
{ "start": 537, "end": 828 }
class ____(BaseStoreAsyncTests[str]): @pytest.fixture @override def three_values(self) -> tuple[str, str, str]: return "foo", "bar", "buzz" @pytest.fixture @override async def kv_store(self) -> InMemoryStore: return InMemoryStore()
TestInMemoryStoreAsync
python
huggingface__transformers
src/transformers/models/sam2/modular_sam2.py
{ "start": 17002, "end": 17079 }
class ____(MaskFormerSinePositionEmbedding): pass
Sam2SinePositionEmbedding
python
realpython__materials
gemini-cli/todolist/src/todolist/exporter.py
{ "start": 500, "end": 1933 }
class ____: def __init__( self, output: SupportsWrite[str], options: FormatOptions = {} ) -> None: self.output = output self.options = options def export(self, content: Any) -> None: json.dump(content, self.output, **self.options) def export_database_to_json() -> None: content = list(map(asdict, TaskListStatus.find_all())) for exporter in exporters(): exporter.export(content) def exporters() -> Iterator[JSONExporter]: file = None try: stdout_exporter = JSONExporter(sys.stdout) stdout_exporter.options["indent"] = 2 stdout_exporter.options["sort_keys"] = False stdout_exporter.options["separators"] = (", ", ": ") file = open(f"todo_{timestamp()}.json", mode="w+", encoding="utf-8") file_exporter = JSONExporter(file) file_exporter.options["indent"] = None file_exporter.options["sort_keys"] = True file_exporter.options["separators"] = (",", ":") yield stdout_exporter yield file_exporter file.seek(0) Console(stderr=True).print( Panel( Markdown(file.read()), title=file.name, width=80, border_style="bold", ) ) finally: if file: file.close() def timestamp() -> str: return datetime.now().strftime("%Y%m%d%H%M%S")
JSONExporter
python
google__pytype
pytype/abstract/function.py
{ "start": 34711, "end": 35320 }
class ____(_ReturnType): """An abstract return type.""" def __init__(self, t: _base.BaseValue, ctx: "context.Context") -> None: self._type = t self._ctx = ctx @property def name(self) -> str: return self._type.full_name def instantiate_parameter( self, node: cfg.CFGNode, param_name: str ) -> tuple[cfg.CFGNode, cfg.Variable]: param = self._type.get_formal_type_parameter(param_name) return self._ctx.vm.init_class(node, param) def get_parameter(self, node: cfg.CFGNode, param_name: str): return self._type.get_formal_type_parameter(param_name)
AbstractReturnType
python
walkccc__LeetCode
solutions/774. Minimize Max Distance to Gas Station/774.py
{ "start": 0, "end": 641 }
class ____: def minmaxGasDist(self, stations: list[int], k: int) -> float: ERR = 1e-6 l = 0 r = stations[-1] - stations[0] def possible(k: int, m: float) -> bool: """ Returns True if can use <= k gas stations to ensure that each adjacent distance between gas stations <= m. """ for a, b in zip(stations, stations[1:]): diff = b - a if diff > m: k -= math.ceil(diff / m) - 1 if k < 0: return False return True while r - l > ERR: m = (l + r) / 2 if possible(k, m): r = m else: l = m return l
Solution
python
jazzband__prettytable
tests/test_prettytable.py
{ "start": 35581, "end": 37514 }
class ____: @pytest.mark.parametrize( ["rows", "hrule", "expected_result"], [ ( [["value 1", "value2\nsecond line"], ["value 3", "value4"]], HRuleStyle.ALL, """ +---------+-------------+ | Field 1 | Field 2 | +---------+-------------+ | value 1 | value2 | | | second line | +---------+-------------+ | value 3 | value4 | +---------+-------------+ """, ), ( [ ["value 1", "value2\nsecond line"], ["value 3\n\nother line", "value4\n\n\nvalue5"], ], HRuleStyle.ALL, """ +------------+-------------+ | Field 1 | Field 2 | +------------+-------------+ | value 1 | value2 | | | second line | +------------+-------------+ | value 3 | value4 | | | | | other line | | | | value5 | +------------+-------------+ """, ), ( [ ["value 1", "value2\nsecond line"], ["value 3\n\nother line", "value4\n\n\nvalue5"], ], HRuleStyle.FRAME, """ +------------+-------------+ | Field 1 | Field 2 | +------------+-------------+ | value 1 | value2 | | | second line | | value 3 | value4 | | | | | other line | | | | value5 | +------------+-------------+ """, ), ], ) def test_break_line_ascii( self, rows: list[list[Any]], hrule: int, expected_result: str ) -> None: table = PrettyTable(["Field 1", "Field 2"]) for row in rows: table.add_row(row) result = table.get_string(hrules=hrule) assert result.strip() == expected_result.strip()
TestBreakLine
python
numpy__numpy
numpy/lib/tests/test_nanfunctions.py
{ "start": 24483, "end": 29736 }
class ____(SharedNanFunctionsTestsMixin): nanfuncs = [np.nanmean, np.nanvar, np.nanstd] stdfuncs = [np.mean, np.var, np.std] def test_dtype_error(self): for f in self.nanfuncs: for dtype in [np.bool, np.int_, np.object_]: assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype) def test_out_dtype_error(self): for f in self.nanfuncs: for dtype in [np.bool, np.int_, np.object_]: out = np.empty(_ndat.shape[0], dtype=dtype) assert_raises(TypeError, f, _ndat, axis=1, out=out) def test_ddof(self): nanfuncs = [np.nanvar, np.nanstd] stdfuncs = [np.var, np.std] for nf, rf in zip(nanfuncs, stdfuncs): for ddof in [0, 1]: tgt = [rf(d, ddof=ddof) for d in _rdat] res = nf(_ndat, axis=1, ddof=ddof) assert_almost_equal(res, tgt) def test_ddof_too_big(self): nanfuncs = [np.nanvar, np.nanstd] stdfuncs = [np.var, np.std] dsize = [len(d) for d in _rdat] for nf, rf in zip(nanfuncs, stdfuncs): for ddof in range(5): with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') warnings.simplefilter('ignore', ComplexWarning) tgt = [ddof >= d for d in dsize] res = nf(_ndat, axis=1, ddof=ddof) assert_equal(np.isnan(res), tgt) if any(tgt): assert_(len(w) == 1) else: assert_(len(w) == 0) @pytest.mark.parametrize("axis", [None, 0, 1]) @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) @pytest.mark.parametrize("array", [ np.array(np.nan), np.full((3, 3), np.nan), ], ids=["0d", "2d"]) def test_allnans(self, axis, dtype, array): if axis is not None and array.ndim == 0: pytest.skip("`axis != None` not supported for 0d arrays") array = array.astype(dtype) match = "(Degrees of freedom <= 0 for slice.)|(Mean of empty slice)" for func in self.nanfuncs: with pytest.warns(RuntimeWarning, match=match): out = func(array, axis=axis) assert np.isnan(out).all() # `nanvar` and `nanstd` convert complex inputs to their # corresponding floating dtype if func is np.nanmean: assert out.dtype == array.dtype else: assert out.dtype == np.abs(array).dtype def test_empty(self): mat = 
np.zeros((0, 3)) for f in self.nanfuncs: for axis in [0, None]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_(np.isnan(f(mat, axis=axis)).all()) assert_(len(w) == 1) assert_(issubclass(w[0].category, RuntimeWarning)) for axis in [1]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_equal(f(mat, axis=axis), np.zeros([])) assert_(len(w) == 0) @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"]) def test_where(self, dtype): ar = np.arange(9).reshape(3, 3).astype(dtype) ar[0, :] = np.nan where = np.ones_like(ar, dtype=np.bool) where[:, 0] = False for f, f_std in zip(self.nanfuncs, self.stdfuncs): reference = f_std(ar[where][2:]) dtype_reference = dtype if f is np.nanmean else ar.real.dtype ret = f(ar, where=where) assert ret.dtype == dtype_reference np.testing.assert_allclose(ret, reference) def test_nanstd_with_mean_keyword(self): # Setting the seed to make the test reproducible rng = np.random.RandomState(1234) A = rng.randn(10, 20, 5) + 0.5 A[:, 5, :] = np.nan mean_out = np.zeros((10, 1, 5)) std_out = np.zeros((10, 1, 5)) mean = np.nanmean(A, out=mean_out, axis=1, keepdims=True) # The returned object should be the object specified during calling assert mean_out is mean std = np.nanstd(A, out=std_out, axis=1, keepdims=True, mean=mean) # The returned object should be the object specified during calling assert std_out is std # Shape of returned mean and std should be same assert std.shape == mean.shape assert std.shape == (10, 1, 5) # Output should be the same as from the individual algorithms std_old = np.nanstd(A, axis=1, keepdims=True) assert std_old.shape == mean.shape assert_almost_equal(std, std_old) _TIME_UNITS = ( "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as" ) # All `inexact` + `timdelta64` type codes _TYPE_CODES = list(np.typecodes["AllFloat"]) _TYPE_CODES += [f"m8[{unit}]" for unit in _TIME_UNITS]
TestNanFunctions_MeanVarStd
python
tensorflow__tensorflow
tensorflow/python/distribute/experimental/multi_worker_mirrored_strategy.py
{ "start": 1376, "end": 6509 }
class ____(distribute_lib.Strategy): """A distribution strategy for synchronous training on multiple workers. This strategy implements synchronous distributed training across multiple workers, each with potentially multiple GPUs. Similar to `tf.distribute.MirroredStrategy`, it replicates all variables and computations to each local device. The difference is that it uses a distributed collective implementation (e.g. all-reduce), so that multiple workers can work together. """ def __init__(self, cluster_resolver=None, communication_options=None, *, mesh=None): """Creates the strategy. Args: cluster_resolver: optional `tf.distribute.cluster_resolver.ClusterResolver`. In case neither `mesh` nor `cluster_resolver` are provided, `tf.distribute.cluster_resolver.TFConfigClusterResolver` is used. communication_options: currently ignore. mesh: optional Dtensor global mesh for the computation. Note that either `mesh` or the `cluster_resolver` should be provided. and not both. """ self._validate_init_args(mesh, cluster_resolver) if not mesh: if not cluster_resolver: # Use the TFConfigClusterResolver as default cluster_resolver = tfconfig_cluster_resolver.TFConfigClusterResolver() dtensor_env_var = _parse_dtensor_env_var_from_cluster_resolver( cluster_resolver) _config_dtensor_env_var(dtensor_env_var) mesh = _build_distributed_mesh(dtensor_util.DEFAULT_BATCH_MESH_DIM_NAME) extended = dtensor_strategy_extended.DTensorStrategyExtended( container_strategy=self, mesh=mesh) super().__init__(extended) self._mesh = mesh self._cluster_resolver = cluster_resolver @classmethod def _validate_init_args(cls, mesh, cluster_resolver): if mesh and cluster_resolver: raise ValueError('Mesh and cluster_resolver can not be provided at the ' f'same time. 
Received mesh = {mesh}, cluster_resolver = ' f'{cluster_resolver}') if mesh and len(mesh.shape()) != 1: raise ValueError('The mesh for MultiWorkerMirroredStrategy must be 1D, ' f'received: {len(mesh.shape())}D') def reduce(self, reduce_op, value, axis): return dtensor_util.dtensor_reduce(self, reduce_op, value, axis) @property def mesh(self): """Returns the mesh used by the strategy.""" return self._mesh def _parse_dtensor_env_var_from_cluster_resolver(cluster_resolver): """Parse the env vars for Dtensor based on the cluster resolver. In the multi-client setting, each of the DTensor jobs need to aware of each other, and the interface to setup those values are via the envvars. The value used by dtensor are different from the existing `MultiWorkerMirroredStrategy`. This function will parse the value from cluster resolver, and populate the corresponding value for DTensor jobs in the `os.environ`. Args: cluster_resolver: A `tf.distribute.cluster_resolver.ClusterResolver` instance. Returns: A dict of {Str:Str} which contains all the env vars needed by DTensor jobs. The value is for verification purpose. Raises: The value parsed from existing cluster spec is not valid. """ result = {} # Retrieve the number of host, cluster config from the resolver. cluster_spec = multi_worker_util.normalize_cluster_spec( cluster_resolver.cluster_spec()) # Export all the necessary envvars for dtensor # Get all the jobs from the cluster spec. Note that the in the normal # setting, it could be multiple worker devices without chief, and the # worker 0 will be the chief, or an explicit chief with multiple worker job. 
dtensor_jobs = [] if 'chief' in cluster_spec.jobs: dtensor_jobs.extend(cluster_spec.job_tasks('chief')) if 'worker' in cluster_spec.jobs: dtensor_jobs.extend(cluster_spec.job_tasks('worker')) if None in dtensor_jobs: raise ValueError('Unexpected dtensor job address from cluster spec: ' f'{cluster_spec}') result['DTENSOR_JOBS'] = ','.join(dtensor_jobs) result['DTENSOR_NUM_CLIENTS'] = str(len(dtensor_jobs)) if cluster_resolver.task_type == 'chief': dtensor_client_id = 0 elif cluster_resolver.task_type == 'worker': dtensor_client_id = cluster_resolver.task_id if 'chief' in cluster_spec.jobs: dtensor_client_id += 1 result['DTENSOR_CLIENT_ID'] = str(dtensor_client_id) result['DTENSOR_JOB_NAME'] = 'worker' return result def _config_dtensor_env_var(dtensor_env_vars): for k, v in dtensor_env_vars.items(): os.environ[k] = v def _build_distributed_mesh(batch_dim_name): device_type = d_config.preferred_device_type() local_devices = d_config.local_devices(device_type) number_clients = d_config.num_clients() dtensor_util.initialize_accelerator_system_once(device_type) # This assumes each client has same number of devices. mesh_dims = [(batch_dim_name, len(local_devices) * number_clients)] return mesh_util.create_distributed_mesh( mesh_dims, device_type=device_type)
MultiWorkerMirroredStrategy
python
python-poetry__poetry
src/poetry/publishing/hash_manager.py
{ "start": 208, "end": 309 }
class ____(NamedTuple): md5: str | None sha256: str | None blake2_256: str | None
Hexdigest
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/callable7.py
{ "start": 330, "end": 422 }
class ____: def method1(self, a: int): return a __call__ = method1 B()(0)
B
python
sphinx-doc__sphinx
sphinx/util/logging.py
{ "start": 3440, "end": 6267 }
class ____(logging.LoggerAdapter[logging.Logger]): """LoggerAdapter allowing ``type`` and ``subtype`` keywords.""" KEYWORDS = ['type', 'subtype', 'location', 'nonl', 'color', 'once'] def log( # type: ignore[override] self, level: int | str, msg: str, *args: Any, **kwargs: Any ) -> None: if isinstance(level, int): super().log(level, msg, *args, **kwargs) else: levelno = LEVEL_NAMES[level] super().log(levelno, msg, *args, **kwargs) def verbose(self, msg: str, *args: Any, **kwargs: Any) -> None: self.log(VERBOSE, msg, *args, **kwargs) def process(self, msg: str, kwargs: dict[str, Any]) -> tuple[str, dict[str, Any]]: # type: ignore[override] extra = kwargs.setdefault('extra', {}) for keyword in self.KEYWORDS: if keyword in kwargs: extra[keyword] = kwargs.pop(keyword) return msg, kwargs def handle(self, record: logging.LogRecord) -> None: self.logger.handle(record) def warning( # type: ignore[override] self, msg: object, *args: object, type: str | None = None, subtype: str | None = None, location: str | tuple[str | None, int | None] | Node | None = None, nonl: bool = True, color: str | None = None, once: bool = False, **kwargs: Any, ) -> None: """Log a sphinx warning. It is recommended to include a ``type`` and ``subtype`` for warnings as these can be displayed to the user using :confval:`show_warning_types` and used in :confval:`suppress_warnings` to suppress specific warnings. It is also recommended to specify a ``location`` whenever possible to help users in correcting the warning. :param msg: The message, which may contain placeholders for ``args``. :param args: The arguments to substitute into ``msg``. :param type: The type of the warning. :param subtype: The subtype of the warning. :param location: The source location of the warning's origin, which can be a string (the ``docname`` or ``docname:lineno``), a tuple of ``(docname, lineno)``, or the docutils node object. :param nonl: Whether to append a new line terminator to the message. 
:param color: A color code for the message. :param once: Do not log this warning, if a previous warning already has same ``msg``, ``args`` and ``once=True``. """ return super().warning( msg, *args, type=type, subtype=subtype, location=location, nonl=nonl, color=color, once=once, **kwargs, )
SphinxLoggerAdapter
python
huggingface__transformers
src/transformers/models/vit/modeling_vit.py
{ "start": 13946, "end": 14479 }
class ____(nn.Module): def __init__(self, config: ViTConfig): super().__init__() self.config = config self.layer = nn.ModuleList([ViTLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward(self, hidden_states: torch.Tensor) -> BaseModelOutput: for i, layer_module in enumerate(self.layer): hidden_states = layer_module(hidden_states) return BaseModelOutput(last_hidden_state=hidden_states) @auto_docstring
ViTEncoder
python
ansible__ansible
lib/ansible/executor/module_common.py
{ "start": 2686, "end": 6257 }
class ____: """Represents a module/module_utils item awaiting import analysis.""" name_parts: tuple[str, ...] is_ambiguous: bool = False child_is_redirected: bool = False is_optional: bool = False @classmethod def from_module(cls, module: types.ModuleType, append: str | None = None) -> t.Self: name = module.__name__ if append: name += '.' + append return cls.from_module_name(name) @classmethod def from_module_name(cls, module_name: str) -> t.Self: return cls(tuple(module_name.split('.'))) REPLACER = b"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>" REPLACER_VERSION = b"\"<<ANSIBLE_VERSION>>\"" REPLACER_COMPLEX = b"\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\"" REPLACER_WINDOWS = b"# POWERSHELL_COMMON" REPLACER_JSONARGS = b"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>" REPLACER_SELINUX = b"<<SELINUX_SPECIAL_FILESYSTEMS>>" # module_common is relative to module_utils, so fix the path _MODULE_UTILS_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils') _SHEBANG_PLACEHOLDER = '# shebang placeholder' # ****************************************************************************** def _strip_comments(source: str) -> str: # Strip comments and blank lines from the wrapper buf = [] for line in source.splitlines(): l = line.strip() if (not l or l.startswith('#')) and l != _SHEBANG_PLACEHOLDER: line = '' buf.append(line) return '\n'.join(buf) def _read_ansiballz_code() -> str: code = (pathlib.Path(_ansiballz.__file__).parent / '_wrapper.py').read_text() if not C.DEFAULT_KEEP_REMOTE_FILES: # Keep comments when KEEP_REMOTE_FILES is set. That way users will see # the comments with some nice usage instructions. # Otherwise, strip comments for smaller over the wire size. 
code = _strip_comments(code) return code _ANSIBALLZ_CODE = _read_ansiballz_code() # read during startup to prevent individual workers from doing so def _get_ansiballz_code(shebang: str) -> str: code = _ANSIBALLZ_CODE code = code.replace(_SHEBANG_PLACEHOLDER, shebang) return code # dirname(dirname(dirname(site-packages/ansible/executor/module_common.py) == site-packages # Do this instead of getting site-packages from distutils.sysconfig so we work when we # haven't been installed site_packages = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) CORE_LIBRARY_PATH_RE = re.compile(r'%s/(?P<path>ansible/modules/.*)\.(py|ps1)$' % re.escape(site_packages)) COLLECTION_PATH_RE = re.compile(r'/(?P<path>ansible_collections/[^/]+/[^/]+/plugins/modules/.*)\.(py|ps1)$') # Detect new-style Python modules by looking for required imports: # import ansible_collections.[my_ns.my_col.plugins.module_utils.my_module_util] # from ansible_collections.[my_ns.my_col.plugins.module_utils import my_module_util] # import ansible.module_utils[.basic] # from ansible.module_utils[ import basic] # from ansible.module_utils[.basic import AnsibleModule] # from ..module_utils[ import basic] # from ..module_utils[.basic import AnsibleModule] NEW_STYLE_PYTHON_MODULE_RE = re.compile( # Relative imports br'(?:from +\.{2,} *module_utils.* +import |' # Collection absolute imports: br'from +ansible_collections\.[^.]+\.[^.]+\.plugins\.module_utils.* +import |' br'import +ansible_collections\.[^.]+\.[^.]+\.plugins\.module_utils.*|' # Core absolute imports br'from +ansible\.module_utils.* +import |' br'import +ansible\.module_utils\.)' )
_ModuleUtilsProcessEntry
python
numba__numba
numba/tests/test_datamodel.py
{ "start": 883, "end": 946 }
class ____(test_factory()): fe_type = types.float32
TestFloat
python
pytorch__pytorch
test/test_datapipe.py
{ "start": 4027, "end": 5573 }
class ____(TestCase): def setUp(self): self.elements = list(range(10)) random.shuffle(self.elements) self.chunk: DataChunk[int] = DataChunk(self.elements) def test_getitem(self): for i in range(10): self.assertEqual(self.elements[i], self.chunk[i]) def test_iter(self): for ele, dc in zip(self.elements, iter(self.chunk)): self.assertEqual(ele, dc) def test_len(self): self.assertEqual(len(self.elements), len(self.chunk)) def test_as_string(self): self.assertEqual(str(self.chunk), str(self.elements)) batch = [self.elements] * 3 chunks: list[DataChunk[int]] = [DataChunk(self.elements)] * 3 self.assertEqual(str(batch), str(chunks)) def test_sort(self): chunk: DataChunk[int] = DataChunk(self.elements) chunk.sort() self.assertTrue(isinstance(chunk, DataChunk)) for i, d in enumerate(chunk): self.assertEqual(i, d) def test_reverse(self): chunk: DataChunk[int] = DataChunk(self.elements) chunk.reverse() self.assertTrue(isinstance(chunk, DataChunk)) for i in range(10): self.assertEqual(chunk[i], self.elements[9 - i]) def test_random_shuffle(self): elements = list(range(10)) chunk: DataChunk[int] = DataChunk(elements) rng = random.Random(0) rng.shuffle(chunk) rng = random.Random(0) rng.shuffle(elements) self.assertEqual(chunk, elements)
TestDataChunk
python
pytorch__pytorch
test/dynamo/test_higher_order_ops.py
{ "start": 108676, "end": 114816 }
class ____(torch.nn.Module): def forward(self, L_x_: "f32[2, 4]", L_y_: "f32[4]"): l_x_ = L_x_ l_y_ = L_y_ x: "f32[2, 4]" = l_x_ + l_y_; l_x_ = None hints_wrapper_body_1 = self.hints_wrapper_body_1 hints_wrapper = torch.ops.higher_order.hints_wrapper(hints_wrapper_body_1, (x, l_y_), {}, hints = {'outer_body': True}); hints_wrapper_body_1 = x = l_y_ = None getitem: "f32[2, 4]" = hints_wrapper[0]; hints_wrapper = None return (getitem,) class hints_wrapper_body_1(torch.nn.Module): def forward(self, x: "f32[2, 4]", l_y_: "f32[4]"): hints_wrapper_body_0 = self.hints_wrapper_body_0 hints_wrapper = torch.ops.higher_order.hints_wrapper(hints_wrapper_body_0, (x, l_y_), {}, hints = {'inner_body': True}); hints_wrapper_body_0 = x = l_y_ = None getitem: "f32[2, 4]" = hints_wrapper[0]; hints_wrapper = None x_1: "f32[2, 4]" = torch.abs(getitem); getitem = None return (x_1,) class hints_wrapper_body_0(torch.nn.Module): def forward(self, x: "f32[2, 4]", l_y_: "f32[4]"): x_1: "f32[2, 4]" = torch.relu(x); x = None x_2: "f32[2, 4]" = x_1 + l_y_; x_1 = l_y_ = None return (x_2,) """, ) def test_hints_wrapper_no_hints(self): def fn_with_hints(x, y): def outer_body_fn(x, y): x = torch.add(x, y) return x res = hints_wrapper(outer_body_fn, (x, y), {}) return res backend = EagerAndRecordGraphs() cnt = CompileCounterWithBackend(backend) x = torch.randn(2, 4) y = torch.ones(4) msg = "hints_wrapper: improper args/kwargs" with self.assertRaisesRegex(RuntimeError, msg): torch.compile(fn_with_hints, backend=cnt)(x, y) def test_hints_wrapper_incorrect_type(self): def fn_with_hints(x, y): def outer_body_fn(x, y): x = torch.add(x, y) return x res = hints_wrapper(outer_body_fn, (x, y), {}, hints={"test": (True,)}) return res backend = EagerAndRecordGraphs() cnt = CompileCounterWithBackend(backend) x = torch.randn(2, 4) y = torch.ones(4) msg = r"hints must be a dict containing int, float, bool or str value," with self.assertRaisesRegex(RuntimeError, msg): torch.compile(fn_with_hints, backend=cnt)(x, 
y) def test_hints_wrapper_pytree_inputs(self): def fn_with_hints(x, y): def outer_body_fn(x): res = torch.add(x[0], x[1]["test"]) return res res = hints_wrapper( outer_body_fn, ((x, {"test": y}),), {}, hints={"test": True} ) return res x = torch.randn(2, 4) y = torch.ones(4) msg = r"args must be a tuple of tensors, ints, floats, or bools," with self.assertRaisesRegex(RuntimeError, msg): fn_with_hints(x, y) @requires_cuda_and_triton def test_wrap_inductor_compiled_regions_option(self): """ Test that wrap_inductor_compiled_regions option wraps compiled regions in inductor_compiled_code HOP, making them visible to DebugMode. """ from torch.utils._debug_mode import DebugMode # Test with wrapping enabled @torch.compile( backend="inductor", options={"wrap_inductor_compiled_regions": True}, fullgraph=True, ) def fn_wrapped(x, y): return torch.matmul(x, y) # Test with wrapping disabled (default) @torch.compile(backend="inductor", fullgraph=True) def fn_not_wrapped(x, y): return torch.matmul(x, y) x = torch.randn(4, 4, device="cuda") y = torch.randn(4, 4, device="cuda") # Test wrapped version - HOP should be visible in DebugMode with DebugMode() as debug_mode_wrapped: result_wrapped = fn_wrapped(x, y) debug_string_wrapped = debug_mode_wrapped.debug_string() self.assertIn("inductor_compiled_code", debug_string_wrapped) # Test non-wrapped version - HOP should NOT be visible with DebugMode() as debug_mode_not_wrapped: result_not_wrapped = fn_not_wrapped(x, y) debug_string_not_wrapped = debug_mode_not_wrapped.debug_string() self.assertNotIn("inductor_compiled_code", debug_string_not_wrapped) # Both should produce correct results expected = torch.matmul(x, y) self.assertEqual(result_wrapped, expected) self.assertEqual(result_not_wrapped, expected) @requires_cuda_and_triton def test_wrap_inductor_compiled_regions_with_backward(self): """ Test that wrap_inductor_compiled_regions works correctly with autograd. 
""" from torch.utils._debug_mode import DebugMode @torch.compile( backend="inductor", options={"wrap_inductor_compiled_regions": True}, fullgraph=True, ) def fn(x, y): return torch.matmul(x, y) x = torch.randn(4, 4, device="cuda", requires_grad=True) y = torch.randn(4, 4, device="cuda", requires_grad=True) # Clone for eager comparison x_eager = x.detach().clone().requires_grad_(True) y_eager = y.detach().clone().requires_grad_(True) # Compiled forward and backward with DebugMode() as debug_mode: result = fn(x, y) loss = result.sum() loss.backward() # HOP should be visible in forward pass self.assertIn("inductor_compiled_code", debug_mode.debug_string()) # Eager forward and backward for comparison expected = torch.matmul(x_eager, y_eager) expected_loss = expected.sum() expected_loss.backward() # Check correctness self.assertEqual(result, expected) self.assertEqual(x.grad, x_eager.grad) self.assertEqual(y.grad, y_eager.grad)
GraphModule
python
getsentry__sentry
src/sentry/workflow_engine/typings/notification_action.py
{ "start": 13334, "end": 14186 }
class ____(ABC): @staticmethod def separate_fields( data: dict[str, Any], excluded_keys: list[str] | None = None ) -> tuple[dict[str, Any], dict[str, Any]]: """ Separates data into standard and additional fields. Returns tuple of (dynamic_form_fields, additional_fields) """ excluded_keys = excluded_keys or [] dynamic_form_fields = data.get(TicketFieldMappingKeys.DYNAMIC_FORM_FIELDS_KEY.value, []) additional_fields = { k: v for k, v in data.items() if k not in dynamic_form_fields and k not in EXCLUDED_ACTION_DATA_KEYS and k not in excluded_keys and k != TicketFieldMappingKeys.DYNAMIC_FORM_FIELDS_KEY.value } return dynamic_form_fields, additional_fields
TicketingActionDataBlobHelper
python
pypa__pip
src/pip/_internal/exceptions.py
{ "start": 11590, "end": 11894 }
class ____(InstallationError): """Metadata is invalid.""" def __init__(self, ireq: InstallRequirement, error: str) -> None: self.ireq = ireq self.error = error def __str__(self) -> str: return f"Requested {self.ireq} has invalid metadata: {self.error}"
MetadataInvalid
python
ray-project__ray
rllib/algorithms/mock.py
{ "start": 221, "end": 2507 }
class ____(Algorithm): """Mock Algorithm for use in tests.""" @classmethod @override(Algorithm) def get_default_config(cls) -> AlgorithmConfig: return ( AlgorithmConfig() .framework("tf") .update_from_dict( { "mock_error": False, "persistent_error": False, "test_variable": 1, "user_checkpoint_freq": 0, "sleep": 0, } ) ) @classmethod def default_resource_request(cls, config: AlgorithmConfig): return None @override(Algorithm) def setup(self, config): self.callbacks = self.config.callbacks_class() # Add needed properties. self.info = None self.restored = False @override(Algorithm) def step(self): if ( self.config.mock_error and self.iteration == 1 and (self.config.persistent_error or not self.restored) ): raise Exception("mock error") if self.config.sleep: time.sleep(self.config.sleep) result = dict( episode_reward_mean=10, episode_len_mean=10, timesteps_this_iter=10, info={} ) if self.config.user_checkpoint_freq > 0 and self.iteration > 0: if self.iteration % self.config.user_checkpoint_freq == 0: result.update({tune_result.SHOULD_CHECKPOINT: True}) return result @override(Algorithm) def save_checkpoint(self, checkpoint_dir): path = os.path.join(checkpoint_dir, "mock_agent.pkl") with open(path, "wb") as f: pickle.dump(self.info, f) @override(Algorithm) def load_checkpoint(self, checkpoint_dir): path = os.path.join(checkpoint_dir, "mock_agent.pkl") with open(path, "rb") as f: info = pickle.load(f) self.info = info self.restored = True @staticmethod @override(Algorithm) def _get_env_id_and_creator(env_specifier, config): # No env to register. return None, None def set_info(self, info): self.info = info return info def get_info(self, sess=None): return self.info
_MockTrainer
python
pola-rs__polars
py-polars/src/polars/expr/whenthen.py
{ "start": 4305, "end": 6500 }
class ____(Expr): """ Utility class for the `when-then-otherwise` expression. Represents the state of the expression after an additional `then` is called. """ def __init__(self, chained_then: Any) -> None: self._chained_then = chained_then @classmethod def _from_pyexpr(cls, pyexpr: PyExpr) -> Expr: return wrap_expr(pyexpr) @property def _pyexpr(self) -> PyExpr: # type: ignore[override] return self._chained_then.otherwise(F.lit(None)._pyexpr) def when( self, *predicates: IntoExpr | Iterable[IntoExpr], **constraints: Any, ) -> ChainedWhen: """ Add another condition to the `when-then-otherwise` expression. Parameters ---------- predicates Condition(s) that must be met in order to apply the subsequent statement. Accepts one or more boolean expressions, which are implicitly combined with `&`. String input is parsed as a column name. constraints Apply conditions as `col_name = value` keyword arguments that are treated as equality matches, such as `x = 123`. As with the predicates parameter, multiple conditions are implicitly combined using `&`. Notes ----- The expression output name is taken from the first `then` statement. It is not affected by `predicates`, nor by `constraints`. """ condition_pyexpr = parse_predicates_constraints_into_expression( *predicates, **constraints ) return ChainedWhen(self._chained_then.when(condition_pyexpr)) def otherwise(self, statement: IntoExpr) -> Expr: """ Define a default for the `when-then-otherwise` expression. Parameters ---------- statement The statement to apply if all conditions are false. Accepts expression input. Strings are parsed as column names, other non-expression inputs are parsed as literals. """ statement_pyexpr = parse_into_expression(statement) return wrap_expr(self._chained_then.otherwise(statement_pyexpr))
ChainedThen
python
kamyu104__LeetCode-Solutions
Python/minimum-increments-for-target-multiples-in-an-array.py
{ "start": 105, "end": 1399 }
class ____(object): def minimumIncrements(self, nums, target): """ :type nums: List[int] :type target: List[int] :rtype: int """ INF = float("inf") def gcd(a, b): while b: a, b = b, a%b return a def lcm(a, b): return a//gcd(a, b)*b n = len(nums) m = len(target) lcms = [0]*(1<<m) for mask in xrange(1<<m): l = 1 for i in xrange(m): if mask&(1<<i): l = lcm(l, target[i]) lcms[mask] = l dp = [INF]*(1<<m) dp[0] = 0 for x in nums: for mask in reversed(xrange(1<<m)): if dp[mask] == INF: continue # submask enumeration: # => sum(nCr(n, k) * 2^k for k in xrange(n+1)) = (1 + 2)^n = 3^n # => Time: O(3^n), see https://cp-algorithms.com/algebra/all-submasks.html submask = new_mask = (((1<<m)-1)-mask) while submask: dp[mask|submask] = min(dp[mask|submask], dp[mask]+(lcms[submask]-x%lcms[submask] if x%lcms[submask] else 0)) submask = (submask-1)&new_mask return dp[-1]
Solution
python
chroma-core__chroma
chromadb/proto/convert.py
{ "start": 1036, "end": 26619 }
# Proto <-> Python conversion helpers for Chroma's chromadb_pb/query_pb messages.
class KNNProjectionRecord(TypedDict):
    # Restored masked class name: it is referenced below by
    # from_proto_knn_projection_record, which would otherwise NameError.
    record: ProjectionRecord
    distance: Optional[float]


# TODO: Unit tests for this file, handling optional states etc
def to_proto_vector(vector: Vector, encoding: ScalarEncoding) -> chroma_pb.Vector:
    """Serialize a numpy vector to a proto Vector with the given encoding."""
    if encoding == ScalarEncoding.FLOAT32:
        as_bytes = np.array(vector, dtype=np.float32).tobytes()
        proto_encoding = chroma_pb.ScalarEncoding.FLOAT32
    elif encoding == ScalarEncoding.INT32:
        as_bytes = np.array(vector, dtype=np.int32).tobytes()
        proto_encoding = chroma_pb.ScalarEncoding.INT32
    else:
        raise ValueError(
            f"Unknown encoding {encoding}, expected one of {ScalarEncoding.FLOAT32} \
            or {ScalarEncoding.INT32}"
        )
    return chroma_pb.Vector(
        dimension=vector.size, vector=as_bytes, encoding=proto_encoding
    )


def from_proto_vector(vector: chroma_pb.Vector) -> Tuple[Embedding, ScalarEncoding]:
    """Deserialize a proto Vector into (numpy array, encoding)."""
    encoding = vector.encoding
    as_array: Union[NDArray[np.int32], NDArray[np.float32]]
    if encoding == chroma_pb.ScalarEncoding.FLOAT32:
        as_array = np.frombuffer(vector.vector, dtype=np.float32)
        out_encoding = ScalarEncoding.FLOAT32
    elif encoding == chroma_pb.ScalarEncoding.INT32:
        as_array = np.frombuffer(vector.vector, dtype=np.int32)
        out_encoding = ScalarEncoding.INT32
    else:
        raise ValueError(
            f"Unknown encoding {encoding}, expected one of \
            {chroma_pb.ScalarEncoding.FLOAT32} or {chroma_pb.ScalarEncoding.INT32}"
        )
    return (as_array, out_encoding)


def from_proto_operation(operation: chroma_pb.Operation) -> Operation:
    """Map a proto Operation enum value to the Python Operation enum."""
    if operation == chroma_pb.Operation.ADD:
        return Operation.ADD
    elif operation == chroma_pb.Operation.UPDATE:
        return Operation.UPDATE
    elif operation == chroma_pb.Operation.UPSERT:
        return Operation.UPSERT
    elif operation == chroma_pb.Operation.DELETE:
        return Operation.DELETE
    else:
        # TODO: full error
        raise RuntimeError(f"Unknown operation {operation}")


def from_proto_metadata(metadata: chroma_pb.UpdateMetadata) -> Optional[Metadata]:
    """Convert proto metadata; a None-valued key is an error (not an update)."""
    return cast(Optional[Metadata], _from_proto_metadata_handle_none(metadata, False))


def from_proto_update_metadata(
    metadata: chroma_pb.UpdateMetadata,
) -> Optional[UpdateMetadata]:
    """Convert proto metadata; a valueless key maps to None (delete marker)."""
    return cast(
        Optional[UpdateMetadata], _from_proto_metadata_handle_none(metadata, True)
    )


def _from_proto_metadata_handle_none(
    metadata: chroma_pb.UpdateMetadata, is_update: bool
) -> Optional[Union[UpdateMetadata, Metadata]]:
    """Shared metadata decoder; `is_update` controls whether None values are legal."""
    if not metadata.metadata:
        return None
    out_metadata: Dict[str, Union[str, int, float, bool, None]] = {}
    for key, value in metadata.metadata.items():
        if value.HasField("bool_value"):
            out_metadata[key] = value.bool_value
        elif value.HasField("string_value"):
            out_metadata[key] = value.string_value
        elif value.HasField("int_value"):
            out_metadata[key] = value.int_value
        elif value.HasField("float_value"):
            out_metadata[key] = value.float_value
        elif is_update:
            # No oneof field set => explicit deletion marker for updates.
            out_metadata[key] = None
        else:
            raise ValueError(f"Metadata key {key} value cannot be None")
    return out_metadata


def to_proto_update_metadata(metadata: UpdateMetadata) -> chroma_pb.UpdateMetadata:
    return chroma_pb.UpdateMetadata(
        metadata={k: to_proto_metadata_update_value(v) for k, v in metadata.items()}
    )


def from_proto_submit(
    operation_record: chroma_pb.OperationRecord, seq_id: SeqId
) -> LogRecord:
    """Decode a proto OperationRecord (plus its log offset) into a LogRecord."""
    embedding, encoding = from_proto_vector(operation_record.vector)
    record = LogRecord(
        log_offset=seq_id,
        record=OperationRecord(
            id=operation_record.id,
            embedding=embedding,
            encoding=encoding,
            metadata=from_proto_update_metadata(operation_record.metadata),
            operation=from_proto_operation(operation_record.operation),
        ),
    )
    return record


def from_proto_segment(segment: chroma_pb.Segment) -> Segment:
    return Segment(
        id=UUID(hex=segment.id),
        type=segment.type,
        scope=from_proto_segment_scope(segment.scope),
        collection=UUID(hex=segment.collection),
        metadata=from_proto_metadata(segment.metadata)
        if segment.HasField("metadata")
        else None,
        file_paths={
            name: [path for path in paths.paths]
            for name, paths in segment.file_paths.items()
        },
    )


def to_proto_segment(segment: Segment) -> chroma_pb.Segment:
    return chroma_pb.Segment(
        id=segment["id"].hex,
        type=segment["type"],
        scope=to_proto_segment_scope(segment["scope"]),
        collection=segment["collection"].hex,
        metadata=None
        if segment["metadata"] is None
        else to_proto_update_metadata(segment["metadata"]),
        file_paths={
            name: chroma_pb.FilePaths(paths=paths)
            for name, paths in segment["file_paths"].items()
        },
    )


def from_proto_segment_scope(segment_scope: chroma_pb.SegmentScope) -> SegmentScope:
    if segment_scope == chroma_pb.SegmentScope.VECTOR:
        return SegmentScope.VECTOR
    elif segment_scope == chroma_pb.SegmentScope.METADATA:
        return SegmentScope.METADATA
    elif segment_scope == chroma_pb.SegmentScope.RECORD:
        return SegmentScope.RECORD
    else:
        raise RuntimeError(f"Unknown segment scope {segment_scope}")


def to_proto_segment_scope(segment_scope: SegmentScope) -> chroma_pb.SegmentScope:
    if segment_scope == SegmentScope.VECTOR:
        return chroma_pb.SegmentScope.VECTOR
    elif segment_scope == SegmentScope.METADATA:
        return chroma_pb.SegmentScope.METADATA
    elif segment_scope == SegmentScope.RECORD:
        return chroma_pb.SegmentScope.RECORD
    else:
        raise RuntimeError(f"Unknown segment scope {segment_scope}")


def to_proto_metadata_update_value(
    value: Union[str, int, float, bool, None]
) -> chroma_pb.UpdateMetadataValue:
    # Be careful with the order here. Since bools are a subtype of int in python,
    # isinstance(value, bool) and isinstance(value, int) both return true
    # for a value of bool type.
    if isinstance(value, bool):
        return chroma_pb.UpdateMetadataValue(bool_value=value)
    elif isinstance(value, str):
        return chroma_pb.UpdateMetadataValue(string_value=value)
    elif isinstance(value, int):
        return chroma_pb.UpdateMetadataValue(int_value=value)
    elif isinstance(value, float):
        return chroma_pb.UpdateMetadataValue(float_value=value)
    # None is used to delete the metadata key.
    elif value is None:
        return chroma_pb.UpdateMetadataValue()
    else:
        raise ValueError(
            f"Unknown metadata value type {type(value)}, expected one of str, int, \
            float, or None"
        )


def from_proto_collection(collection: chroma_pb.Collection) -> Collection:
    return Collection(
        id=UUID(hex=collection.id),
        name=collection.name,
        configuration_json=json.loads(collection.configuration_json_str),
        metadata=from_proto_metadata(collection.metadata)
        if collection.HasField("metadata")
        else None,
        dimension=collection.dimension
        if collection.HasField("dimension") and collection.dimension
        else None,
        database=collection.database,
        tenant=collection.tenant,
        version=collection.version,
        log_position=collection.log_position,
    )


def to_proto_collection(collection: Collection) -> chroma_pb.Collection:
    return chroma_pb.Collection(
        id=collection["id"].hex,
        name=collection["name"],
        configuration_json_str=collection_configuration_to_json_str(
            collection.get_configuration()
        ),
        metadata=None
        if collection["metadata"] is None
        else to_proto_update_metadata(collection["metadata"]),
        dimension=collection["dimension"],
        tenant=collection["tenant"],
        database=collection["database"],
        log_position=collection["log_position"],
        version=collection["version"],
    )


def to_proto_operation(operation: Operation) -> chroma_pb.Operation:
    if operation == Operation.ADD:
        return chroma_pb.Operation.ADD
    elif operation == Operation.UPDATE:
        return chroma_pb.Operation.UPDATE
    elif operation == Operation.UPSERT:
        return chroma_pb.Operation.UPSERT
    elif operation == Operation.DELETE:
        return chroma_pb.Operation.DELETE
    else:
        # Fixed: the message previously listed Operation.UPDATE twice and
        # omitted Operation.UPSERT.
        raise ValueError(
            f"Unknown operation {operation}, expected one of {Operation.ADD}, \
            {Operation.UPDATE}, {Operation.UPSERT}, or {Operation.DELETE}"
        )


def to_proto_submit(
    submit_record: OperationRecord,
) -> chroma_pb.OperationRecord:
    vector = None
    if submit_record["embedding"] is not None and submit_record["encoding"] is not None:
        vector = to_proto_vector(submit_record["embedding"], submit_record["encoding"])

    metadata = None
    if submit_record["metadata"] is not None:
        metadata = to_proto_update_metadata(submit_record["metadata"])

    return chroma_pb.OperationRecord(
        id=submit_record["id"],
        vector=vector,
        metadata=metadata,
        operation=to_proto_operation(submit_record["operation"]),
    )


def from_proto_vector_embedding_record(
    embedding_record: chroma_pb.VectorEmbeddingRecord,
) -> VectorEmbeddingRecord:
    return VectorEmbeddingRecord(
        id=embedding_record.id,
        embedding=from_proto_vector(embedding_record.vector)[0],
    )


def to_proto_vector_embedding_record(
    embedding_record: VectorEmbeddingRecord,
    encoding: ScalarEncoding,
) -> chroma_pb.VectorEmbeddingRecord:
    return chroma_pb.VectorEmbeddingRecord(
        id=embedding_record["id"],
        vector=to_proto_vector(embedding_record["embedding"], encoding),
    )


def from_proto_vector_query_result(
    vector_query_result: chroma_pb.VectorQueryResult,
) -> VectorQueryResult:
    return VectorQueryResult(
        id=vector_query_result.id,
        distance=vector_query_result.distance,
        embedding=from_proto_vector(vector_query_result.vector)[0],
    )


def from_proto_request_version_context(
    request_version_context: chroma_pb.RequestVersionContext,
) -> RequestVersionContext:
    return RequestVersionContext(
        collection_version=request_version_context.collection_version,
        log_position=request_version_context.log_position,
    )


def to_proto_request_version_context(
    request_version_context: RequestVersionContext,
) -> chroma_pb.RequestVersionContext:
    return chroma_pb.RequestVersionContext(
        collection_version=request_version_context["collection_version"],
        log_position=request_version_context["log_position"],
    )


def to_proto_where(where: Where) -> chroma_pb.Where:
    """Recursively encode a Where filter dict into a proto Where tree."""
    response = chroma_pb.Where()
    if len(where) != 1:
        raise ValueError(f"Expected where to have exactly one operator, got {where}")

    for key, value in where.items():
        if not isinstance(key, str):
            raise ValueError(f"Expected where key to be a str, got {key}")

        if key == "$and" or key == "$or":
            if not isinstance(value, list):
                raise ValueError(
                    f"Expected where value for $and or $or to be a list of where expressions, got {value}"
                )
            children: chroma_pb.WhereChildren = chroma_pb.WhereChildren(
                children=[to_proto_where(w) for w in value]
            )
            if key == "$and":
                children.operator = chroma_pb.BooleanOperator.AND
            else:
                children.operator = chroma_pb.BooleanOperator.OR

            response.children.CopyFrom(children)
            return response

        # At this point we know we're at a direct comparison. It can either
        # be of the form {"key": "value"} or {"key": {"$operator": "value"}}.
        dc = chroma_pb.DirectComparison()
        dc.key = key

        if not isinstance(value, dict):
            # {'key': 'value'} case
            # type() (not isinstance) is deliberate: bool must not match int.
            if type(value) is str:
                ssc = chroma_pb.SingleStringComparison()
                ssc.value = value
                ssc.comparator = chroma_pb.GenericComparator.EQ
                dc.single_string_operand.CopyFrom(ssc)
            elif type(value) is bool:
                sbc = chroma_pb.SingleBoolComparison()
                sbc.value = value
                sbc.comparator = chroma_pb.GenericComparator.EQ
                dc.single_bool_operand.CopyFrom(sbc)
            elif type(value) is int:
                sic = chroma_pb.SingleIntComparison()
                sic.value = value
                sic.generic_comparator = chroma_pb.GenericComparator.EQ
                dc.single_int_operand.CopyFrom(sic)
            elif type(value) is float:
                sdc = chroma_pb.SingleDoubleComparison()
                sdc.value = value
                sdc.generic_comparator = chroma_pb.GenericComparator.EQ
                dc.single_double_operand.CopyFrom(sdc)
            else:
                raise ValueError(
                    f"Expected where value to be a string, int, or float, got {value}"
                )
        else:
            for operator, operand in value.items():
                if operator in ["$in", "$nin"]:
                    if not isinstance(operand, list):
                        raise ValueError(
                            f"Expected where value for $in or $nin to be a list of values, got {value}"
                        )
                    if len(operand) == 0 or not all(
                        isinstance(x, type(operand[0])) for x in operand
                    ):
                        raise ValueError(
                            f"Expected where operand value to be a non-empty list, and all values to be of the same type "
                            f"got {operand}"
                        )
                    list_operator = None
                    if operator == "$in":
                        list_operator = chroma_pb.ListOperator.IN
                    else:
                        list_operator = chroma_pb.ListOperator.NIN
                    if type(operand[0]) is str:
                        slo = chroma_pb.StringListComparison()
                        for x in operand:
                            slo.values.extend([x])  # type: ignore
                        slo.list_operator = list_operator
                        dc.string_list_operand.CopyFrom(slo)
                    elif type(operand[0]) is bool:
                        blo = chroma_pb.BoolListComparison()
                        for x in operand:
                            blo.values.extend([x])  # type: ignore
                        blo.list_operator = list_operator
                        dc.bool_list_operand.CopyFrom(blo)
                    elif type(operand[0]) is int:
                        ilo = chroma_pb.IntListComparison()
                        for x in operand:
                            ilo.values.extend([x])  # type: ignore
                        ilo.list_operator = list_operator
                        dc.int_list_operand.CopyFrom(ilo)
                    elif type(operand[0]) is float:
                        dlo = chroma_pb.DoubleListComparison()
                        for x in operand:
                            dlo.values.extend([x])  # type: ignore
                        dlo.list_operator = list_operator
                        dc.double_list_operand.CopyFrom(dlo)
                    else:
                        raise ValueError(
                            f"Expected where operand value to be a list of strings, ints, or floats, got {operand}"
                        )
                elif operator in ["$eq", "$ne", "$gt", "$lt", "$gte", "$lte"]:
                    # Direct comparison to a single value.
                    if type(operand) is str:
                        ssc = chroma_pb.SingleStringComparison()
                        ssc.value = operand
                        if operator == "$eq":
                            ssc.comparator = chroma_pb.GenericComparator.EQ
                        elif operator == "$ne":
                            ssc.comparator = chroma_pb.GenericComparator.NE
                        else:
                            raise ValueError(
                                f"Expected where operator to be $eq or $ne, got {operator}"
                            )
                        dc.single_string_operand.CopyFrom(ssc)
                    elif type(operand) is bool:
                        sbc = chroma_pb.SingleBoolComparison()
                        sbc.value = operand
                        if operator == "$eq":
                            sbc.comparator = chroma_pb.GenericComparator.EQ
                        elif operator == "$ne":
                            sbc.comparator = chroma_pb.GenericComparator.NE
                        else:
                            raise ValueError(
                                f"Expected where operator to be $eq or $ne, got {operator}"
                            )
                        dc.single_bool_operand.CopyFrom(sbc)
                    elif type(operand) is int:
                        sic = chroma_pb.SingleIntComparison()
                        sic.value = operand
                        if operator == "$eq":
                            sic.generic_comparator = chroma_pb.GenericComparator.EQ
                        elif operator == "$ne":
                            sic.generic_comparator = chroma_pb.GenericComparator.NE
                        elif operator == "$gt":
                            sic.number_comparator = chroma_pb.NumberComparator.GT
                        elif operator == "$lt":
                            sic.number_comparator = chroma_pb.NumberComparator.LT
                        elif operator == "$gte":
                            sic.number_comparator = chroma_pb.NumberComparator.GTE
                        elif operator == "$lte":
                            sic.number_comparator = chroma_pb.NumberComparator.LTE
                        else:
                            raise ValueError(
                                f"Expected where operator to be one of $eq, $ne, $gt, $lt, $gte, $lte, got {operator}"
                            )
                        dc.single_int_operand.CopyFrom(sic)
                    elif type(operand) is float:
                        sfc = chroma_pb.SingleDoubleComparison()
                        sfc.value = operand
                        if operator == "$eq":
                            sfc.generic_comparator = chroma_pb.GenericComparator.EQ
                        elif operator == "$ne":
                            sfc.generic_comparator = chroma_pb.GenericComparator.NE
                        elif operator == "$gt":
                            sfc.number_comparator = chroma_pb.NumberComparator.GT
                        elif operator == "$lt":
                            sfc.number_comparator = chroma_pb.NumberComparator.LT
                        elif operator == "$gte":
                            sfc.number_comparator = chroma_pb.NumberComparator.GTE
                        elif operator == "$lte":
                            sfc.number_comparator = chroma_pb.NumberComparator.LTE
                        else:
                            raise ValueError(
                                f"Expected where operator to be one of $eq, $ne, $gt, $lt, $gte, $lte, got {operator}"
                            )
                        dc.single_double_operand.CopyFrom(sfc)
                    else:
                        raise ValueError(
                            f"Expected where operand value to be a string, int, or float, got {operand}"
                        )
                else:
                    # This case should never happen, as we've already
                    # handled the case for direct comparisons.
                    pass
        response.direct_comparison.CopyFrom(dc)
    return response


def to_proto_where_document(where_document: WhereDocument) -> chroma_pb.WhereDocument:
    """Recursively encode a WhereDocument filter into a proto WhereDocument tree."""
    response = chroma_pb.WhereDocument()
    if len(where_document) != 1:
        raise ValueError(
            f"Expected where_document to have exactly one operator, got {where_document}"
        )

    for operator, operand in where_document.items():
        if operator == "$and" or operator == "$or":
            # Nested "$and" or "$or" expression.
            if not isinstance(operand, list):
                raise ValueError(
                    f"Expected where_document value for $and or $or to be a list of where_document expressions, got {operand}"
                )
            children: chroma_pb.WhereDocumentChildren = chroma_pb.WhereDocumentChildren(
                children=[to_proto_where_document(w) for w in operand]
            )
            if operator == "$and":
                children.operator = chroma_pb.BooleanOperator.AND
            else:
                children.operator = chroma_pb.BooleanOperator.OR

            response.children.CopyFrom(children)
        else:
            # Direct "$contains" or "$not_contains" comparison to a single
            # value.
            if not isinstance(operand, str):
                raise ValueError(
                    f"Expected where_document operand to be a string, got {operand}"
                )
            dwd = chroma_pb.DirectWhereDocument()
            dwd.document = operand
            if operator == "$contains":
                dwd.operator = chroma_pb.WhereDocumentOperator.CONTAINS
            elif operator == "$not_contains":
                dwd.operator = chroma_pb.WhereDocumentOperator.NOT_CONTAINS
            else:
                raise ValueError(
                    f"Expected where_document operator to be one of $contains, $not_contains, got {operator}"
                )
            response.direct.CopyFrom(dwd)

    return response


def to_proto_scan(scan: Scan) -> query_pb.ScanOperator:
    return query_pb.ScanOperator(
        collection=to_proto_collection(scan.collection),
        knn=to_proto_segment(scan.knn),
        metadata=to_proto_segment(scan.metadata),
        record=to_proto_segment(scan.record),
    )


def to_proto_filter(filter: Filter) -> query_pb.FilterOperator:
    return query_pb.FilterOperator(
        ids=chroma_pb.UserIds(ids=filter.user_ids)
        if filter.user_ids is not None
        else None,
        where=to_proto_where(filter.where) if filter.where else None,
        where_document=to_proto_where_document(filter.where_document)
        if filter.where_document
        else None,
    )


def to_proto_knn(knn: KNN) -> query_pb.KNNOperator:
    return query_pb.KNNOperator(
        embeddings=[
            to_proto_vector(vector=embedding, encoding=ScalarEncoding.FLOAT32)
            for embedding in knn.embeddings
        ],
        fetch=knn.fetch,
    )


def to_proto_limit(limit: Limit) -> query_pb.LimitOperator:
    return query_pb.LimitOperator(offset=limit.offset, limit=limit.limit)


def to_proto_projection(projection: Projection) -> query_pb.ProjectionOperator:
    return query_pb.ProjectionOperator(
        document=projection.document,
        embedding=projection.embedding,
        metadata=projection.metadata,
    )


def to_proto_knn_projection(projection: Projection) -> query_pb.KNNProjectionOperator:
    # NOTE(review): `distance` is populated from `projection.rank` — looks
    # intentional (rank doubles as the distance flag) but worth confirming.
    return query_pb.KNNProjectionOperator(
        projection=to_proto_projection(projection), distance=projection.rank
    )


def to_proto_count_plan(count: CountPlan) -> query_pb.CountPlan:
    return query_pb.CountPlan(scan=to_proto_scan(count.scan))


def from_proto_count_result(result: query_pb.CountResult) -> int:
    return result.count


def to_proto_get_plan(get: GetPlan) -> query_pb.GetPlan:
    return query_pb.GetPlan(
        scan=to_proto_scan(get.scan),
        filter=to_proto_filter(get.filter),
        limit=to_proto_limit(get.limit),
        projection=to_proto_projection(get.projection),
    )


def from_proto_projection_record(record: query_pb.ProjectionRecord) -> ProjectionRecord:
    # NOTE(review): proto message fields are never None; `record.embedding is
    # not None` is always true — `HasField("embedding")` may be intended.
    return ProjectionRecord(
        id=record.id,
        document=record.document if record.document else None,
        embedding=from_proto_vector(record.embedding)[0]
        if record.embedding is not None
        else None,
        metadata=from_proto_metadata(record.metadata),
    )


def from_proto_get_result(result: query_pb.GetResult) -> Sequence[ProjectionRecord]:
    return [from_proto_projection_record(record) for record in result.records]


def to_proto_knn_plan(knn: KNNPlan) -> query_pb.KNNPlan:
    return query_pb.KNNPlan(
        scan=to_proto_scan(knn.scan),
        filter=to_proto_filter(knn.filter),
        knn=to_proto_knn(knn.knn),
        projection=to_proto_knn_projection(knn.projection),
    )


def from_proto_knn_projection_record(
    record: query_pb.KNNProjectionRecord,
) -> KNNProjectionRecord:
    return KNNProjectionRecord(
        record=from_proto_projection_record(record.record), distance=record.distance
    )


def from_proto_knn_batch_result(
    results: query_pb.KNNBatchResult,
) -> Sequence[Sequence[KNNProjectionRecord]]:
    return [
        [from_proto_knn_projection_record(record) for record in result.records]
        for result in results.results
    ]
KNNProjectionRecord
python
google__jax
jax/_src/lib/triton.py
{ "start": 839, "end": 2202 }
class CompilationHandler(Protocol):
    # Restored masked class name: it is referenced by the module-level
    # annotation on `_compilation_handlers` below, which would NameError.
    """Callable that lowers a serialized Triton module for one platform."""

    def __call__(
        self,
        module: bytes,
        arch_name: str,
        num_warps: int,
        num_ctas: int,
        num_stages: int,
    ) -> CompilationResult:
        ...


# Registry of per-platform handlers; keys are upper-cased platform names.
_compilation_handlers: dict[str, CompilationHandler] = {}
_compilation_handlers_lock = threading.Lock()


def register_compilation_handler(
    platform: str, handler: CompilationHandler
) -> None:
    """Registers `handler` for `platform` (case-insensitive).

    Raises:
      RuntimeError: if a handler is already registered for the platform.
    """
    platform = platform.upper()
    with _compilation_handlers_lock:
        if existing_handler := _compilation_handlers.get(platform):
            raise RuntimeError(
                f'Platform {platform} already has a Triton compilation handler:'
                f' {existing_handler}'
            )
        _compilation_handlers[platform] = handler


def has_compilation_handler(platform: str) -> bool:
    """Returns True iff a handler is registered for `platform` (case-insensitive)."""
    platform = platform.upper()
    with _compilation_handlers_lock:
        return platform in _compilation_handlers


def compile(
    platform: str,
    module: bytes,
    arch_name: str,
    *,
    num_warps: int,
    num_ctas: int,
    num_stages: int,
) -> CompilationResult:
    """Compiles `module` using the handler registered for `platform`.

    Raises:
      RuntimeError: if no handler is registered for the platform.
    """
    platform = platform.upper()
    # Only the registry lookup is done under the lock; the handler itself
    # runs outside it so slow compilations don't serialize registrations.
    with _compilation_handlers_lock:
        handler = _compilation_handlers.get(platform)
    if handler is None:
        raise RuntimeError(
            f'Platform {platform} does not have a Triton compilation handler'
        )
    return handler(module, arch_name, num_warps, num_ctas, num_stages)
CompilationHandler
python
fastai__fastai
fastai/data/transforms.py
{ "start": 3963, "end": 8668 }
class AttrGetter(ItemTransform):
    "Creates a proper transform that applies `attrgetter(nm)` (even on a tuple)"
    _retain = False  # do not retain input type on the output
    def __init__(self, nm, default=None): store_attr()
    def encodes(self, x): return getattr(x, self.nm, self.default)

# %% ../../nbs/05_data.transforms.ipynb 32
def RandomSplitter(valid_pct=0.2, seed=None):
    "Create function that splits `items` between train/val with `valid_pct` randomly."
    def _inner(o):
        if seed is not None: torch.manual_seed(seed)
        rand_idx = L(list(torch.randperm(len(o)).numpy()))
        cut = int(valid_pct * len(o))
        return rand_idx[cut:],rand_idx[:cut]
    return _inner

# %% ../../nbs/05_data.transforms.ipynb 36
def TrainTestSplitter(test_size=0.2, random_state=None, stratify=None, train_size=None, shuffle=True):
    "Split `items` into random train and test subsets using sklearn train_test_split utility."
    def _inner(o, **kwargs):
        train,valid = train_test_split(range_of(o), test_size=test_size, random_state=random_state,
                                       stratify=stratify, train_size=train_size, shuffle=shuffle)
        return L(train), L(valid)
    return _inner

# %% ../../nbs/05_data.transforms.ipynb 38
def IndexSplitter(valid_idx):
    "Split `items` so that `val_idx` are in the validation set and the others in the training set"
    def _inner(o):
        train_idx = np.setdiff1d(np.array(range_of(o)), np.array(valid_idx))
        return L(train_idx, use_list=True), L(valid_idx, use_list=True)
    return _inner

# %% ../../nbs/05_data.transforms.ipynb 40
def EndSplitter(valid_pct=0.2, valid_last=True):
    "Create function that splits `items` between train/val with `valid_pct` at the end if `valid_last` else at the start. Useful for ordered data."
    assert 0<valid_pct<1, "valid_pct must be in (0,1)"
    def _inner(o):
        idxs = range_of(o)
        cut = int(valid_pct * len(o))
        return (idxs[:-cut], idxs[-cut:]) if valid_last else (idxs[cut:],idxs[:cut])
    return _inner

# %% ../../nbs/05_data.transforms.ipynb 42
def _grandparent_idxs(items, name):
    # `name` may be a single folder name or a collection of them.
    def _inner(items, name): return mask2idxs(Path(o).parent.parent.name == name for o in items)
    return [i for n in L(name) for i in _inner(items,n)]

# %% ../../nbs/05_data.transforms.ipynb 43
def GrandparentSplitter(train_name='train', valid_name='valid'):
    "Split `items` from the grand parent folder names (`train_name` and `valid_name`)."
    def _inner(o):
        return _grandparent_idxs(o, train_name),_grandparent_idxs(o, valid_name)
    return _inner

# %% ../../nbs/05_data.transforms.ipynb 47
def FuncSplitter(func):
    "Split `items` by result of `func` (`True` for validation, `False` for training set)."
    def _inner(o):
        val_idx = mask2idxs(func(o_) for o_ in o)
        return IndexSplitter(val_idx)(o)
    return _inner

# %% ../../nbs/05_data.transforms.ipynb 49
def MaskSplitter(mask):
    "Split `items` depending on the value of `mask`."
    def _inner(o): return IndexSplitter(mask2idxs(mask))(o)
    return _inner

# %% ../../nbs/05_data.transforms.ipynb 51
def FileSplitter(fname):
    "Split `items` by providing file `fname` (contains names of valid items separated by newline)."
    valid = Path(fname).read_text().split('\n')
    def _func(x): return x.name in valid
    def _inner(o): return FuncSplitter(_func)(o)
    return _inner

# %% ../../nbs/05_data.transforms.ipynb 53
def ColSplitter(col='is_valid', on=None):
    "Split `items` (supposed to be a dataframe) by value in `col`"
    def _inner(o):
        assert isinstance(o, pd.DataFrame), "ColSplitter only works when your items are a pandas DataFrame"
        # int `col` selects by position, str selects by column name
        c = o.iloc[:,col] if isinstance(col, int) else o[col]
        if on is None:      valid_idx = c.values.astype('bool')
        elif is_listy(on):  valid_idx = c.isin(on)
        else:               valid_idx = c == on
        return IndexSplitter(mask2idxs(valid_idx))(o)
    return _inner

# %% ../../nbs/05_data.transforms.ipynb 55
def RandomSubsetSplitter(train_sz, valid_sz, seed=None):
    "Take randoms subsets of `splits` with `train_sz` and `valid_sz`"
    assert 0 < train_sz < 1
    assert 0 < valid_sz < 1
    assert train_sz + valid_sz <= 1.
    def _inner(o):
        if seed is not None: torch.manual_seed(seed)
        train_len,valid_len = int(len(o)*train_sz),int(len(o)*valid_sz)
        idxs = L(list(torch.randperm(len(o)).numpy()))
        return idxs[:train_len],idxs[train_len:train_len+valid_len]
    return _inner

# %% ../../nbs/05_data.transforms.ipynb 59
def parent_label(o):
    "Label `item` with the parent folder name."
    return Path(o).parent.name

# %% ../../nbs/05_data.transforms.ipynb 63
AttrGetter
python
pydata__xarray
xarray/tests/test_variable.py
{ "start": 2257, "end": 38942 }
class ____(NamedArraySubclassobjects, ABC): @pytest.fixture def target(self, data): data = 0.5 * np.arange(10).reshape(2, 5) return Variable(["x", "y"], data) def test_getitem_dict(self): v = self.cls(["x"], np.random.randn(5)) actual = v[{"x": 0}] expected = v[0] assert_identical(expected, actual) def test_getitem_1d(self): data = np.array([0, 1, 2]) v = self.cls(["x"], data) v_new = v[dict(x=[0, 1])] assert v_new.dims == ("x",) assert_array_equal(v_new, data[[0, 1]]) v_new = v[dict(x=slice(None))] assert v_new.dims == ("x",) assert_array_equal(v_new, data) v_new = v[dict(x=Variable("a", [0, 1]))] assert v_new.dims == ("a",) assert_array_equal(v_new, data[[0, 1]]) v_new = v[dict(x=1)] assert v_new.dims == () assert_array_equal(v_new, data[1]) # tuple argument v_new = v[slice(None)] assert v_new.dims == ("x",) assert_array_equal(v_new, data) def test_getitem_1d_fancy(self): v = self.cls(["x"], [0, 1, 2]) # 1d-variable should be indexable by multi-dimensional Variable ind = Variable(("a", "b"), [[0, 1], [0, 1]]) v_new = v[ind] assert v_new.dims == ("a", "b") expected = np.array(v._data)[([0, 1], [0, 1]), ...] 
assert_array_equal(v_new, expected) # boolean indexing ind = Variable(("x",), [True, False, True]) v_new = v[ind] assert_identical(v[[0, 2]], v_new) v_new = v[[True, False, True]] assert_identical(v[[0, 2]], v_new) with pytest.raises(IndexError, match=r"Boolean indexer should"): ind = Variable(("a",), [True, False, True]) v[ind] def test_getitem_with_mask(self): v = self.cls(["x"], [0, 1, 2]) assert_identical(v._getitem_with_mask(-1), Variable((), np.nan)) assert_identical( v._getitem_with_mask([0, -1, 1]), self.cls(["x"], [0, np.nan, 1]) ) assert_identical(v._getitem_with_mask(slice(2)), self.cls(["x"], [0, 1])) assert_identical( v._getitem_with_mask([0, -1, 1], fill_value=-99), self.cls(["x"], [0, -99, 1]), ) def test_getitem_with_mask_size_zero(self): v = self.cls(["x"], []) assert_identical(v._getitem_with_mask(-1), Variable((), np.nan)) assert_identical( v._getitem_with_mask([-1, -1, -1]), self.cls(["x"], [np.nan, np.nan, np.nan]), ) def test_getitem_with_mask_nd_indexer(self): v = self.cls(["x"], [0, 1, 2]) indexer = Variable(("x", "y"), [[0, -1], [-1, 2]]) assert_identical(v._getitem_with_mask(indexer, fill_value=-1), indexer) def _assertIndexedLikeNDArray(self, variable, expected_value0, expected_dtype=None): """Given a 1-dimensional variable, verify that the variable is indexed like a numpy.ndarray. 
""" assert variable[0].shape == () assert variable[0].ndim == 0 assert variable[0].size == 1 # test identity assert variable.equals(variable.copy()) assert variable.identical(variable.copy()) # check value is equal for both ndarray and Variable with warnings.catch_warnings(): warnings.filterwarnings("ignore", "In the future, 'NAT == x'") np.testing.assert_equal(variable.values[0], expected_value0) np.testing.assert_equal(variable[0].values, expected_value0) # check type or dtype is consistent for both ndarray and Variable if expected_dtype is None: # check output type instead of array dtype assert type(variable.values[0]) is type(expected_value0) assert type(variable[0].values) is type(expected_value0) elif expected_dtype is not False: assert variable.values[0].dtype == expected_dtype assert variable[0].values.dtype == expected_dtype def test_index_0d_int(self): for value, dtype in [(0, np.int_), (np.int32(0), np.int32)]: x = self.cls(["x"], [value]) self._assertIndexedLikeNDArray(x, value, dtype) def test_index_0d_float(self): for value, dtype in [(0.5, float), (np.float32(0.5), np.float32)]: x = self.cls(["x"], [value]) self._assertIndexedLikeNDArray(x, value, dtype) def test_index_0d_string(self): value = "foo" dtype = np.dtype("U3") x = self.cls(["x"], [value]) self._assertIndexedLikeNDArray(x, value, dtype) def test_index_0d_datetime(self): d = datetime(2000, 1, 1) x = self.cls(["x"], [d]) self._assertIndexedLikeNDArray(x, np.datetime64(d)) x = self.cls(["x"], [np.datetime64(d)]) self._assertIndexedLikeNDArray(x, np.datetime64(d), "datetime64[us]") expected_unit = "us" if has_pandas_3 else "ns" x = self.cls(["x"], pd.DatetimeIndex([d])) self._assertIndexedLikeNDArray( x, np.datetime64(d), f"datetime64[{expected_unit}]" ) def test_index_0d_timedelta64(self): td = timedelta(hours=1) # todo: discussion needed x = self.cls(["x"], [np.timedelta64(td)]) self._assertIndexedLikeNDArray( x, np.timedelta64(td), np.dtype("timedelta64[us]") ) x = self.cls(["x"], 
pd.to_timedelta([td])) self._assertIndexedLikeNDArray(x, np.timedelta64(td), "timedelta64[ns]") def test_index_0d_not_a_time(self): d = np.datetime64("NaT", "ns") x = self.cls(["x"], [d]) self._assertIndexedLikeNDArray(x, d) def test_index_0d_object(self): class HashableItemWrapper: def __init__(self, item): self.item = item def __eq__(self, other): return self.item == other.item def __hash__(self): return hash(self.item) def __repr__(self): return f"{type(self).__name__}(item={self.item!r})" item = HashableItemWrapper((1, 2, 3)) x = self.cls("x", [item]) self._assertIndexedLikeNDArray(x, item, expected_dtype=False) def test_0d_object_array_with_list(self): listarray = np.empty((1,), dtype=object) listarray[0] = [1, 2, 3] x = self.cls("x", listarray) assert_array_equal(x.data, listarray) assert_array_equal(x[0].data, listarray.squeeze()) assert_array_equal(x.squeeze().data, listarray.squeeze()) def test_index_and_concat_datetime(self): # regression test for #125 date_range = pd.date_range("2011-09-01", periods=10) for dates in [date_range, date_range.values, date_range.to_pydatetime()]: expected = self.cls("t", dates) for times in [ [expected[i] for i in range(10)], [expected[i : (i + 1)] for i in range(10)], [expected[[i]] for i in range(10)], ]: actual = Variable.concat(times, "t") assert expected.dtype == actual.dtype assert_array_equal(expected, actual) def test_0d_time_data(self): # regression test for #105 x = self.cls("time", pd.date_range("2000-01-01", periods=5)) expected = np.datetime64("2000-01-01", "ns") assert x[0].values == expected dt64_data = pd.date_range("1970-01-01", periods=3) @pytest.mark.parametrize( "values, unit", [ (dt64_data, "ns"), (dt64_data.values, "ns"), (dt64_data.values.astype("datetime64[m]"), "s"), (dt64_data.values.astype("datetime64[s]"), "s"), (dt64_data.values.astype("datetime64[ps]"), "ns"), ( dt64_data.to_pydatetime(), "us" if has_pandas_3 else "ns", ), ], ) def test_datetime64_conversion(self, values, unit): v = 
self.cls(["t"], values) assert v.dtype == np.dtype(f"datetime64[{unit}]") assert_array_equal(v.values, self.dt64_data.values) assert v.values.dtype == np.dtype(f"datetime64[{unit}]") td64_data = pd.timedelta_range(start=0, periods=3) @pytest.mark.parametrize( "values, unit", [ (td64_data, "ns"), (td64_data.values, "ns"), (td64_data.values.astype("timedelta64[m]"), "s"), (td64_data.values.astype("timedelta64[s]"), "s"), (td64_data.values.astype("timedelta64[ps]"), "ns"), (td64_data.to_pytimedelta(), "ns"), ], ) def test_timedelta64_conversion(self, values, unit): v = self.cls(["t"], values) assert v.dtype == np.dtype(f"timedelta64[{unit}]") assert_array_equal(v.values, self.td64_data.values) assert v.values.dtype == np.dtype(f"timedelta64[{unit}]") def test_object_conversion(self): data = np.arange(5).astype(str).astype(object) actual = self.cls("x", data) assert actual.dtype == data.dtype def test_pandas_data(self): v = self.cls(["x"], pd.Series([0, 1, 2], index=[3, 2, 1])) assert_identical(v, v[[0, 1, 2]]) v = self.cls(["x"], pd.Index([0, 1, 2])) assert v[0].values == v.values[0] def test_pandas_period_index(self): v = self.cls(["x"], pd.period_range(start="2000", periods=20, freq="D")) v = v.load() # for dask-based Variable assert v[0] == pd.Period("2000", freq="D") assert "PeriodArray" in repr(v) @pytest.mark.parametrize("dtype", [float, int]) def test_1d_math(self, dtype: np.typing.DTypeLike | None) -> None: x = np.arange(5, dtype=dtype) y = np.ones(5, dtype=dtype) # should we need `.to_base_variable()`? # probably a break that `+v` changes type? 
v = self.cls(["x"], x) base_v = v.to_base_variable() # unary ops assert_identical(base_v, +v) assert_identical(base_v, abs(v)) assert_array_equal((-v).values, -x) # binary ops with numbers assert_identical(base_v, v + 0) assert_identical(base_v, 0 + v) assert_identical(base_v, v * 1) if dtype is int: assert_identical(base_v, v << 0) assert_array_equal(v << 3, x << 3) assert_array_equal(v >> 2, x >> 2) # binary ops with numpy arrays assert_array_equal((v * x).values, x**2) assert_array_equal((x * v).values, x**2) assert_array_equal(v - y, v - 1) assert_array_equal(y - v, 1 - v) if dtype is int: assert_array_equal(v << x, x << x) assert_array_equal(v >> x, x >> x) # verify attributes are dropped v2 = self.cls(["x"], x, {"units": "meters"}) with set_options(keep_attrs=False): assert_identical(base_v, +v2) # binary ops with all variables assert_array_equal(v + v, 2 * v) w = self.cls(["x"], y, {"foo": "bar"}) # With drop_conflicts, v (no attrs) + w (has attrs) should keep w's attrs # Note: IndexVariable ops return Variable, not IndexVariable expected = self.cls(["x"], x + y, {"foo": "bar"}).to_base_variable() assert_identical(v + w, expected) assert_array_equal((v * w).values, x * y) # something complicated assert_array_equal((v**2 * w - 1 + x).values, x**2 * y - 1 + x) # make sure dtype is preserved (for Index objects) assert dtype == (+v).dtype assert dtype == (+v).values.dtype assert dtype == (0 + v).dtype assert dtype == (0 + v).values.dtype # check types of returned data assert isinstance(+v, Variable) assert not isinstance(+v, IndexVariable) assert isinstance(0 + v, Variable) assert not isinstance(0 + v, IndexVariable) def test_1d_reduce(self): x = np.arange(5) v = self.cls(["x"], x) actual = v.sum() expected = Variable((), 10) assert_identical(expected, actual) assert type(actual) is Variable def test_array_interface(self): x = np.arange(5) v = self.cls(["x"], x) assert_array_equal(np.asarray(v), x) # test patched in methods assert_array_equal(v.astype(float), 
x.astype(float)) # think this is a break, that argsort changes the type assert_identical(v.argsort(), v.to_base_variable()) assert_identical(v.clip(2, 3), self.cls("x", x.clip(2, 3)).to_base_variable()) # test ufuncs assert_identical(np.sin(v), self.cls(["x"], np.sin(x)).to_base_variable()) assert isinstance(np.sin(v), Variable) assert not isinstance(np.sin(v), IndexVariable) def example_1d_objects(self): for data in [ range(3), 0.5 * np.arange(3), 0.5 * np.arange(3, dtype=np.float32), pd.date_range("2000-01-01", periods=3), np.array(["a", "b", "c"], dtype=object), ]: yield (self.cls("x", data), data) def test___array__(self): for v, data in self.example_1d_objects(): assert_array_equal(v.values, np.asarray(data)) assert_array_equal(np.asarray(v), np.asarray(data)) assert v[0].values == np.asarray(data)[0] assert np.asarray(v[0]) == np.asarray(data)[0] def test_equals_all_dtypes(self): for v, _ in self.example_1d_objects(): v2 = v.copy() assert v.equals(v2) assert v.identical(v2) assert v.no_conflicts(v2) assert v[0].equals(v2[0]) assert v[0].identical(v2[0]) assert v[0].no_conflicts(v2[0]) assert v[:2].equals(v2[:2]) assert v[:2].identical(v2[:2]) assert v[:2].no_conflicts(v2[:2]) def test_eq_all_dtypes(self): # ensure that we don't choke on comparisons for which numpy returns # scalars expected = Variable("x", 3 * [False]) for v, _ in self.example_1d_objects(): actual = "z" == v assert_identical(expected, actual) actual = ~("z" != v) assert_identical(expected, actual) def test_encoding_preserved(self): expected = self.cls("x", range(3), {"foo": 1}, {"bar": 2}) for actual in [ expected.T, expected[...], expected.squeeze(), expected.isel(x=slice(None)), expected.set_dims({"x": 3}), expected.copy(deep=True), expected.copy(deep=False), ]: assert_identical(expected.to_base_variable(), actual.to_base_variable()) assert expected.encoding == actual.encoding def test_drop_encoding(self) -> None: encoding1 = {"scale_factor": 1} # encoding set via cls constructor v1 = 
self.cls(["a"], [0, 1, 2], encoding=encoding1) assert v1.encoding == encoding1 v2 = v1.drop_encoding() assert v1.encoding == encoding1 assert v2.encoding == {} # encoding set via setter encoding3 = {"scale_factor": 10} v3 = self.cls(["a"], [0, 1, 2], encoding=encoding3) assert v3.encoding == encoding3 v4 = v3.drop_encoding() assert v3.encoding == encoding3 assert v4.encoding == {} def test_concat(self): x = np.arange(5) y = np.arange(5, 10) v = self.cls(["a"], x) w = self.cls(["a"], y) assert_identical( Variable(["b", "a"], np.array([x, y])), Variable.concat([v, w], "b") ) assert_identical( Variable(["b", "a"], np.array([x, y])), Variable.concat((v, w), "b") ) assert_identical( Variable(["b", "a"], np.array([x, y])), Variable.concat((v, w), "b") ) with pytest.raises(ValueError, match=r"Variable has dimensions"): Variable.concat([v, Variable(["c"], y)], "b") # test indexers actual = Variable.concat( [v, w], positions=[np.arange(0, 10, 2), np.arange(1, 10, 2)], dim="a" ) expected = Variable("a", np.array([x, y]).ravel(order="F")) assert_identical(expected, actual) # test concatenating along a dimension v = Variable(["time", "x"], np.random.random((10, 8))) assert_identical(v, Variable.concat([v[:5], v[5:]], "time")) assert_identical(v, Variable.concat([v[:5], v[5:6], v[6:]], "time")) assert_identical(v, Variable.concat([v[:1], v[1:]], "time")) # test dimension order assert_identical(v, Variable.concat([v[:, :5], v[:, 5:]], "x")) with pytest.raises(ValueError, match=r"all input arrays must have"): Variable.concat([v[:, 0], v[:, 1:]], "x") def test_concat_attrs(self): # always keep attrs from first variable v = self.cls("a", np.arange(5), {"foo": "bar"}) w = self.cls("a", np.ones(5)) expected = self.cls( "a", np.concatenate([np.arange(5), np.ones(5)]) ).to_base_variable() expected.attrs["foo"] = "bar" assert_identical(expected, Variable.concat([v, w], "a")) def test_concat_fixed_len_str(self): # regression test for #217 for kind in ["S", "U"]: x = self.cls("animal", 
np.array(["horse"], dtype=kind)) y = self.cls("animal", np.array(["aardvark"], dtype=kind)) actual = Variable.concat([x, y], "animal") expected = Variable("animal", np.array(["horse", "aardvark"], dtype=kind)) assert_equal(expected, actual) def test_concat_number_strings(self): # regression test for #305 a = self.cls("x", ["0", "1", "2"]) b = self.cls("x", ["3", "4"]) actual = Variable.concat([a, b], dim="x") expected = Variable("x", np.arange(5).astype(str)) assert_identical(expected, actual) assert actual.dtype.kind == expected.dtype.kind def test_concat_mixed_dtypes(self): a = self.cls("x", [0, 1]) b = self.cls("x", ["two"]) actual = Variable.concat([a, b], dim="x") expected = Variable("x", np.array([0, 1, "two"], dtype=object)) assert_identical(expected, actual) assert actual.dtype == object @pytest.mark.parametrize("deep", [True, False]) @pytest.mark.parametrize("astype", [float, int, str]) def test_copy(self, deep: bool, astype: type[object]) -> None: v = self.cls("x", (0.5 * np.arange(10)).astype(astype), {"foo": "bar"}) w = v.copy(deep=deep) assert type(v) is type(w) assert_identical(v, w) assert v.dtype == w.dtype if self.cls is Variable: if deep: assert source_ndarray(v.values) is not source_ndarray(w.values) else: assert source_ndarray(v.values) is source_ndarray(w.values) assert_identical(v, copy(v)) def test_copy_deep_recursive(self) -> None: # GH:issue:7111 # direct recursion v = self.cls("x", [0, 1]) v.attrs["other"] = v # TODO: cannot use assert_identical on recursive Vars yet... # lets just ensure that deep copy works without RecursionError v.copy(deep=True) # indirect recursion v2 = self.cls("y", [2, 3]) v.attrs["other"] = v2 v2.attrs["other"] = v # TODO: cannot use assert_identical on recursive Vars yet... 
# lets just ensure that deep copy works without RecursionError v.copy(deep=True) v2.copy(deep=True) def test_copy_index(self): midx = pd.MultiIndex.from_product( [["a", "b"], [1, 2], [-1, -2]], names=("one", "two", "three") ) v = self.cls("x", midx) for deep in [True, False]: w = v.copy(deep=deep) assert isinstance(w._data, PandasIndexingAdapter) assert isinstance(w.to_index(), pd.MultiIndex) assert_array_equal(v._data.array, w._data.array) def test_copy_with_data(self) -> None: orig = Variable(("x", "y"), [[1.5, 2.0], [3.1, 4.3]], {"foo": "bar"}) new_data = np.array([[2.5, 5.0], [7.1, 43]]) actual = orig.copy(data=new_data) expected = orig.copy() expected.data = new_data assert_identical(expected, actual) def test_copy_with_data_errors(self) -> None: orig = Variable(("x", "y"), [[1.5, 2.0], [3.1, 4.3]], {"foo": "bar"}) new_data = [2.5, 5.0] with pytest.raises(ValueError, match=r"must match shape of object"): orig.copy(data=new_data) # type: ignore[arg-type] def test_copy_index_with_data(self) -> None: orig = IndexVariable("x", np.arange(5)) new_data = np.arange(5, 10) actual = orig.copy(data=new_data) expected = IndexVariable("x", np.arange(5, 10)) assert_identical(expected, actual) def test_copy_index_with_data_errors(self) -> None: orig = IndexVariable("x", np.arange(5)) new_data = np.arange(5, 20) with pytest.raises(ValueError, match=r"must match shape of object"): orig.copy(data=new_data) with pytest.raises(ValueError, match=r"Cannot assign to the .data"): orig.data = new_data with pytest.raises(ValueError, match=r"Cannot assign to the .values"): orig.values = new_data def test_replace(self): var = Variable(("x", "y"), [[1.5, 2.0], [3.1, 4.3]], {"foo": "bar"}) result = var._replace() assert_identical(result, var) new_data = np.arange(4).reshape(2, 2) result = var._replace(data=new_data) assert_array_equal(result.data, new_data) def test_real_and_imag(self): v = self.cls("x", np.arange(3) - 1j * np.arange(3), {"foo": "bar"}) expected_re = self.cls("x", 
np.arange(3), {"foo": "bar"}) assert_identical(v.real, expected_re) expected_im = self.cls("x", -np.arange(3), {"foo": "bar"}) assert_identical(v.imag, expected_im) expected_abs = self.cls("x", np.sqrt(2 * np.arange(3) ** 2)).to_base_variable() assert_allclose(abs(v), expected_abs) def test_aggregate_complex(self): # should skip NaNs v = self.cls("x", [1, 2j, np.nan]) expected = Variable((), 0.5 + 1j) assert_allclose(v.mean(), expected) def test_pandas_categorical_dtype(self): data = pd.Categorical(np.arange(10, dtype="int64")) v = self.cls("x", data) print(v) # should not error assert v.dtype == data.dtype def test_pandas_datetime64_with_tz(self): data = pd.date_range( start="2000-01-01", tz=pytz.timezone("America/New_York"), periods=10, freq="1h", ) v = self.cls("x", data) print(v) # should not error if v.dtype == np.dtype("O"): import dask.array as da assert isinstance(v.data, da.Array) else: assert v.dtype == data.dtype def test_multiindex(self): idx = pd.MultiIndex.from_product([list("abc"), [0, 1]]) v = self.cls("x", idx) assert_identical(Variable((), ("a", 0)), v[0]) assert_identical(v, v[:]) def test_load(self): array = self.cls("x", np.arange(5)) orig_data = array._data copied = array.copy(deep=True) if array.chunks is None: array.load() assert type(array._data) is type(orig_data) assert type(copied._data) is type(orig_data) assert_identical(array, copied) def test_getitem_advanced(self): v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]]) v_data = v.compute().data # orthogonal indexing v_new = v[([0, 1], [1, 0])] assert v_new.dims == ("x", "y") assert_array_equal(v_new, v_data[[0, 1]][:, [1, 0]]) v_new = v[[0, 1]] assert v_new.dims == ("x", "y") assert_array_equal(v_new, v_data[[0, 1]]) # with mixed arguments ind = Variable(["a"], [0, 1]) v_new = v[dict(x=[0, 1], y=ind)] assert v_new.dims == ("x", "a") assert_array_equal(v_new, v_data[[0, 1]][:, [0, 1]]) # boolean indexing v_new = v[dict(x=[True, False], y=[False, True, False])] assert v_new.dims == ("x", 
"y") assert_array_equal(v_new, v_data[0][1]) # with scalar variable ind = Variable((), 2) v_new = v[dict(y=ind)] expected = v[dict(y=2)] assert_array_equal(v_new, expected) # with boolean variable with wrong shape ind2: np.ndarray[Any, np.dtype[np.bool_]] = np.array([True, False]) with pytest.raises(IndexError, match=r"Boolean array size 2 is "): v[Variable(("a", "b"), [[0, 1]]), ind2] # boolean indexing with different dimension ind = Variable(["a"], [True, False, False]) with pytest.raises(IndexError, match=r"Boolean indexer should be"): v[dict(y=ind)] def test_getitem_uint_1d(self): # regression test for #1405 v = self.cls(["x"], [0, 1, 2]) v_data = v.compute().data v_new = v[np.array([0])] assert_array_equal(v_new, v_data[0]) v_new = v[np.array([0], dtype="uint64")] assert_array_equal(v_new, v_data[0]) def test_getitem_uint(self): # regression test for #1405 v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]]) v_data = v.compute().data v_new = v[np.array([0])] assert_array_equal(v_new, v_data[[0], :]) v_new = v[np.array([0], dtype="uint64")] assert_array_equal(v_new, v_data[[0], :]) v_new = v[np.uint64(0)] assert_array_equal(v_new, v_data[0, :]) def test_getitem_0d_array(self): # make sure 0d-np.array can be used as an indexer v = self.cls(["x"], [0, 1, 2]) v_data = v.compute().data v_new = v[np.array([0])[0]] assert_array_equal(v_new, v_data[0]) v_new = v[np.array(0)] assert_array_equal(v_new, v_data[0]) v_new = v[Variable((), np.array(0))] assert_array_equal(v_new, v_data[0]) def test_getitem_fancy(self): v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]]) v_data = v.compute().data ind = Variable(["a", "b"], [[0, 1, 1], [1, 1, 0]]) v_new = v[ind] assert v_new.dims == ("a", "b", "y") assert_array_equal(v_new, v_data[[[0, 1, 1], [1, 1, 0]], :]) # It would be ok if indexed with the multi-dimensional array including # the same name ind = Variable(["x", "b"], [[0, 1, 1], [1, 1, 0]]) v_new = v[ind] assert v_new.dims == ("x", "b", "y") assert_array_equal(v_new, v_data[[[0, 
1, 1], [1, 1, 0]], :]) ind = Variable(["a", "b"], [[0, 1, 2], [2, 1, 0]]) v_new = v[dict(y=ind)] assert v_new.dims == ("x", "a", "b") assert_array_equal(v_new, v_data[:, ([0, 1, 2], [2, 1, 0])]) ind = Variable(["a", "b"], [[0, 0], [1, 1]]) v_new = v[dict(x=[1, 0], y=ind)] assert v_new.dims == ("x", "a", "b") assert_array_equal(v_new, v_data[[1, 0]][:, ind]) # along diagonal ind = Variable(["a"], [0, 1]) v_new = v[ind, ind] assert v_new.dims == ("a",) assert_array_equal(v_new, v_data[[0, 1], [0, 1]]) # with integer ind = Variable(["a", "b"], [[0, 0], [1, 1]]) v_new = v[dict(x=0, y=ind)] assert v_new.dims == ("a", "b") assert_array_equal(v_new[0], v_data[0][[0, 0]]) assert_array_equal(v_new[1], v_data[0][[1, 1]]) # with slice ind = Variable(["a", "b"], [[0, 0], [1, 1]]) v_new = v[dict(x=slice(None), y=ind)] assert v_new.dims == ("x", "a", "b") assert_array_equal(v_new, v_data[:, [[0, 0], [1, 1]]]) ind = Variable(["a", "b"], [[0, 0], [1, 1]]) v_new = v[dict(x=ind, y=slice(None))] assert v_new.dims == ("a", "b", "y") assert_array_equal(v_new, v_data[[[0, 0], [1, 1]], :]) ind = Variable(["a", "b"], [[0, 0], [1, 1]]) v_new = v[dict(x=ind, y=slice(None, 1))] assert v_new.dims == ("a", "b", "y") assert_array_equal(v_new, v_data[[[0, 0], [1, 1]], slice(None, 1)]) # slice matches explicit dimension ind = Variable(["y"], [0, 1]) v_new = v[ind, :2] assert v_new.dims == ("y",) assert_array_equal(v_new, v_data[[0, 1], [0, 1]]) # with multiple slices v = self.cls(["x", "y", "z"], [[[1, 2, 3], [4, 5, 6]]]) ind = Variable(["a", "b"], [[0]]) v_new = v[ind, :, :] expected = Variable(["a", "b", "y", "z"], v.data[np.newaxis, ...]) assert_identical(v_new, expected) v = Variable(["w", "x", "y", "z"], [[[[1, 2, 3], [4, 5, 6]]]]) ind = Variable(["y"], [0]) v_new = v[ind, :, 1:2, 2] expected = Variable(["y", "x"], [[6]]) assert_identical(v_new, expected) # slice and vector mixed indexing resulting in the same dimension v = Variable(["x", "y", "z"], np.arange(60).reshape(3, 4, 5)) ind = 
Variable(["x"], [0, 1, 2]) v_new = v[:, ind] expected = Variable(("x", "z"), np.zeros((3, 5))) expected[0] = v.data[0, 0] expected[1] = v.data[1, 1] expected[2] = v.data[2, 2] assert_identical(v_new, expected) v_new = v[:, ind.data] assert v_new.shape == (3, 3, 5) def test_getitem_error(self): v = self.cls(["x", "y"], [[0, 1, 2], [3, 4, 5]]) with pytest.raises(IndexError, match=r"labeled multi-"): v[[[0, 1], [1, 2]]] ind_x = Variable(["a"], [0, 1, 1]) ind_y = Variable(["a"], [0, 1]) with pytest.raises(IndexError, match=r"Dimensions of indexers "): v[ind_x, ind_y] ind = Variable(["a", "b"], [[True, False], [False, True]]) with pytest.raises(IndexError, match=r"2-dimensional boolean"): v[dict(x=ind)] v = Variable(["x", "y", "z"], np.arange(60).reshape(3, 4, 5)) ind = Variable(["x"], [0, 1]) with pytest.raises(IndexError, match=r"Dimensions of indexers mismatch"): v[:, ind] @pytest.mark.parametrize( "mode", [ "mean", "median", "reflect", "edge", "linear_ramp", "maximum", "minimum", "symmetric", "wrap", ], ) @pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS) @pytest.mark.filterwarnings( r"ignore:dask.array.pad.+? converts integers to floats." 
) def test_pad(self, mode, xr_arg, np_arg): data = np.arange(4 * 3 * 2).reshape(4, 3, 2) v = self.cls(["x", "y", "z"], data) actual = v.pad(mode=mode, **xr_arg) expected = np.pad(data, np_arg, mode=mode) assert_array_equal(actual, expected) assert isinstance(actual._data, type(v._data)) @pytest.mark.parametrize("xr_arg, np_arg", _PAD_XR_NP_ARGS) def test_pad_constant_values(self, xr_arg, np_arg): data = np.arange(4 * 3 * 2).reshape(4, 3, 2) v = self.cls(["x", "y", "z"], data) actual = v.pad(**xr_arg) expected = np.pad( np.array(duck_array_ops.astype(v.data, float)), np_arg, mode="constant", constant_values=np.nan, ) assert_array_equal(actual, expected) assert isinstance(actual._data, type(v._data)) # for the boolean array, we pad False data = np.full_like(data, False, dtype=bool).reshape(4, 3, 2) v = self.cls(["x", "y", "z"], data) actual = v.pad(mode="constant", constant_values=False, **xr_arg) expected = np.pad( np.array(v.data), np_arg, mode="constant", constant_values=False ) assert_array_equal(actual, expected) @pytest.mark.parametrize( ["keep_attrs", "attrs", "expected"], [ pytest.param(None, {"a": 1, "b": 2}, {"a": 1, "b": 2}, id="default"), pytest.param(False, {"a": 1, "b": 2}, {}, id="False"), pytest.param(True, {"a": 1, "b": 2}, {"a": 1, "b": 2}, id="True"), ], ) def test_pad_keep_attrs(self, keep_attrs, attrs, expected): data = np.arange(10, dtype=float) v = self.cls(["x"], data, attrs) keep_attrs_ = "default" if keep_attrs is None else keep_attrs with set_options(keep_attrs=keep_attrs_): actual = v.pad({"x": (1, 1)}, mode="constant", constant_values=np.nan) assert actual.attrs == expected actual = v.pad( {"x": (1, 1)}, mode="constant", constant_values=np.nan, keep_attrs=keep_attrs, ) assert actual.attrs == expected @pytest.mark.parametrize("d, w", (("x", 3), ("y", 5))) def test_rolling_window(self, d, w): # Just a working test. 
See test_nputils for the algorithm validation v = self.cls(["x", "y", "z"], np.arange(40 * 30 * 2).reshape(40, 30, 2)) v_rolling = v.rolling_window(d, w, d + "_window") assert v_rolling.dims == ("x", "y", "z", d + "_window") assert v_rolling.shape == v.shape + (w,) v_rolling = v.rolling_window(d, w, d + "_window", center=True) assert v_rolling.dims == ("x", "y", "z", d + "_window") assert v_rolling.shape == v.shape + (w,) # dask and numpy result should be the same v_loaded = v.load().rolling_window(d, w, d + "_window", center=True) assert_array_equal(v_rolling, v_loaded) # numpy backend should not be over-written if isinstance(v._data, np.ndarray): with pytest.raises(ValueError): v_loaded[0] = 1.0 def test_rolling_1d(self): x = self.cls("x", np.array([1, 2, 3, 4], dtype=float)) kwargs = dict(dim="x", window=3, window_dim="xw") actual = x.rolling_window(**kwargs, center=True, fill_value=np.nan) expected = Variable( ("x", "xw"), np.array( [[np.nan, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, np.nan]], dtype=float ), ) assert_equal(actual, expected) actual = x.rolling_window(**kwargs, center=False, fill_value=0.0) expected = self.cls( ("x", "xw"), np.array([[0, 0, 1], [0, 1, 2], [1, 2, 3], [2, 3, 4]], dtype=float), ) assert_equal(actual, expected) x = self.cls(("y", "x"), np.stack([x, x * 1.1])) actual = x.rolling_window(**kwargs, center=False, fill_value=0.0) expected = self.cls( ("y", "x", "xw"), np.stack([expected.data, expected.data * 1.1], axis=0) ) assert_equal(actual, expected) @pytest.mark.parametrize("center", [[True, True], [False, False]]) @pytest.mark.parametrize("dims", [("x", "y"), ("y", "z"), ("z", "x")]) def test_nd_rolling(self, center, dims): x = self.cls( ("x", "y", "z"), np.arange(7 * 6 * 8).reshape(7, 6, 8).astype(float), ) window = [3, 3] actual = x.rolling_window( dim=dims, window=window, window_dim=[f"{k}w" for k in dims], center=center, fill_value=np.nan, ) expected = x for dim, win, cent in zip(dims, window, center, strict=True): expected = 
expected.rolling_window( dim=dim, window=win, window_dim=f"{dim}w", center=cent, fill_value=np.nan, ) assert_equal(actual, expected) @pytest.mark.parametrize( ("dim, window, window_dim, center"), [ ("x", [3, 3], "x_w", True), ("x", 3, ("x_w", "x_w"), True), ("x", 3, "x_w", [True, True]), ], ) def test_rolling_window_errors(self, dim, window, window_dim, center): x = self.cls( ("x", "y", "z"), np.arange(7 * 6 * 8).reshape(7, 6, 8).astype(float), ) with pytest.raises(ValueError): x.rolling_window( dim=dim, window=window, window_dim=window_dim, center=center, )
VariableSubclassobjects
python
crytic__slither
slither/slithir/operations/event_call.py
{ "start": 143, "end": 622 }
class ____(Call): def __init__(self, name: Union[str, Constant]) -> None: super().__init__() self._name = name # todo add instance of the Event @property def name(self) -> Union[str, Constant]: return self._name @property def read(self) -> List[Any]: return self._unroll(self.arguments) def __str__(self): args = [str(a) for a in self.arguments] return f"Emit {self.name}({','.join(args)})"
EventCall
python
matplotlib__matplotlib
lib/matplotlib/widgets.py
{ "start": 2771, "end": 5191 }
class ____(Widget): """ Widget connected to a single `~matplotlib.axes.Axes`. To guarantee that the widget remains responsive and not garbage-collected, a reference to the object should be maintained by the user. This is necessary because the callback registry maintains only weak-refs to the functions, which are member functions of the widget. If there are no references to the widget object it may be garbage collected which will disconnect the callbacks. Attributes ---------- ax : `~matplotlib.axes.Axes` The parent Axes for the widget. canvas : `~matplotlib.backend_bases.FigureCanvasBase` The parent figure canvas for the widget. active : bool If False, the widget does not respond to events. """ def __init__(self, ax): self.ax = ax self._cids = [] canvas = property( lambda self: getattr(self.ax.get_figure(root=True), 'canvas', None) ) def connect_event(self, event, callback): """ Connect a callback function with an event. This should be used in lieu of ``figure.canvas.mpl_connect`` since this function stores callback ids for later clean up. """ cid = self.canvas.mpl_connect(event, callback) self._cids.append(cid) def disconnect_events(self): """Disconnect all events created by this widget.""" for c in self._cids: self.canvas.mpl_disconnect(c) def _get_data_coords(self, event): """Return *event*'s data coordinates in this widget's Axes.""" # This method handles the possibility that event.inaxes != self.ax (which may # occur if multiple Axes are overlaid), in which case event.xdata/.ydata will # be wrong. Note that we still special-case the common case where # event.inaxes == self.ax and avoid re-running the inverse data transform, # because that can introduce floating point errors for synthetic events. 
return ((event.xdata, event.ydata) if event.inaxes is self.ax else self.ax.transData.inverted().transform((event.x, event.y))) def ignore(self, event): # docstring inherited return super().ignore(event) or self.canvas is None def _set_cursor(self, cursor): """Update the canvas cursor.""" self.ax.get_figure(root=True).canvas.set_cursor(cursor)
AxesWidget
python
celery__celery
t/unit/concurrency/test_eventlet.py
{ "start": 2476, "end": 4721 }
class ____(EventletCase): @pytest.fixture(autouse=True) def setup_patches(self, patching): self.GreenPool = patching('eventlet.greenpool.GreenPool') self.greenthread = patching('eventlet.greenthread') def test_pool(self): x = TaskPool() x.on_start() x.on_stop() x.on_apply(Mock()) x._pool = None x.on_stop() assert len(x._pool_map.keys()) == 1 assert x.getpid() @patch('celery.concurrency.eventlet.base') def test_apply_target(self, base): apply_target(Mock(), getpid=Mock()) base.apply_target.assert_called() def test_grow(self): x = TaskPool(10) x._pool = Mock(name='_pool') x.grow(2) assert x.limit == 12 x._pool.resize.assert_called_with(12) def test_shrink(self): x = TaskPool(10) x._pool = Mock(name='_pool') x.shrink(2) assert x.limit == 8 x._pool.resize.assert_called_with(8) def test_get_info(self): x = TaskPool(10) x._pool = Mock(name='_pool') assert x._get_info() == { 'implementation': 'celery.concurrency.eventlet:TaskPool', 'max-concurrency': 10, 'free-threads': x._pool.free(), 'running-threads': x._pool.running(), } def test_terminate_job(self): func = Mock() pool = TaskPool(10) pool.on_start() pool.on_apply(func) assert len(pool._pool_map.keys()) == 1 pid = list(pool._pool_map.keys())[0] greenlet = pool._pool_map[pid] pool.terminate_job(pid) greenlet.link.assert_called_once() greenlet.kill.assert_called_once() def test_make_killable_target(self): def valid_target(): return "some result..." def terminating_target(): raise GreenletExit() assert TaskPool._make_killable_target(valid_target)() == "some result..." assert TaskPool._make_killable_target(terminating_target)() == (False, None, None) def test_cleanup_after_job_finish(self): testMap = {'1': None} TaskPool._cleanup_after_job_finish(None, testMap, '1') assert len(testMap) == 0
test_TaskPool
python
astropy__astropy
astropy/utils/masked/tests/test_masked.py
{ "start": 2759, "end": 3150 }
class ____(ArraySetup): _data_cls = Longitude @classmethod def setup_class(cls): super().setup_class() cls.a = Longitude(cls.a, u.deg) cls.b = Longitude(cls.b, u.deg) cls.c = Longitude(cls.c, u.deg) # Note: Longitude does not work on structured arrays, so # leaving it as regular array (which just reruns some tests).
LongitudeSetup
python
sympy__sympy
sympy/codegen/ast.py
{ "start": 35751, "end": 35909 }
class ____(Type): """ Integer base type, contains no size information. """ __slots__ = () cast_nocheck = lambda self, i: Integer(int(i))
IntBaseType
python
apache__airflow
providers/google/tests/unit/google/cloud/transfers/test_gcs_to_bigquery.py
{ "start": 69534, "end": 85219 }
class ____: def _set_execute_complete(self, session, ti, **next_kwargs): ti.next_method = "execute_complete" ti.next_kwargs = next_kwargs session.flush() @pytest.mark.db_test @mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook")) def test_execute_without_external_table_async_should_execute_successfully( self, hook, create_task_instance, session ): """ Asserts that a task is deferred and a BigQueryInsertJobTrigger will be fired when Operator is executed in deferrable. """ hook.return_value.insert_job.return_value = MagicMock(job_id=REAL_JOB_ID, error_result=False) hook.return_value.generate_job_id.return_value = REAL_JOB_ID hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE) hook.return_value.get_job.return_value.result.return_value = ("1",) ti = create_task_instance( task_id=TASK_ID, bucket=TEST_BUCKET, source_objects=TEST_SOURCE_OBJECTS, destination_project_dataset_table=TEST_EXPLICIT_DEST, write_disposition=WRITE_DISPOSITION, schema_fields=SCHEMA_FIELDS, external_table=False, autodetect=True, deferrable=True, project_id=JOB_PROJECT_ID, ) ti.run(session=session) assert ti.state == TaskInstanceState.DEFERRED trigger_cls = session.scalar(select(Trigger.classpath).where(Trigger.id == ti.trigger_id)) assert trigger_cls == "airflow.providers.google.cloud.triggers.bigquery.BigQueryInsertJobTrigger" @pytest.mark.db_test def test_execute_without_external_table_async_should_throw_ex_when_event_status_error( self, create_task_instance, session ): """ Tests that an AirflowException is raised in case of error event. 
""" ti = create_task_instance( task_id=TASK_ID, bucket=TEST_BUCKET, source_objects=TEST_SOURCE_OBJECTS, destination_project_dataset_table=TEST_EXPLICIT_DEST, write_disposition=WRITE_DISPOSITION, schema_fields=SCHEMA_FIELDS, external_table=False, autodetect=True, deferrable=True, project_id=JOB_PROJECT_ID, ) self._set_execute_complete(session, ti, event={"status": "error", "message": "test failure message"}) with pytest.raises(AirflowException): ti.run() @pytest.mark.db_test def test_execute_logging_without_external_table_async_should_execute_successfully( self, caplog, create_task_instance, session ): """ Asserts that logging occurs as expected. """ ti = create_task_instance( task_id=TASK_ID, bucket=TEST_BUCKET, source_objects=TEST_SOURCE_OBJECTS, destination_project_dataset_table=TEST_EXPLICIT_DEST, write_disposition=WRITE_DISPOSITION, schema_fields=SCHEMA_FIELDS, external_table=False, autodetect=True, deferrable=True, project_id=JOB_PROJECT_ID, ) self._set_execute_complete( session, ti, event={"status": "success", "message": "Job completed", "job_id": job_id} ) with mock.patch.object(ti.task.log, "info") as mock_log_info: ti.run() mock_log_info.assert_called_with( "%s completed with response %s ", "test-gcs-to-bq-operator", "Job completed" ) @pytest.mark.db_test @mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook")) def test_execute_without_external_table_generate_job_id_async_should_execute_successfully( self, hook, create_task_instance, session ): hook.return_value.insert_job.side_effect = Conflict("any") hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE) job = MagicMock( job_id=REAL_JOB_ID, error_result=False, state="PENDING", done=lambda: False, ) hook.return_value.get_job.return_value = job ti = create_task_instance( task_id=TASK_ID, bucket=TEST_BUCKET, source_objects=TEST_SOURCE_OBJECTS, destination_project_dataset_table=TEST_EXPLICIT_DEST, write_disposition=WRITE_DISPOSITION, schema_fields=SCHEMA_FIELDS, reattach_states={"PENDING"}, 
external_table=False, autodetect=True, deferrable=True, project_id=JOB_PROJECT_ID, ) ti.run(session=session) assert ti.state == TaskInstanceState.DEFERRED hook.return_value.generate_job_id.assert_called_once_with( job_id=None, dag_id="adhoc_airflow", task_id=TASK_ID, logical_date=None, run_after=hook.return_value.get_run_after_or_logical_date(), configuration={}, force_rerun=True, ) @pytest.mark.db_test @mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook")) def test_execute_without_external_table_reattach_async_should_execute_successfully( self, hook, create_task_instance, session ): hook.return_value.generate_job_id.return_value = REAL_JOB_ID hook.return_value.insert_job.side_effect = Conflict("any") hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE) job = MagicMock( job_id=REAL_JOB_ID, error_result=False, state="PENDING", done=lambda: False, ) hook.return_value.get_job.return_value = job ti = create_task_instance( task_id=TASK_ID, bucket=TEST_BUCKET, source_objects=TEST_SOURCE_OBJECTS, destination_project_dataset_table=TEST_EXPLICIT_DEST, write_disposition=WRITE_DISPOSITION, schema_fields=SCHEMA_FIELDS, location=TEST_DATASET_LOCATION, reattach_states={"PENDING"}, external_table=False, autodetect=True, deferrable=True, project_id=JOB_PROJECT_ID, ) ti.run(session=session) assert ti.state == TaskInstanceState.DEFERRED hook.return_value.get_job.assert_called_once_with( location=TEST_DATASET_LOCATION, job_id=REAL_JOB_ID, project_id=JOB_PROJECT_ID, ) @pytest.mark.db_test @mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook")) def test_execute_without_external_table_force_rerun_async_should_execute_successfully( self, hook, create_task_instance ): hook.return_value.generate_job_id.return_value = f"{job_id}_{hash_}" hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE) hook.return_value.insert_job.side_effect = Conflict("any") job = MagicMock( job_id=REAL_JOB_ID, error_result=False, state="DONE", done=lambda: False, ) 
hook.return_value.get_job.return_value = job ti = create_task_instance( task_id=TASK_ID, bucket=TEST_BUCKET, source_objects=TEST_SOURCE_OBJECTS, destination_project_dataset_table=TEST_EXPLICIT_DEST, write_disposition=WRITE_DISPOSITION, schema_fields=SCHEMA_FIELDS, location=TEST_DATASET_LOCATION, reattach_states={"PENDING"}, external_table=False, autodetect=True, deferrable=True, project_id=JOB_PROJECT_ID, ) with pytest.raises(AirflowException) as exc: ti.run() expected_exception_msg = ( f"Job with id: {REAL_JOB_ID} already exists and is in {job.state} state. " f"If you want to force rerun it consider setting `force_rerun=True`." f"Or, if you want to reattach in this scenario add {job.state} to `reattach_states`" ) assert str(exc.value) == expected_exception_msg hook.return_value.get_job.assert_called_once_with( location=TEST_DATASET_LOCATION, job_id=REAL_JOB_ID, project_id=JOB_PROJECT_ID, ) @pytest.mark.db_test @mock.patch(GCS_TO_BQ_PATH.format("GCSHook")) @mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook")) def test_schema_fields_without_external_table_async_should_execute_successfully( self, bq_hook, gcs_hook, create_task_instance ): bq_hook.return_value.insert_job.return_value = MagicMock(job_id=REAL_JOB_ID, error_result=False) bq_hook.return_value.generate_job_id.return_value = REAL_JOB_ID bq_hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE) bq_hook.return_value.get_job.return_value.result.return_value = ("1",) gcs_hook.return_value.download.return_value = b"id,name\r\none,Anna" ti = create_task_instance( task_id=TASK_ID, bucket=TEST_BUCKET, source_objects=TEST_SOURCE_OBJECTS, destination_project_dataset_table=TEST_EXPLICIT_DEST, write_disposition=WRITE_DISPOSITION, schema_fields=SCHEMA_FIELDS, max_id_key=MAX_ID_KEY, external_table=False, autodetect=True, deferrable=True, project_id=JOB_PROJECT_ID, ) ti.run() assert ti.state == TaskInstanceState.DEFERRED calls = [ call( configuration={ "load": dict( autodetect=True, 
createDisposition="CREATE_IF_NEEDED", destinationTable={ "projectId": PROJECT_ID, "datasetId": DATASET, "tableId": TABLE, }, sourceFormat="CSV", skipLeadingRows=None, sourceUris=[f"gs://{TEST_BUCKET}/{TEST_SOURCE_OBJECTS}"], writeDisposition=WRITE_DISPOSITION, ignoreUnknownValues=False, allowQuotedNewlines=False, encoding="UTF-8", fieldDelimiter=",", schema={"fields": SCHEMA_FIELDS}, quote=None, ), }, project_id=JOB_PROJECT_ID, location=None, job_id=REAL_JOB_ID, timeout=None, retry=DEFAULT_RETRY, nowait=True, ) ] bq_hook.return_value.insert_job.assert_has_calls(calls) @pytest.mark.db_test @mock.patch(GCS_TO_BQ_PATH.format("GCSHook")) @mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook")) def test_schema_fields_int_without_external_table_async_should_execute_successfully( self, bq_hook, gcs_hook, create_task_instance ): bq_hook.return_value.insert_job.return_value = MagicMock(job_id=REAL_JOB_ID, error_result=False) bq_hook.return_value.generate_job_id.return_value = REAL_JOB_ID bq_hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE) bq_hook.return_value.get_job.return_value.result.return_value = ("1",) gcs_hook.return_value.download.return_value = b"id,name\r\n1,Anna" ti = create_task_instance( task_id=TASK_ID, bucket=TEST_BUCKET, source_objects=TEST_SOURCE_OBJECTS, destination_project_dataset_table=TEST_EXPLICIT_DEST, write_disposition=WRITE_DISPOSITION, schema_fields=SCHEMA_FIELDS, max_id_key=MAX_ID_KEY, external_table=False, autodetect=True, deferrable=True, project_id=JOB_PROJECT_ID, ) ti.run() assert ti.state == TaskInstanceState.DEFERRED calls = [ call( configuration={ "load": dict( autodetect=True, createDisposition="CREATE_IF_NEEDED", destinationTable={ "projectId": PROJECT_ID, "datasetId": DATASET, "tableId": TABLE, }, fieldDelimiter=",", quote=None, sourceFormat="CSV", skipLeadingRows=None, sourceUris=[f"gs://{TEST_BUCKET}/{TEST_SOURCE_OBJECTS}"], writeDisposition=WRITE_DISPOSITION, ignoreUnknownValues=False, allowQuotedNewlines=False, 
encoding="UTF-8", schema={ "fields": [ {"mode": "NULLABLE", "name": "id", "type": "STRING"}, {"mode": "NULLABLE", "name": "name", "type": "STRING"}, ], }, ), }, project_id=JOB_PROJECT_ID, location=None, job_id=REAL_JOB_ID, timeout=None, retry=DEFAULT_RETRY, nowait=True, ) ] bq_hook.return_value.insert_job.assert_has_calls(calls) @pytest.mark.db_test @mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook")) def test_execute_complete_reassigns_job_id(self, bq_hook, create_task_instance, session): """Assert that we use job_id from event after deferral.""" bq_hook.return_value.split_tablename.return_value = "", "", "" ti = create_task_instance( task_id=TASK_ID, bucket=TEST_BUCKET, source_objects=TEST_SOURCE_OBJECTS, destination_project_dataset_table=TEST_EXPLICIT_DEST, deferrable=True, job_id=None, ) generated_job_id = "123456" ti.next_method = "execute_complete" ti.next_kwargs = { "event": {"status": "success", "message": "Job completed", "job_id": generated_job_id}, } session.flush() assert ti.task.job_id is None ti.run(session=session) assert ti.task.job_id == generated_job_id @mock.patch(GCS_TO_BQ_PATH.format("BigQueryHook")) def test_force_delete_should_execute_successfully(self, hook): hook.return_value.insert_job.side_effect = [ MagicMock(job_id=REAL_JOB_ID, error_result=False), REAL_JOB_ID, ] hook.return_value.generate_job_id.return_value = REAL_JOB_ID hook.return_value.split_tablename.return_value = (PROJECT_ID, DATASET, TABLE) hook.return_value.get_job.return_value.result.return_value = ("1",) operator = GCSToBigQueryOperator( task_id=TASK_ID, bucket=TEST_BUCKET, source_objects=TEST_SOURCE_OBJECTS, destination_project_dataset_table=TEST_EXPLICIT_DEST, write_disposition=WRITE_DISPOSITION, schema_fields=SCHEMA_FIELDS_INT, autodetect=True, project_id=JOB_PROJECT_ID, force_delete=True, ) operator.execute(context=MagicMock()) hook.return_value.delete_table.assert_called_once_with(table_id=TEST_EXPLICIT_DEST)
TestAsyncGCSToBigQueryOperator
python
huggingface__transformers
tests/models/levit/test_image_processing_levit.py
{ "start": 3020, "end": 4775 }
class ____(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = LevitImageProcessor if is_vision_available() else None fast_image_processing_class = LevitImageProcessorFast if is_torchvision_available() else None def setUp(self): super().setUp() self.image_processor_tester = LevitImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): for image_processing_class in self.image_processor_list: image_processing = image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): for image_processing_class in self.image_processor_list: image_processor = image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 18}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
LevitImageProcessingTest
python
huggingface__transformers
src/transformers/models/videomae/modeling_videomae.py
{ "start": 12940, "end": 13563 }
class ____(nn.Module): def __init__(self, config: VideoMAEConfig): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = hidden_states + input_tensor return hidden_states # Copied from transformers.models.vit.modeling_vit.ViTLayer with ViT->VideoMAE,VIT->VIDEOMAE
VideoMAEOutput
python
lazyprogrammer__machine_learning_examples
cnn_class2/tf_resnet_convblock.py
{ "start": 1285, "end": 2310 }
class ____: def __init__(self, D): self.running_mean = tf.Variable(np.zeros(D, dtype=np.float32), trainable=False) self.running_var = tf.Variable(np.ones(D, dtype=np.float32), trainable=False) self.gamma = tf.Variable(np.ones(D, dtype=np.float32)) self.beta = tf.Variable(np.zeros(D, dtype=np.float32)) def forward(self, X): return tf.nn.batch_normalization( X, self.running_mean, self.running_var, self.beta, self.gamma, 1e-3 ) def copyFromKerasLayers(self, layer): # only 1 layer to copy from # order: # gamma, beta, moving mean, moving variance gamma, beta, running_mean, running_var = layer.get_weights() op1 = self.running_mean.assign(running_mean) op2 = self.running_var.assign(running_var) op3 = self.gamma.assign(gamma) op4 = self.beta.assign(beta) self.session.run((op1, op2, op3, op4)) def get_params(self): return [self.running_mean, self.running_var, self.gamma, self.beta]
BatchNormLayer
python
ipython__ipython
tests/test_pretty.py
{ "start": 5918, "end": 14450 }
class ____(type): def __new__(cls, name): return type.__new__(cls, name, (object,), {"name": name}) def __repr__(self): return "[CUSTOM REPR FOR CLASS %s]" % self.name ClassWithMeta = MetaClass("ClassWithMeta") def test_metaclass_repr(): output = pretty.pretty(ClassWithMeta) assert output == "[CUSTOM REPR FOR CLASS ClassWithMeta]" def test_unicode_repr(): u = "üniçodé" ustr = u class C(object): def __repr__(self): return ustr c = C() p = pretty.pretty(c) assert p == u p = pretty.pretty([c]) assert p == "[%s]" % u def test_basic_class(): def type_pprint_wrapper(obj, p, cycle): if obj is MyObj: type_pprint_wrapper.called = True return pretty._type_pprint(obj, p, cycle) type_pprint_wrapper.called = False stream = StringIO() printer = pretty.RepresentationPrinter(stream) printer.type_pprinters[type] = type_pprint_wrapper printer.pretty(MyObj) printer.flush() output = stream.getvalue() assert output == "%s.MyObj" % __name__ assert type_pprint_wrapper.called is True def test_collections_userlist(): # Create userlist with cycle a = UserList() a.append(a) cases = [ (UserList(), "UserList([])"), ( UserList(i for i in range(1000, 1020)), "UserList([1000,\n" " 1001,\n" " 1002,\n" " 1003,\n" " 1004,\n" " 1005,\n" " 1006,\n" " 1007,\n" " 1008,\n" " 1009,\n" " 1010,\n" " 1011,\n" " 1012,\n" " 1013,\n" " 1014,\n" " 1015,\n" " 1016,\n" " 1017,\n" " 1018,\n" " 1019])", ), (a, "UserList([UserList(...)])"), ] for obj, expected in cases: assert pretty.pretty(obj) == expected # TODO : pytest.mark.parametrise once nose is gone. def test_collections_defaultdict(): # Create defaultdicts with cycles a = defaultdict() a.default_factory = a b = defaultdict(list) b["key"] = b # Dictionary order cannot be relied on, test against single keys. 
cases = [ (defaultdict(list), "defaultdict(list, {})"), ( defaultdict(list, {"key": "-" * 50}), "defaultdict(list,\n" " {'key': '--------------------------------------------------'})", ), (a, "defaultdict(defaultdict(...), {})"), (b, "defaultdict(list, {'key': defaultdict(...)})"), ] for obj, expected in cases: assert pretty.pretty(obj) == expected # TODO : pytest.mark.parametrise once nose is gone. def test_collections_ordereddict(): # Create OrderedDict with cycle a = OrderedDict() a["key"] = a cases = [ (OrderedDict(), "OrderedDict()"), ( OrderedDict((i, i) for i in range(1000, 1010)), "OrderedDict([(1000, 1000),\n" " (1001, 1001),\n" " (1002, 1002),\n" " (1003, 1003),\n" " (1004, 1004),\n" " (1005, 1005),\n" " (1006, 1006),\n" " (1007, 1007),\n" " (1008, 1008),\n" " (1009, 1009)])", ), (a, "OrderedDict([('key', OrderedDict(...))])"), ] for obj, expected in cases: assert pretty.pretty(obj) == expected # TODO : pytest.mark.parametrise once nose is gone. def test_collections_deque(): # Create deque with cycle a = deque() a.append(a) cases = [ (deque(), "deque([])"), ( deque(i for i in range(1000, 1020)), "deque([1000,\n" " 1001,\n" " 1002,\n" " 1003,\n" " 1004,\n" " 1005,\n" " 1006,\n" " 1007,\n" " 1008,\n" " 1009,\n" " 1010,\n" " 1011,\n" " 1012,\n" " 1013,\n" " 1014,\n" " 1015,\n" " 1016,\n" " 1017,\n" " 1018,\n" " 1019])", ), (a, "deque([deque(...)])"), ] for obj, expected in cases: assert pretty.pretty(obj) == expected # TODO : pytest.mark.parametrise once nose is gone. def test_collections_counter(): class MyCounter(Counter): pass cases = [ (Counter(), "Counter()"), (Counter(a=1), "Counter({'a': 1})"), (MyCounter(a=1), "MyCounter({'a': 1})"), (Counter(a=1, c=22), "Counter({'c': 22, 'a': 1})"), ] for obj, expected in cases: assert pretty.pretty(obj) == expected # TODO : pytest.mark.parametrise once nose is gone. 
def test_mappingproxy(): MP = types.MappingProxyType underlying_dict = {} mp_recursive = MP(underlying_dict) underlying_dict[2] = mp_recursive underlying_dict[3] = underlying_dict cases = [ (MP({}), "mappingproxy({})"), (MP({None: MP({})}), "mappingproxy({None: mappingproxy({})})"), ( MP({k: k.upper() for k in string.ascii_lowercase}), "mappingproxy({'a': 'A',\n" " 'b': 'B',\n" " 'c': 'C',\n" " 'd': 'D',\n" " 'e': 'E',\n" " 'f': 'F',\n" " 'g': 'G',\n" " 'h': 'H',\n" " 'i': 'I',\n" " 'j': 'J',\n" " 'k': 'K',\n" " 'l': 'L',\n" " 'm': 'M',\n" " 'n': 'N',\n" " 'o': 'O',\n" " 'p': 'P',\n" " 'q': 'Q',\n" " 'r': 'R',\n" " 's': 'S',\n" " 't': 'T',\n" " 'u': 'U',\n" " 'v': 'V',\n" " 'w': 'W',\n" " 'x': 'X',\n" " 'y': 'Y',\n" " 'z': 'Z'})", ), (mp_recursive, "mappingproxy({2: {...}, 3: {2: {...}, 3: {...}}})"), (underlying_dict, "{2: mappingproxy({2: {...}, 3: {...}}), 3: {...}}"), ] for obj, expected in cases: assert pretty.pretty(obj) == expected # TODO : pytest.mark.parametrise once nose is gone. 
def test_simplenamespace(): SN = types.SimpleNamespace sn_recursive = SN() sn_recursive.first = sn_recursive sn_recursive.second = sn_recursive cases = [ (SN(), "namespace()"), (SN(x=SN()), "namespace(x=namespace())"), ( SN(a_long_name=[SN(s=string.ascii_lowercase)] * 3, a_short_name=None), "namespace(a_long_name=[namespace(s='abcdefghijklmnopqrstuvwxyz'),\n" " namespace(s='abcdefghijklmnopqrstuvwxyz'),\n" " namespace(s='abcdefghijklmnopqrstuvwxyz')],\n" " a_short_name=None)", ), (sn_recursive, "namespace(first=namespace(...), second=namespace(...))"), ] for obj, expected in cases: assert pretty.pretty(obj) == expected def test_pretty_environ(): dict_repr = pretty.pretty(dict(os.environ)) # reindent to align with 'environ' prefix dict_indented = dict_repr.replace("\n", "\n" + (" " * len("environ"))) env_repr = pretty.pretty(os.environ) assert env_repr == "environ" + dict_indented def test_function_pretty(): "Test pretty print of function" # posixpath is a pure python module, its interface is consistent # across Python distributions import posixpath assert pretty.pretty(posixpath.join) == "<function posixpath.join(a, *p)>" # custom function def meaning_of_life(question=None): if question: return 42 return "Don't panic" assert "meaning_of_life(question=None)" in pretty.pretty(meaning_of_life)
MetaClass
python
wandb__wandb
wandb/vendor/pygments/lexers/parsers.py
{ "start": 19846, "end": 20394 }
class ____(DelegatingLexer): """ `ANTLR`_ with Objective-C Target .. versionadded:: 1.1 """ name = 'ANTLR With ObjectiveC Target' aliases = ['antlr-objc'] filenames = ['*.G', '*.g'] def __init__(self, **options): super(AntlrObjectiveCLexer, self).__init__(ObjectiveCLexer, AntlrLexer, **options) def analyse_text(text): return AntlrLexer.analyse_text(text) and \ re.search(r'^\s*language\s*=\s*ObjC\s*;', text)
AntlrObjectiveCLexer
python
gevent__gevent
src/greentest/3.10/test_ssl.py
{ "start": 97823, "end": 99860 }
class ____(unittest.TestCase): def test_timeout_connect_ex(self): # Issue #12065: on a timeout, connect_ex() should return the original # errno (mimicking the behaviour of non-SSL sockets). with socket_helper.transient_internet(REMOTE_HOST): s = test_wrap_socket(socket.socket(socket.AF_INET), cert_reqs=ssl.CERT_REQUIRED, do_handshake_on_connect=False) self.addCleanup(s.close) s.settimeout(0.0000001) rc = s.connect_ex((REMOTE_HOST, 443)) if rc == 0: self.skipTest("REMOTE_HOST responded too quickly") elif rc == errno.ENETUNREACH: self.skipTest("Network unreachable.") self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK)) @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'Needs IPv6') def test_get_server_certificate_ipv6(self): with socket_helper.transient_internet('ipv6.google.com'): _test_get_server_certificate(self, 'ipv6.google.com', 443) _test_get_server_certificate_fail(self, 'ipv6.google.com', 443) def _test_get_server_certificate(test, host, port, cert=None): pem = ssl.get_server_certificate((host, port)) if not pem: test.fail("No server certificate on %s:%s!" % (host, port)) pem = ssl.get_server_certificate((host, port), ca_certs=cert) if not pem: test.fail("No server certificate on %s:%s!" % (host, port)) if support.verbose: sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem)) def _test_get_server_certificate_fail(test, host, port): try: pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE) except ssl.SSLError as x: #should fail if support.verbose: sys.stdout.write("%s\n" % x) else: test.fail("Got server certificate %s for %s:%s!" % (pem, host, port)) from test.ssl_servers import make_https_server
NetworkedTests
python
spyder-ide__spyder
spyder/widgets/dock.py
{ "start": 5549, "end": 7764 }
class ____(QWidget): """ Custom title bar for our dock widgets. Inspired from https://stackoverflow.com/a/40894225/438386 """ def __init__(self, parent): super().__init__(parent) button_size = QSize(20, 20) drag_button = DragButton(self, button_size) left_spacer = QWidget(self) left_spacer.setToolTip(drag_button.toolTip()) left_spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Minimum) right_spacer = QWidget(self) right_spacer.setToolTip(drag_button.toolTip()) right_spacer.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Minimum) close_button = CloseButton(self, button_size) close_button.clicked.connect(parent.remove_title_bar) hlayout = QHBoxLayout(self) hlayout.setSpacing(0) hlayout.setContentsMargins(0, 0, 0, 0) hlayout.addWidget(left_spacer) hlayout.addWidget(drag_button) hlayout.addWidget(right_spacer) hlayout.addWidget(close_button) self._apply_stylesheet(SpyderPalette.COLOR_BACKGROUND_3) def mouseReleaseEvent(self, event): self.setCursor(Qt.OpenHandCursor) self._apply_stylesheet(SpyderPalette.COLOR_BACKGROUND_5) QWidget.mouseReleaseEvent(self, event) def mousePressEvent(self, event): self.setCursor(Qt.ClosedHandCursor) self._apply_stylesheet(SpyderPalette.COLOR_BACKGROUND_6) QWidget.mousePressEvent(self, event) def enterEvent(self, event): # To signal that dock widgets can be dragged from here self.setCursor(Qt.OpenHandCursor) self._apply_stylesheet(SpyderPalette.COLOR_BACKGROUND_5) super().enterEvent(event) def leaveEvent(self, event): """Remove customizations when leaving widget.""" self.unsetCursor() self._apply_stylesheet(SpyderPalette.COLOR_BACKGROUND_3) super().leaveEvent(event) def _apply_stylesheet(self, bgcolor): css = qstylizer.style.StyleSheet() css.QWidget.setValues( height=PanesToolbarStyleSheet.BUTTON_HEIGHT, backgroundColor=bgcolor ) self.setStyleSheet(css.toString())
DockTitleBar
python
matplotlib__matplotlib
lib/matplotlib/widgets.py
{ "start": 68077, "end": 71066 }
class ____(AxesWidget): """ A crosshair cursor that spans the Axes and moves with mouse cursor. For the cursor to remain responsive you must keep a reference to it. Parameters ---------- ax : `~matplotlib.axes.Axes` The `~.axes.Axes` to attach the cursor to. horizOn : bool, default: True Whether to draw the horizontal line. vertOn : bool, default: True Whether to draw the vertical line. useblit : bool, default: False Use blitting for faster drawing if supported by the backend. See the tutorial :ref:`blitting` for details. Other Parameters ---------------- **lineprops `.Line2D` properties that control the appearance of the lines. See also `~.Axes.axhline`. Examples -------- See :doc:`/gallery/widgets/cursor`. """ def __init__(self, ax, *, horizOn=True, vertOn=True, useblit=False, **lineprops): super().__init__(ax) self.connect_event('motion_notify_event', self.onmove) self.connect_event('draw_event', self.clear) self.visible = True self.horizOn = horizOn self.vertOn = vertOn self.useblit = useblit and self.canvas.supports_blit if self.useblit: lineprops['animated'] = True self.lineh = ax.axhline(ax.get_ybound()[0], visible=False, **lineprops) self.linev = ax.axvline(ax.get_xbound()[0], visible=False, **lineprops) self.background = None self.needclear = False def clear(self, event): """Internal event handler to clear the cursor.""" if self.ignore(event) or self.canvas.is_saving(): return if self.useblit: self.background = self.canvas.copy_from_bbox(self.ax.bbox) def onmove(self, event): """Internal event handler to draw the cursor when the mouse moves.""" if self.ignore(event): return if not self.canvas.widgetlock.available(self): return if not self.ax.contains(event)[0]: self.linev.set_visible(False) self.lineh.set_visible(False) if self.needclear: self.canvas.draw() self.needclear = False return self.needclear = True xdata, ydata = self._get_data_coords(event) self.linev.set_xdata((xdata, xdata)) self.linev.set_visible(self.visible and self.vertOn) 
self.lineh.set_ydata((ydata, ydata)) self.lineh.set_visible(self.visible and self.horizOn) if not (self.visible and (self.vertOn or self.horizOn)): return # Redraw. if self.useblit: if self.background is not None: self.canvas.restore_region(self.background) self.ax.draw_artist(self.linev) self.ax.draw_artist(self.lineh) self.canvas.blit(self.ax.bbox) else: self.canvas.draw_idle()
Cursor
python
pytorch__pytorch
torch/testing/_internal/common_pruning.py
{ "start": 2214, "end": 2800 }
class ____(nn.Module): r"""Model with only Linear layers, alternating layers with biases, wrapped in a Sequential. Used to test pruned Linear-Bias-Linear fusion.""" def __init__(self) -> None: super().__init__() self.seq = nn.Sequential( nn.Linear(7, 5, bias=True), nn.Linear(5, 6, bias=False), nn.Linear(6, 3, bias=True), nn.Linear(3, 3, bias=True), nn.Linear(3, 10, bias=False), ) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.seq(x) return x
LinearBias
python
huggingface__transformers
src/transformers/models/colqwen2/modeling_colqwen2.py
{ "start": 1598, "end": 2812 }
class ____(PreTrainedModel): config: ColQwen2Config base_model_prefix = "model" input_modalities = ("image", "text") _no_split_modules = [] _supports_sdpa = True _supports_flash_attn = True _supports_flex_attn = True @torch.no_grad() def _init_weights(self, module): std = ( self.config.initializer_range if hasattr(self.config, "initializer_range") else self.config.vlm_config.text_config.initializer_range ) if isinstance(module, (nn.Linear, nn.Conv2d)): init.normal_(module.weight, mean=0.0, std=std) if module.bias is not None: init.zeros_(module.bias) elif isinstance(module, nn.Embedding): init.normal_(module.weight, mean=0.0, std=std) # Here we need the check explicitly, as we slice the weight in the `zeros_` call, so it looses the flag if module.padding_idx is not None and not getattr(module.weight, "_is_hf_initialized", False): init.zeros_(module.weight[module.padding_idx]) @dataclass @auto_docstring( custom_intro=""" Base class for ColQwen2 embeddings output. """ )
ColQwen2PreTrainedModel
python
great-expectations__great_expectations
great_expectations/profile/base.py
{ "start": 3628, "end": 4883 }
class ____(metaclass=abc.ABCMeta): """ Profiler creates suites from various sources of truth. These sources of truth can be data or non-data sources such as DDLs. When implementing a Profiler ensure that you: - Implement a . _profile() method - Optionally implement .validate() method that verifies you are running on the right kind of object. You should raise an appropriate Exception if the object is not valid. """ def __init__(self, configuration: Optional[dict] = None) -> None: self.configuration = configuration def validate( # noqa: B027 # empty-method-without-abstract-decorator self, item_to_validate: Any ) -> None: """Raise an exception if `item_to_validate` cannot be profiled. Args: item_to_validate: The item to validate. """ pass def profile(self, item_to_profile: Any, suite_name: Optional[str] = None) -> ExpectationSuite: self.validate(item_to_profile) expectation_suite = self._profile(item_to_profile, suite_name=suite_name) return expectation_suite @abc.abstractmethod def _profile(self, item_to_profile: Any, suite_name: Optional[str] = None) -> ExpectationSuite: pass
Profiler
python
kamyu104__LeetCode-Solutions
Python/populating-next-right-pointers-in-each-node.py
{ "start": 761, "end": 1122 }
class ____(object): # @param root, a tree node # @return nothing def connect(self, root): if root is None: return if root.left: root.left.next = root.right if root.right and root.next: root.right.next = root.next.left self.connect(root.left) self.connect(root.right)
Solution2
python
gawel__pyquery
tests/test_pyquery.py
{ "start": 31243, "end": 31485 }
class ____(TestCase): def test_get(self): d = pq(url='http://ru.wikipedia.org/wiki/Заглавная_страница', method='get') print(d) self.assertEqual(d('#pt-login').text(), 'Войти')
TestWebScrappingEncoding
python
huggingface__transformers
src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py
{ "start": 18340, "end": 21259 }
class ____(GradientCheckpointingLayer): def __init__(self, config, layer_idx=None): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = RobertaPreLayerNormAttention(config, is_causal=config.is_decoder, layer_idx=layer_idx) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = RobertaPreLayerNormAttention( config, is_causal=False, layer_idx=layer_idx, is_cross_attention=True, ) self.intermediate = RobertaPreLayerNormIntermediate(config) self.output = RobertaPreLayerNormOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Cache] = None, cache_position: Optional[torch.Tensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple[torch.Tensor]: self_attention_output, _ = self.attention( hidden_states, attention_mask, past_key_values=past_key_values, cache_position=cache_position, **kwargs, ) attention_output = self_attention_output if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) cross_attention_output, _ = self.crossattention( self_attention_output, None, # attention_mask encoder_hidden_states, encoder_attention_mask, past_key_values=past_key_values, **kwargs, ) attention_output = cross_attention_output layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) return layer_output def feed_forward_chunk(self, 
attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->RobertaPreLayerNorm
RobertaPreLayerNormLayer
python
sphinx-doc__sphinx
tests/roots/test-ext-autosummary-recursive/package/module.py
{ "start": 33, "end": 161 }
class ____: def __init__(self): pass def bar(self): pass @property def baz(self): pass
Foo
python
run-llama__llama_index
llama-index-core/llama_index/core/evaluation/retrieval/evaluator.py
{ "start": 1834, "end": 3717 }
class ____(BaseRetrievalEvaluator): """ Retriever evaluator. This module will evaluate a retriever using a set of metrics. Args: metrics (List[BaseRetrievalMetric]): Sequence of metrics to evaluate retriever: Retriever to evaluate. node_postprocessors (Optional[List[BaseNodePostprocessor]]): Post-processor to apply after retrieval. """ retriever: BaseRetriever = Field(..., description="Retriever to evaluate") node_postprocessors: Optional[List[SerializeAsAny[BaseNodePostprocessor]]] = Field( default=None, description="Optional post-processor" ) async def _aget_retrieved_ids_and_texts( self, query: str, mode: RetrievalEvalMode = RetrievalEvalMode.TEXT ) -> Tuple[List[str], List[str]]: """Get retrieved ids.""" retrieved_nodes = await self.retriever.aretrieve(query) image_nodes: List[ImageNode] = [] text_nodes: List[TextNode] = [] if self.node_postprocessors: for node_postprocessor in self.node_postprocessors: retrieved_nodes = node_postprocessor.postprocess_nodes( retrieved_nodes, query_str=query ) for scored_node in retrieved_nodes: node = scored_node.node if isinstance(node, ImageNode): image_nodes.append(node) if isinstance(node, TextNode): text_nodes.append(node) if mode == "text": return ( [node.node_id for node in text_nodes], [node.text for node in text_nodes], ) elif mode == "image": return ( [node.node_id for node in image_nodes], [node.text for node in image_nodes], ) else: raise ValueError("Unsupported mode.")
MultiModalRetrieverEvaluator
python
getsentry__sentry
src/sentry/auth/providers/github/client.py
{ "start": 391, "end": 2025 }
class ____: def __init__(self, access_token: str) -> None: self.http = http.build_session() self.access_token = access_token def __enter__(self) -> GitHubClient: return self def __exit__( self, exc_type: type | None, exc_value: Exception | None, traceback: TracebackType | None ) -> None: self.http.close() def _request(self, path: str) -> dict[str, Any] | list[dict[str, Any]]: headers = {"Authorization": f"token {self.access_token}"} try: req = self.http.get( f"https://{API_DOMAIN}/{path.lstrip('/')}", headers=headers, ) except RequestException as e: raise GitHubApiError(f"{e}", status=getattr(e, "status_code", 0)) if req.status_code < 200 or req.status_code >= 300: raise GitHubApiError(req.content, status=req.status_code) return orjson.loads(req.content) def get_org_list(self) -> list[dict[str, Any]]: res = self._request("/user/orgs") if not isinstance(res, list): return [res] return res def get_user(self) -> dict[str, Any] | list[dict[str, Any]]: return self._request("/user") def get_user_emails(self) -> list[dict[str, Any]]: res = self._request("/user/emails") if not isinstance(res, list): return [res] return res def is_org_member(self, org_id: int) -> bool: org_id_str = str(org_id) for o in self.get_org_list(): if str(o["id"]) == org_id_str: return True return False
GitHubClient
python
django__django
tests/auth_tests/test_mixins.py
{ "start": 525, "end": 618 }
class ____(UserPassesTestMixin): def test_func(self): return False
AlwaysFalseMixin
python
ray-project__ray
python/ray/util/client/runtime_context.py
{ "start": 173, "end": 1886 }
class ____: """Emulates the properties of the ray._private.worker object for the client""" def __init__(self, worker): assert worker is not None self.worker = worker def build_runtime_context(self) -> "RuntimeContext": """Creates a RuntimeContext backed by the properites of this API""" # Defer the import of RuntimeContext until needed to avoid cycles from ray.runtime_context import RuntimeContext return RuntimeContext(self) def _fetch_runtime_context(self): import ray.core.generated.ray_client_pb2 as ray_client_pb2 return self.worker.get_cluster_info( ray_client_pb2.ClusterInfoType.RUNTIME_CONTEXT ) @property def mode(self): from ray._private.worker import SCRIPT_MODE return SCRIPT_MODE @property def current_job_id(self) -> "JobID": from ray import JobID return JobID(self._fetch_runtime_context().job_id) @property def current_node_id(self) -> "NodeID": from ray import NodeID return NodeID(self._fetch_runtime_context().node_id) @property def namespace(self) -> str: return self._fetch_runtime_context().namespace @property def should_capture_child_tasks_in_placement_group(self) -> bool: return self._fetch_runtime_context().capture_client_tasks @property def runtime_env(self) -> str: return self._fetch_runtime_context().runtime_env def check_connected(self) -> bool: return self.worker.ping_server() @property def gcs_client(self) -> str: return SimpleNamespace(address=self._fetch_runtime_context().gcs_address)
_ClientWorkerPropertyAPI
python
sqlalchemy__sqlalchemy
test/sql/test_selectable.py
{ "start": 58630, "end": 64270 }
class ____(fixtures.TestBase): def test_join_uninit(self): a = table("a", column("x")) b = table("b", column("y")) j = a.join(b, a.c.x == b.c.y) q = column("q") b.append_column(q) j._refresh_for_new_column(q) assert j.c.b_q is q def test_join_init(self): a = table("a", column("x")) b = table("b", column("y")) j = a.join(b, a.c.x == b.c.y) j.c q = column("q") b.append_column(q) j._refresh_for_new_column(q) assert j.c.b_q is q def test_join_samename_init(self): a = table("a", column("x")) b = table("b", column("y")) j = a.join(b, a.c.x == b.c.y) j.c q = column("x") b.append_column(q) j._refresh_for_new_column(q) assert j.c.b_x is q def test_select_samename_init(self): a = table("a", column("x")) b = table("b", column("y")) s = select(a, b).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) s.selected_columns q = column("x") b.append_column(q) s._refresh_for_new_column(q) assert q in s.selected_columns.b_x.proxy_set def test_alias_alias_samename_init(self): a = table("a", column("x")) b = table("b", column("y")) s1 = ( select(a, b) .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) .alias() ) s2 = s1.alias() s1.c s2.c q = column("x") b.append_column(q) assert "_columns" in s2.__dict__ s2._refresh_for_new_column(q) assert "_columns" not in s2.__dict__ is_(s1.corresponding_column(s2.c.b_x), s1.c.b_x) def test_aliased_select_samename_uninit(self): a = table("a", column("x")) b = table("b", column("y")) s = ( select(a, b) .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) .alias() ) q = column("x") b.append_column(q) s._refresh_for_new_column(q) assert q in s.c.b_x.proxy_set def test_aliased_select_samename_init(self): a = table("a", column("x")) b = table("b", column("y")) s = ( select(a, b) .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) .alias() ) s.c q = column("x") b.append_column(q) s._refresh_for_new_column(q) assert q in s.c.b_x.proxy_set def test_aliased_select_irrelevant(self): a = table("a", column("x")) b = table("b", column("y")) c = table("c", column("z")) s = ( 
select(a, b) .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) .alias() ) s.c q = column("x") c.append_column(q) s._refresh_for_new_column(q) assert "c_x" not in s.c def test_aliased_select_no_cols_clause(self): a = table("a", column("x")) s = ( select(a.c.x) .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) .alias() ) s.c q = column("q") a.append_column(q) s._refresh_for_new_column(q) assert "a_q" not in s.c def test_union_uninit(self): a = table("a", column("x")) s1 = select(a) s2 = select(a) s3 = s1.union(s2) q = column("q") a.append_column(q) s3._refresh_for_new_column(q) assert a.c.q in s3.selected_columns.q.proxy_set def test_union_init(self): a = table("a", column("x")) s1 = select(a) s2 = select(a) s3 = s1.union(s2) s3.selected_columns q = column("q") a.append_column(q) s3._refresh_for_new_column(q) assert a.c.q in s3.selected_columns.q.proxy_set def test_nested_join_uninit(self): a = table("a", column("x")) b = table("b", column("y")) c = table("c", column("z")) j = a.join(b, a.c.x == b.c.y).join(c, b.c.y == c.c.z) q = column("q") b.append_column(q) j._refresh_for_new_column(q) assert j.c.b_q is q def test_nested_join_init(self): a = table("a", column("x")) b = table("b", column("y")) c = table("c", column("z")) j = a.join(b, a.c.x == b.c.y).join(c, b.c.y == c.c.z) j.c q = column("q") b.append_column(q) j._refresh_for_new_column(q) assert j.c.b_q is q def test_fk_table(self): m = MetaData() fk = ForeignKey("x.id") Table("x", m, Column("id", Integer)) a = Table("a", m, Column("x", Integer, fk)) a.c q = Column("q", Integer) a.append_column(q) a._refresh_for_new_column(q) eq_(a.foreign_keys, {fk}) fk2 = ForeignKey("g.id") p = Column("p", Integer, fk2) a.append_column(p) a._refresh_for_new_column(p) eq_(a.foreign_keys, {fk, fk2}) def test_fk_join(self): m = MetaData() fk = ForeignKey("x.id") Table("x", m, Column("id", Integer)) a = Table("a", m, Column("x", Integer, fk)) b = Table("b", m, Column("y", Integer)) j = a.join(b, a.c.x == b.c.y) j.c q = Column("q", 
Integer) b.append_column(q) j._refresh_for_new_column(q) eq_(j.foreign_keys, {fk}) fk2 = ForeignKey("g.id") p = Column("p", Integer, fk2) b.append_column(p) j._refresh_for_new_column(p) eq_(j.foreign_keys, {fk, fk2})
RefreshForNewColTest
python
openai__openai-python
src/openai/types/beta/threads/run.py
{ "start": 1640, "end": 2218 }
class ____(BaseModel): type: Literal["auto", "last_messages"] """The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`. """ last_messages: Optional[int] = None """ The number of most recent messages from the thread when constructing the context for the run. """
TruncationStrategy
python
pandas-dev__pandas
pandas/core/methods/describe.py
{ "start": 2422, "end": 3010 }
class ____(ABC): """Abstract class for describing dataframe or series. Parameters ---------- obj : Series or DataFrame Object to be described. """ def __init__(self, obj: DataFrame | Series) -> None: self.obj = obj @abstractmethod def describe(self, percentiles: Sequence[float] | np.ndarray) -> DataFrame | Series: """Do describe either series or dataframe. Parameters ---------- percentiles : list-like of numbers The percentiles to include in the output. """
NDFrameDescriberAbstract
python
dask__distributed
distributed/dashboard/components/shared.py
{ "start": 3770, "end": 4922 }
class ____(DashboardComponent): """Time plots of the current resource usage on the cluster This is two plots, one for CPU and Memory and another for Network I/O """ def __init__(self, **kwargs): state = profile.create() data = profile.plot_data(state, profile_interval) self.states = data.pop("states") self.root, self.source = profile.plot_figure(data, **kwargs) @without_property_validation @log_errors def cb(attr, old, new): try: ind = new.indices[0] except IndexError: return data = profile.plot_data(self.states[ind], profile_interval) del self.states[:] self.states.extend(data.pop("states")) update(self.source, data) self.source.selected = old self.source.selected.on_change("indices", cb) @without_property_validation @log_errors def update(self, state): self.state = state data = profile.plot_data(self.state, profile_interval) self.states = data.pop("states") update(self.source, data)
ProfilePlot
python
simonw__datasette
datasette/facets.py
{ "start": 5217, "end": 11294 }
class ____(Facet): type = "column" async def suggest(self): row_count = await self.get_row_count() columns = await self.get_columns(self.sql, self.params) facet_size = self.get_facet_size() suggested_facets = [] already_enabled = [c["config"]["simple"] for c in self.get_configs()] for column in columns: if column in already_enabled: continue suggested_facet_sql = """ with limited as (select * from ({sql}) limit {suggest_consider}) select {column} as value, count(*) as n from limited where value is not null group by value limit {limit} """.format( column=escape_sqlite(column), sql=self.sql, limit=facet_size + 1, suggest_consider=self.suggest_consider, ) distinct_values = None try: distinct_values = await self.ds.execute( self.database, suggested_facet_sql, self.params, truncate=False, custom_time_limit=self.ds.setting("facet_suggest_time_limit_ms"), ) num_distinct_values = len(distinct_values) if ( 1 < num_distinct_values < row_count and num_distinct_values <= facet_size # And at least one has n > 1 and any(r["n"] > 1 for r in distinct_values) ): suggested_facets.append( { "name": column, "toggle_url": self.ds.absolute_url( self.request, self.ds.urls.path( path_with_added_args( self.request, {"_facet": column} ) ), ), } ) except QueryInterrupted: continue return suggested_facets async def get_row_count(self): if self.row_count is None: self.row_count = ( await self.ds.execute( self.database, f"select count(*) from (select * from ({self.sql}) limit {self.suggest_consider})", self.params, ) ).rows[0][0] return self.row_count async def facet_results(self): facet_results = [] facets_timed_out = [] qs_pairs = self.get_querystring_pairs() facet_size = self.get_facet_size() for source_and_config in self.get_configs(): config = source_and_config["config"] source = source_and_config["source"] column = config.get("column") or config["simple"] facet_sql = """ select {col} as value, count(*) as count from ( {sql} ) where {col} is not null group by {col} order by count desc, 
value limit {limit} """.format( col=escape_sqlite(column), sql=self.sql, limit=facet_size + 1 ) try: facet_rows_results = await self.ds.execute( self.database, facet_sql, self.params, truncate=False, custom_time_limit=self.ds.setting("facet_time_limit_ms"), ) facet_results_values = [] facet_results.append( { "name": column, "type": self.type, "hideable": source != "metadata", "toggle_url": self.ds.urls.path( path_with_removed_args(self.request, {"_facet": column}) ), "results": facet_results_values, "truncated": len(facet_rows_results) > facet_size, } ) facet_rows = facet_rows_results.rows[:facet_size] if self.table: # Attempt to expand foreign keys into labels values = [row["value"] for row in facet_rows] expanded = await self.ds.expand_foreign_keys( self.request.actor, self.database, self.table, column, values ) else: expanded = {} for row in facet_rows: column_qs = column if column.startswith("_"): column_qs = "{}__exact".format(column) selected = (column_qs, str(row["value"])) in qs_pairs if selected: toggle_path = path_with_removed_args( self.request, {column_qs: str(row["value"])} ) else: toggle_path = path_with_added_args( self.request, {column_qs: row["value"]} ) facet_results_values.append( { "value": row["value"], "label": expanded.get((column, row["value"]), row["value"]), "count": row["count"], "toggle_url": self.ds.absolute_url( self.request, self.ds.urls.path(toggle_path) ), "selected": selected, } ) except QueryInterrupted: facets_timed_out.append(column) return facet_results, facets_timed_out
ColumnFacet
python
django__django
django/db/backends/sqlite3/features.py
{ "start": 259, "end": 6886 }
class ____(BaseDatabaseFeatures): minimum_database_version = (3, 37) test_db_allows_multiple_connections = False supports_unspecified_pk = True supports_timezones = False supports_transactions = True atomic_transactions = False can_rollback_ddl = True can_create_inline_fk = False requires_literal_defaults = True can_clone_databases = True supports_temporal_subtraction = True ignores_table_name_case = True supports_cast_with_precision = False time_cast_precision = 3 can_release_savepoints = True has_case_insensitive_like = True supports_parentheses_in_compound = False can_defer_constraint_checks = True supports_over_clause = True supports_frame_range_fixed_distance = True supports_frame_exclusion = True supports_aggregate_filter_clause = True supports_aggregate_order_by_clause = Database.sqlite_version_info >= (3, 44, 0) supports_aggregate_distinct_multiple_argument = False supports_any_value = True order_by_nulls_first = True supports_json_field_contains = False supports_update_conflicts = True supports_update_conflicts_with_target = True supports_stored_generated_columns = True supports_virtual_generated_columns = True test_collations = { "ci": "nocase", "cs": "binary", "non_default": "nocase", "virtual": "nocase", } django_test_expected_failures = { # The django_format_dtdelta() function doesn't properly handle mixed # Date/DateTime fields and timedeltas. "expressions.tests.FTimeDeltaTests.test_mixed_comparisons1", } insert_test_table_with_defaults = 'INSERT INTO {} ("null") VALUES (1)' supports_default_keyword_in_insert = False supports_unlimited_charfield = True supports_no_precision_decimalfield = True can_return_columns_from_insert = True can_return_rows_from_bulk_insert = True can_return_rows_from_update = True @cached_property def django_test_skips(self): skips = { "SQLite stores values rounded to 15 significant digits.": { "model_fields.test_decimalfield.DecimalFieldTests." 
"test_fetch_from_db_without_float_rounding", }, "SQLite naively remakes the table on field alteration.": { "schema.tests.SchemaTests.test_unique_no_unnecessary_fk_drops", "schema.tests.SchemaTests.test_unique_and_reverse_m2m", "schema.tests.SchemaTests." "test_alter_field_default_doesnt_perform_queries", "schema.tests.SchemaTests." "test_rename_column_renames_deferred_sql_references", }, "SQLite doesn't support negative precision for ROUND().": { "db_functions.math.test_round.RoundTests." "test_null_with_negative_precision", "db_functions.math.test_round.RoundTests." "test_decimal_with_negative_precision", "db_functions.math.test_round.RoundTests." "test_float_with_negative_precision", "db_functions.math.test_round.RoundTests." "test_integer_with_negative_precision", }, "The actual query cannot be determined on SQLite": { "backends.base.test_base.ExecuteWrapperTests.test_wrapper_debug", }, } if self.connection.is_in_memory_db(): skips.update( { "the sqlite backend's close() method is a no-op when using an " "in-memory database": { "servers.test_liveserverthread.LiveServerThreadTest." "test_closes_connections", "servers.tests.LiveServerTestCloseConnectionTest." "test_closes_connections", }, "For SQLite in-memory tests, closing the connection destroys " "the database.": { "test_utils.tests.AssertNumQueriesUponConnectionTests." "test_ignores_connection_configuration_queries", }, } ) else: skips.update( { "Only connections to in-memory SQLite databases are passed to the " "server thread.": { "servers.tests.LiveServerInMemoryDatabaseLockTest." "test_in_memory_database_lock", }, "multiprocessing's start method is checked only for in-memory " "SQLite databases": { "backends.sqlite.test_creation.TestDbSignatureTests." "test_get_test_db_clone_settings_not_supported", }, } ) if Database.sqlite_version_info < (3, 47): skips.update( { "SQLite does not parse escaped double quotes in the JSON path " "notation": { "model_fields.test_jsonfield.TestQuerying." 
"test_lookups_special_chars_double_quotes", }, } ) return skips @cached_property def introspected_field_types(self): return { **super().introspected_field_types, "BigAutoField": "AutoField", "DurationField": "BigIntegerField", "GenericIPAddressField": "CharField", "SmallAutoField": "AutoField", } @property def max_query_params(self): """ SQLite has a variable limit per query. The limit can be changed using the SQLITE_MAX_VARIABLE_NUMBER compile-time option (which defaults to 32766) or lowered per connection at run-time with setlimit(SQLITE_LIMIT_VARIABLE_NUMBER, N). """ return self.connection.connection.getlimit(sqlite3.SQLITE_LIMIT_VARIABLE_NUMBER) @cached_property def supports_json_field(self): with self.connection.cursor() as cursor: try: with transaction.atomic(self.connection.alias): cursor.execute('SELECT JSON(\'{"a": "b"}\')') except OperationalError: return False return True can_introspect_json_field = property(operator.attrgetter("supports_json_field")) has_json_object_function = property(operator.attrgetter("supports_json_field"))
DatabaseFeatures
python
pytorch__pytorch
benchmarks/transformer/score_mod.py
{ "start": 5677, "end": 5763 }
class ____: eager_time: float compiled_time: float @dataclass(frozen=True)
Times
python
PyCQA__pylint
tests/functional/s/slots_checks.py
{ "start": 3107, "end": 3321 }
class ____(BaseWithSlots): __slots__ = ("c",) # Is in base __slots__ a: int # Not in any base __slots__ d: int # [declare-non-slot] e: str= "AnnAssign.value is not None"
DerivedWithMoreSlots
python
getsentry__sentry
tests/sentry/hybridcloud/tasks/test_deliver_webhooks.py
{ "start": 1044, "end": 11012 }
class ____(TestCase): @patch("sentry.hybridcloud.tasks.deliver_webhooks.drain_mailbox") def test_schedule_no_records(self, mock_deliver: MagicMock) -> None: schedule_webhook_delivery() assert mock_deliver.delay.call_count == 0 @patch("sentry.hybridcloud.tasks.deliver_webhooks.drain_mailbox") def test_schedule_multiple_mailboxes(self, mock_deliver: MagicMock) -> None: webhook_one = self.create_webhook_payload( mailbox_name="github:123", region_name="us", ) webhook_two = self.create_webhook_payload( mailbox_name="github:256", region_name="us", ) assert webhook_one.schedule_for < timezone.now() assert webhook_two.schedule_for < timezone.now() schedule_webhook_delivery() assert mock_deliver.delay.call_count == 2 @patch("sentry.hybridcloud.tasks.deliver_webhooks.drain_mailbox") def test_schedule_one_mailbox_multiple_messages(self, mock_deliver: MagicMock) -> None: webhook_one = self.create_webhook_payload( mailbox_name="github:123", region_name="us", ) self.create_webhook_payload( mailbox_name="github:123", region_name="us", ) schedule_webhook_delivery() assert mock_deliver.delay.call_count == 1 mock_deliver.delay.assert_called_with(webhook_one.id) @patch("sentry.hybridcloud.tasks.deliver_webhooks.drain_mailbox") def test_schedule_mailbox_scheduled_later(self, mock_deliver: MagicMock) -> None: webhook_one = self.create_webhook_payload( mailbox_name="github:123", region_name="us", ) self.create_webhook_payload( mailbox_name="github:256", region_name="us", schedule_for=timezone.now() + timedelta(minutes=1), ) schedule_webhook_delivery() assert mock_deliver.delay.call_count == 1 mock_deliver.delay.assert_called_with(webhook_one.id) @patch("sentry.hybridcloud.tasks.deliver_webhooks.drain_mailbox") def test_schedule_updates_mailbox_attributes(self, mock_deliver: MagicMock) -> None: webhook_one = self.create_webhook_payload( mailbox_name="github:123", region_name="us", ) webhook_two = self.create_webhook_payload( mailbox_name="github:123", region_name="us", ) 
schedule_webhook_delivery() webhook_one.refresh_from_db() webhook_two.refresh_from_db() # Scheduler should move all messages forward assert webhook_one.attempts == 0 assert webhook_one.schedule_for > timezone.now() assert webhook_two.attempts == 0 assert webhook_two.schedule_for > timezone.now() assert mock_deliver.delay.call_count == 1 mock_deliver.delay.assert_called_with(webhook_one.id) @responses.activate @override_regions(region_config) def test_schedule_mailbox_with_more_than_batch_size_records(self) -> None: responses.add( responses.POST, "http://us.testserver/extensions/github/webhook/", body=ReadTimeout() ) num_records = 55 for _ in range(0, num_records): self.create_webhook_payload( mailbox_name="github:123", region_name="us", ) # Run the task that is spawned to provide some integration test coverage. with self.tasks(): schedule_webhook_delivery() # First attempt will fail rescheduling messages. assert len(responses.calls) == 1 assert WebhookPayload.objects.count() == num_records head = WebhookPayload.objects.all().order_by("id").first() assert head assert head.schedule_for > timezone.now() # Do another scheduled run. This should not make any forwarding requests with self.tasks(): schedule_webhook_delivery() assert len(responses.calls) == 1 # Head doesn't move. 
new_head = WebhookPayload.objects.all().order_by("id").first() assert new_head assert head.schedule_for == new_head.schedule_for # No messages delivered assert WebhookPayload.objects.count() == num_records @patch("sentry.hybridcloud.tasks.deliver_webhooks.drain_mailbox_parallel") def test_schedule_mailbox_parallel_task(self, mock_deliver: MagicMock) -> None: for _ in range(0, int(MAX_MAILBOX_DRAIN / 3 + 1)): self.create_webhook_payload( mailbox_name="github:123", region_name="us", ) schedule_webhook_delivery() assert mock_deliver.delay.call_count == 1 @patch("sentry.hybridcloud.tasks.deliver_webhooks.drain_mailbox") @patch( "sentry.hybridcloud.tasks.deliver_webhooks.PROVIDER_PRIORITY", {"stripe": 1, "github": 2, "slack": 3}, ) def test_schedule_prioritizes_by_provider(self, mock_deliver: MagicMock) -> None: """Test that webhooks are prioritized based on provider priority.""" # Create webhooks with different providers (intentionally in non-priority order) slack_webhook = self.create_webhook_payload( mailbox_name="slack:123", provider="slack", region_name="us", ) github_webhook = self.create_webhook_payload( mailbox_name="github:123", provider="github", region_name="us", ) stripe_webhook = self.create_webhook_payload( mailbox_name="stripe:123", provider="stripe", region_name="us", ) # Run the scheduler schedule_webhook_delivery() # Verify webhooks were processed in priority order (stripe first, then github, then slack) assert mock_deliver.delay.call_count == 3 # Check the order of calls call_args_list = [call[0][0] for call in mock_deliver.delay.call_args_list] # Stripe (priority 1) should be first assert call_args_list[0] == stripe_webhook.id # GitHub (priority 2) should be second assert call_args_list[1] == github_webhook.id # Slack (priority 3) should be last assert call_args_list[2] == slack_webhook.id @patch("sentry.hybridcloud.tasks.deliver_webhooks.drain_mailbox") @patch( "sentry.hybridcloud.tasks.deliver_webhooks.PROVIDER_PRIORITY", {"stripe": 1, "github": 2} 
) @patch("sentry.hybridcloud.tasks.deliver_webhooks.DEFAULT_PROVIDER_PRIORITY", 10) def test_schedule_handles_unknown_providers(self, mock_deliver: MagicMock) -> None: """Test that webhooks with unknown providers use the default priority.""" # Create webhooks with known and unknown providers unknown_webhook = self.create_webhook_payload( mailbox_name="unknown:123", provider="unknown", region_name="us", ) stripe_webhook = self.create_webhook_payload( mailbox_name="stripe:123", provider="stripe", region_name="us", ) # Run the scheduler schedule_webhook_delivery() # Verify webhooks were processed in priority order (stripe first, then unknown) assert mock_deliver.delay.call_count == 2 # Check the order of calls call_args_list = [call[0][0] for call in mock_deliver.delay.call_args_list] # Stripe (priority 1) should be first assert call_args_list[0] == stripe_webhook.id # Unknown (default priority 10) should be last assert call_args_list[1] == unknown_webhook.id @patch("sentry.hybridcloud.tasks.deliver_webhooks.drain_mailbox") @patch( "sentry.hybridcloud.tasks.deliver_webhooks.PROVIDER_PRIORITY", {"stripe": 1, "github": 2} ) def test_schedule_handles_null_provider(self, mock_deliver: MagicMock) -> None: """Test that webhooks with null provider field use the default priority.""" # Create webhooks - one with a provider field, one with null provider # Create webhook with null provider null_provider_webhook = WebhookPayload.objects.create( mailbox_name="github:456", provider=None, region_name="us", request_method="POST", request_path="/webhook/", request_headers="{}", request_body="{}", ) # Create webhook with stripe provider stripe_webhook = self.create_webhook_payload( mailbox_name="stripe:123", provider="stripe", region_name="us", ) # Run the scheduler schedule_webhook_delivery() # Verify webhooks were processed in priority order (stripe first, then null provider) assert mock_deliver.delay.call_count == 2 # Check the order of calls call_args_list = [call[0][0] for call in 
mock_deliver.delay.call_args_list] # Stripe (priority 1) should be first assert call_args_list[0] == stripe_webhook.id # Null provider (default priority) should be last assert call_args_list[1] == null_provider_webhook.id def create_payloads(num: int, mailbox: str) -> list[WebhookPayload]: created = [] for _ in range(0, num): hook = Factories.create_webhook_payload( mailbox_name=mailbox, region_name="us", ) created.append(hook) return created def create_payloads_with_destination_type( num: int, mailbox: str, destination_type: DestinationType ) -> list[WebhookPayload]: created = [] for _ in range(0, num): hook = Factories.create_webhook_payload( mailbox_name=mailbox, region_name=None, destination_type=destination_type, ) created.append(hook) return created @control_silo_test
ScheduleWebhooksTest
python
mlflow__mlflow
mlflow/gateway/config.py
{ "start": 2248, "end": 2465 }
class ____(ConfigModel): ai21labs_api_key: str @field_validator("ai21labs_api_key", mode="before") def validate_ai21labs_api_key(cls, value): return _resolve_api_key_from_input(value)
AI21LabsConfig
python
dagster-io__dagster
python_modules/dagster/dagster/_daemon/types.py
{ "start": 298, "end": 1157 }
class ____(NamedTupleSerializer["DaemonHeartbeat"]): def before_unpack(self, context, unpacked_dict): # Previously daemon types were enums, now they are strings. If we find a packed enum, # just extract the name, which is the string we want. if isinstance(unpacked_dict.get("daemon_type"), UnknownSerdesValue): unknown = unpacked_dict["daemon_type"] unpacked_dict["daemon_type"] = unknown.value["__enum__"].split(".")[-1] # pyright: ignore[reportOptionalMemberAccess,reportAttributeAccessIssue] context.clear_ignored_unknown_values(unknown) if unpacked_dict.get("error"): unpacked_dict["errors"] = [unpacked_dict["error"]] del unpacked_dict["error"] return unpacked_dict @whitelist_for_serdes(serializer=DaemonHeartbeatSerializer)
DaemonHeartbeatSerializer
python
pandas-dev__pandas
pandas/tests/reshape/concat/test_append.py
{ "start": 257, "end": 12744 }
class ____: def test_append(self, sort, float_frame): mixed_frame = float_frame.copy() mixed_frame["foo"] = "bar" begin_index = float_frame.index[:5] end_index = float_frame.index[5:] begin_frame = float_frame.reindex(begin_index) end_frame = float_frame.reindex(end_index) appended = concat([begin_frame, end_frame]) tm.assert_almost_equal(appended["A"], float_frame["A"]) del end_frame["A"] partial_appended = concat([begin_frame, end_frame], sort=sort) assert "A" in partial_appended partial_appended = concat([end_frame, begin_frame], sort=sort) assert "A" in partial_appended # mixed type handling appended = concat([mixed_frame[:5], mixed_frame[5:]]) tm.assert_frame_equal(appended, mixed_frame) # what to test here mixed_appended = concat([mixed_frame[:5], float_frame[5:]], sort=sort) mixed_appended2 = concat([float_frame[:5], mixed_frame[5:]], sort=sort) # all equal except 'foo' column tm.assert_frame_equal( mixed_appended.reindex(columns=["A", "B", "C", "D"]), mixed_appended2.reindex(columns=["A", "B", "C", "D"]), ) def test_append_empty(self, float_frame): empty = DataFrame() appended = concat([float_frame, empty]) tm.assert_frame_equal(float_frame, appended) assert appended is not float_frame appended = concat([empty, float_frame]) tm.assert_frame_equal(float_frame, appended) assert appended is not float_frame def test_append_overlap_raises(self, float_frame): msg = "Indexes have overlapping values" with pytest.raises(ValueError, match=msg): concat([float_frame, float_frame], verify_integrity=True) def test_append_new_columns(self): # see gh-6129: new columns df = DataFrame({"a": {"x": 1, "y": 2}, "b": {"x": 3, "y": 4}}) row = Series([5, 6, 7], index=["a", "b", "c"], name="z") expected = DataFrame( { "a": {"x": 1, "y": 2, "z": 5}, "b": {"x": 3, "y": 4, "z": 6}, "c": {"z": 7}, } ) result = df._append_internal(row) tm.assert_frame_equal(result, expected) def test_append_length0_frame(self, sort): df = DataFrame(columns=["A", "B", "C"]) df3 = DataFrame(index=[0, 1], 
columns=["A", "B"]) df5 = concat([df, df3], sort=sort) expected = DataFrame(index=[0, 1], columns=["A", "B", "C"]) tm.assert_frame_equal(df5, expected) def test_append_records(self): arr1 = np.zeros((2,), dtype=("i4,f4,S10")) arr1[:] = [(1, 2.0, "Hello"), (2, 3.0, "World")] arr2 = np.zeros((3,), dtype=("i4,f4,S10")) arr2[:] = [(3, 4.0, "foo"), (5, 6.0, "bar"), (7.0, 8.0, "baz")] df1 = DataFrame(arr1) df2 = DataFrame(arr2) result = concat([df1, df2], ignore_index=True) expected = DataFrame(np.concatenate((arr1, arr2))) tm.assert_frame_equal(result, expected) # rewrite sort fixture, since we also want to test default of None def test_append_sorts(self, sort): df1 = DataFrame({"a": [1, 2], "b": [1, 2]}, columns=["b", "a"]) df2 = DataFrame({"a": [1, 2], "c": [3, 4]}, index=[2, 3]) result = concat([df1, df2], sort=sort) # for None / True expected = DataFrame( {"b": [1, 2, None, None], "a": [1, 2, 1, 2], "c": [None, None, 3, 4]}, columns=["a", "b", "c"], ) if sort is False: expected = expected[["b", "a", "c"]] tm.assert_frame_equal(result, expected) def test_append_different_columns(self, sort): df = DataFrame( { "bools": np.random.default_rng(2).standard_normal(10) > 0, "ints": np.random.default_rng(2).integers(0, 10, 10), "floats": np.random.default_rng(2).standard_normal(10), "strings": ["foo", "bar"] * 5, } ) a = df[:5].loc[:, ["bools", "ints", "floats"]] b = df[5:].loc[:, ["strings", "ints", "floats"]] appended = concat([a, b], sort=sort) assert isna(appended["strings"][0:4]).all() assert isna(appended["bools"][5:]).all() def test_append_preserve_index_name(self): # #980 df1 = DataFrame(columns=["A", "B", "C"]) df1 = df1.set_index(["A"]) df2 = DataFrame(data=[[1, 4, 7], [2, 5, 8], [3, 6, 9]], columns=["A", "B", "C"]) df2 = df2.set_index(["A"]) result = concat([df1, df2]) assert result.index.name == "A" indexes_can_append = [ pd.RangeIndex(3), Index([4, 5, 6]), Index([4.5, 5.5, 6.5]), Index(list("abc")), pd.CategoricalIndex("A B C".split()), pd.CategoricalIndex("D E 
F".split(), ordered=True), pd.IntervalIndex.from_breaks([7, 8, 9, 10]), pd.DatetimeIndex( [ dt.datetime(2013, 1, 3, 0, 0), dt.datetime(2013, 1, 3, 6, 10), dt.datetime(2013, 1, 3, 7, 12), ] ), pd.MultiIndex.from_arrays(["A B C".split(), "D E F".split()]), ] @pytest.mark.parametrize( "index", indexes_can_append, ids=lambda x: type(x).__name__ ) def test_append_same_columns_type(self, index): # GH18359 # df wider than ser df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=index) ser_index = index[:2] ser = Series([7, 8], index=ser_index, name=2) result = df._append_internal(ser) expected = DataFrame( [[1, 2, 3.0], [4, 5, 6], [7, 8, np.nan]], index=[0, 1, 2], columns=index ) # integer dtype is preserved for columns present in ser.index assert expected.dtypes.iloc[0].kind == "i" assert expected.dtypes.iloc[1].kind == "i" tm.assert_frame_equal(result, expected) # ser wider than df ser_index = index index = index[:2] df = DataFrame([[1, 2], [4, 5]], columns=index) ser = Series([7, 8, 9], index=ser_index, name=2) result = df._append_internal(ser) expected = DataFrame( [[1, 2, np.nan], [4, 5, np.nan], [7, 8, 9]], index=[0, 1, 2], columns=ser_index, ) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "df_columns, series_index", combinations(indexes_can_append, r=2), ids=lambda x: type(x).__name__, ) def test_append_different_columns_types(self, df_columns, series_index): # GH18359 # See also test 'test_append_different_columns_types_raises' below # for errors raised when appending df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=df_columns) ser = Series([7, 8, 9], index=series_index, name=2) result = df._append_internal(ser) idx_diff = ser.index.difference(df_columns) combined_columns = Index(df_columns.tolist()).append(idx_diff) expected = DataFrame( [ [1.0, 2.0, 3.0, np.nan, np.nan, np.nan], [4, 5, 6, np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan, 7, 8, 9], ], index=[0, 1, 2], columns=combined_columns, ) tm.assert_frame_equal(result, expected) def 
test_append_dtype_coerce(self, sort): # GH 4993 # appending with datetime will incorrectly convert datetime64 df1 = DataFrame( index=[1, 2], data=[dt.datetime(2013, 1, 1, 0, 0), dt.datetime(2013, 1, 2, 0, 0)], columns=["start_time"], ) df2 = DataFrame( index=[4, 5], data=[ [dt.datetime(2013, 1, 3, 0, 0), dt.datetime(2013, 1, 3, 6, 10)], [dt.datetime(2013, 1, 4, 0, 0), dt.datetime(2013, 1, 4, 7, 10)], ], columns=["start_time", "end_time"], ) expected = concat( [ Series( [ pd.NaT, pd.NaT, dt.datetime(2013, 1, 3, 6, 10), dt.datetime(2013, 1, 4, 7, 10), ], name="end_time", ), Series( [ dt.datetime(2013, 1, 1, 0, 0), dt.datetime(2013, 1, 2, 0, 0), dt.datetime(2013, 1, 3, 0, 0), dt.datetime(2013, 1, 4, 0, 0), ], name="start_time", ), ], axis=1, sort=sort, ) result = concat([df1, df2], ignore_index=True, sort=sort) if sort: expected = expected[["end_time", "start_time"]] else: expected = expected[["start_time", "end_time"]] tm.assert_frame_equal(result, expected) def test_append_missing_column_proper_upcast(self, sort): df1 = DataFrame({"A": np.array([1, 2, 3, 4], dtype="i8")}) df2 = DataFrame({"B": np.array([True, False, True, False], dtype=bool)}) appended = concat([df1, df2], sort=sort) assert appended["A"].dtype == "f8" assert appended["B"].dtype == "O" def test_append_empty_frame_to_series_with_dateutil_tz(self): # GH 23682 date = Timestamp("2018-10-24 07:30:00", tz=dateutil.tz.tzutc()) ser = Series({"a": 1.0, "b": 2.0, "date": date}) df = DataFrame(columns=["c", "d"]) result_a = df._append_internal(ser, ignore_index=True) expected = DataFrame( [[np.nan, np.nan, 1.0, 2.0, date]], columns=["c", "d", "a", "b", "date"] ) # These columns get cast to object after append expected["c"] = expected["c"].astype(object) expected["d"] = expected["d"].astype(object) tm.assert_frame_equal(result_a, expected) expected = DataFrame( [[np.nan, np.nan, 1.0, 2.0, date]] * 2, columns=["c", "d", "a", "b", "date"] ) expected["c"] = expected["c"].astype(object) expected["d"] = 
expected["d"].astype(object) result_b = result_a._append_internal(ser, ignore_index=True) tm.assert_frame_equal(result_b, expected) def test_append_empty_tz_frame_with_datetime64ns(self): # https://github.com/pandas-dev/pandas/issues/35460 df = DataFrame(columns=["a"]).astype("datetime64[ns, UTC]") # also test with typed value to append df = DataFrame(columns=["a"]).astype("datetime64[ns, UTC]") other = Series({"a": pd.NaT}, dtype="datetime64[ns]").to_frame().T result = concat([df, other], ignore_index=True) expected = DataFrame({"a": [pd.NaT]}, dtype=object) tm.assert_frame_equal(result, expected) # mismatched tz other = Series({"a": pd.NaT}, dtype="datetime64[ns, US/Pacific]").to_frame().T result = concat([df, other], ignore_index=True) expected = DataFrame({"a": [pd.NaT]}).astype(object) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "dtype_str", ["datetime64[ns, UTC]", "datetime64[ns]", "Int64", "int64"] ) @pytest.mark.parametrize("val", [1, "NaT"]) def test_append_empty_frame_with_timedelta64ns_nat(self, dtype_str, val): # https://github.com/pandas-dev/pandas/issues/35460 df = DataFrame(columns=["a"]).astype(dtype_str) other = DataFrame({"a": [np.timedelta64(val, "ns")]}) result = concat([df, other]) expected = other.astype(object) tm.assert_frame_equal(result, expected) @pytest.mark.parametrize( "dtype_str", ["datetime64[ns, UTC]", "datetime64[ns]", "Int64", "int64"] ) @pytest.mark.parametrize("val", [1, "NaT"]) def test_append_frame_with_timedelta64ns_nat(self, dtype_str, val): # https://github.com/pandas-dev/pandas/issues/35460 df = DataFrame({"a": pd.array([1], dtype=dtype_str)}) other = DataFrame({"a": [np.timedelta64(val, "ns")]}) result = concat([df, other], ignore_index=True) expected = DataFrame({"a": [df.iloc[0, 0], other.iloc[0, 0]]}, dtype=object) tm.assert_frame_equal(result, expected)
TestAppend
python
run-llama__llama_index
llama-index-integrations/readers/llama-index-readers-file/tests/test_image_vision_llm.py
{ "start": 999, "end": 2593 }
class ____: """ This double fakes the `Blip2Processor` tokenizer object so as to avoid having to instantiate the actual tokenizer for these tests. """ def __call__(self, img, prompt, return_tensors) -> TokenizerFake: """ This is just a stub for the purposes of the test, so we just return the instance itself. """ return self def to(self, device, dtype) -> Dict[str, list]: """ The output is the tokenized version of the prompt "Question: describe what you see in this image. Answer:" It should be of type `transformers.image_processing_base.BatchFeature` with `torch.Tensor` typed values for `"input_ids"`, `"attention_mask"`, and `"pixel_values"` keys. However, we will fake them as lists of integers where values are needed (`None` elsewhere) in order to not require `torch` or `numpy` imports. """ return { "input_ids": [ [2, 45641, 35, 6190, 99, 47, 192, 11, 42, 2274, 4, 31652, 35] ], "attention_mask": [[None]], "pixel_values": [[[[None]]]], } def decode( self, tokens: Dict[str, List[int]], skip_special_tokens: bool = True ) -> str: """ We return the known expected decoded response for the `test_16x16_png_image_file` fixture and the default prompt of the `ImageVisionLLMReader` class. """ return "Question: describe what you see in this image. Answer: a black and white checkered pattern"
TokenizerFake