language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
imageio__imageio
imageio/v2.py
{ "start": 2842, "end": 4628 }
class ____:
    """Adapter that exposes the legacy (v2) ``Reader`` interface on top of a
    v3 ``PluginV3`` instance.

    NOTE(review): the class name is masked as ``____`` in this dataset row;
    the paired target column names it ``LegacyReader``.
    """

    def __init__(self, plugin_instance: PluginV3, **kwargs):
        # Wrapped v3 plugin that all calls are delegated to.
        self.instance = plugin_instance
        # Index of the most recently read image; used by get_next_data().
        self.last_index = 0
        # Guards against double-closing in close()/__exit__/__del__.
        self.closed = False
        # Back-compat shim: the Pillow plugin renamed the v2 "pilmode"
        # keyword to "mode", so translate it before storing read args.
        if (
            type(self.instance).__name__ == "PillowPlugin"
            and kwargs.get("pilmode") is not None
        ):
            kwargs["mode"] = kwargs["pilmode"]
            del kwargs["pilmode"]
        # Remaining keyword args are forwarded to every read() call.
        self.read_args = kwargs

    def close(self):
        """Close the underlying plugin exactly once."""
        if not self.closed:
            self.instance.close()
            self.closed = True

    def __enter__(self):
        # Context-manager support: `with Reader(...) as r:`.
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def __del__(self):
        # Best-effort cleanup if the caller forgot to close.
        self.close()

    @property
    def request(self):
        """The request object of the wrapped plugin."""
        return self.instance.request

    @property
    def format(self):
        # The v2 "format" concept has no v3 equivalent; fail loudly.
        raise TypeError("V3 Plugins don't have a format.")

    def get_length(self):
        """Return the number of images in the file (index=... means "all")."""
        return self.instance.properties(index=...).n_images

    def get_data(self, index):
        """Read image ``index`` and return it as an Array with its metadata."""
        self.last_index = index
        img = self.instance.read(index=index, **self.read_args)
        # exclude_applied=False: include metadata fields the plugin already
        # applied to the pixel data, matching v2 behavior.
        metadata = self.instance.metadata(index=index, exclude_applied=False)
        return Array(img, metadata)

    def get_next_data(self):
        """Read the image after the most recently read one."""
        return self.get_data(self.last_index + 1)

    def set_image_index(self, index):
        # Store index - 1 so that the next get_next_data() returns ``index``.
        self.last_index = index - 1

    def get_meta_data(self, index=None):
        """Return metadata for image ``index`` (or file-level if None)."""
        return self.instance.metadata(index=index, exclude_applied=False)

    def iter_data(self):
        """Yield every image in the file as an Array with per-image metadata."""
        for idx, img in enumerate(self.instance.iter()):
            metadata = self.instance.metadata(index=idx, exclude_applied=False)
            yield Array(img, metadata)

    def __iter__(self):
        return self.iter_data()

    def __len__(self):
        return self.get_length()
LegacyReader
python
google__pytype
pytype/compare_test.py
{ "start": 2947, "end": 9004 }
class ____(CompareTestBase):
    """Tests for truthiness, indexing, and rich comparison of tuple values.

    NOTE(review): the class name is masked as ``____`` in this dataset row;
    the paired target column names it ``TupleTest``.
    """

    def setUp(self):
        super().setUp()
        # A variable bound to an Unknown value, used as an opaque tuple element.
        self._var = self._program.NewVariable()
        self._var.AddBinding(abstract.Unknown(self._ctx), [], self._node)

    def test_compatible_with__not_empty(self):
        # A one-element tuple is truthy.
        t = self._ctx.convert.tuple_to_value((self._var,))
        self.assertTruthy(t)

    def test_compatible_with__empty(self):
        # The empty tuple is falsy.
        t = self._ctx.convert.tuple_to_value(())
        self.assertFalsy(t)

    def test_getitem__concrete_index(self):
        # Indexing with a concrete constant 0 returns the stored element.
        t = self._ctx.convert.tuple_to_value((self._var,))
        index = self._convert.constant_to_var(0)
        node, var = t.cls.getitem_slot(self._node, index)
        self.assertIs(node, self._node)
        self.assertIs(
            abstract_utils.get_atomic_value(var),
            abstract_utils.get_atomic_value(self._var),
        )

    def test_getitem__abstract_index(self):
        # Indexing with an abstract int (unknown value) still resolves to
        # the single element of a one-element tuple.
        t = self._ctx.convert.tuple_to_value((self._var,))
        index = self._convert.build_int(self._node)
        node, var = t.cls.getitem_slot(self._node, index)
        self.assertIs(node, self._node)
        self.assertIs(
            abstract_utils.get_atomic_value(var),
            abstract_utils.get_atomic_value(self._var),
        )

    def test_cmp_rel__equal(self):
        # Comparing a concrete tuple with itself: only LE/EQ/GE hold.
        tup = self._convert.constant_to_value((3, 1))
        self.assertIs(False, compare.cmp_rel(self._ctx, slots.LT, tup, tup))
        self.assertIs(True, compare.cmp_rel(self._ctx, slots.LE, tup, tup))
        self.assertIs(True, compare.cmp_rel(self._ctx, slots.EQ, tup, tup))
        self.assertIs(False, compare.cmp_rel(self._ctx, slots.NE, tup, tup))
        self.assertIs(True, compare.cmp_rel(self._ctx, slots.GE, tup, tup))
        self.assertIs(False, compare.cmp_rel(self._ctx, slots.GT, tup, tup))

    def test_cmp_rel__not_equal(self):
        # (3, 1) < (3, 5): lexicographic ordering decided by second element.
        tup1 = self._convert.constant_to_value((3, 1))
        tup2 = self._convert.constant_to_value((3, 5))
        self.assertIs(True, compare.cmp_rel(self._ctx, slots.LT, tup1, tup2))
        self.assertIs(False, compare.cmp_rel(self._ctx, slots.LT, tup2, tup1))
        self.assertIs(True, compare.cmp_rel(self._ctx, slots.LE, tup1, tup2))
        self.assertIs(False, compare.cmp_rel(self._ctx, slots.LE, tup2, tup1))
        self.assertIs(False, compare.cmp_rel(self._ctx, slots.EQ, tup1, tup2))
        self.assertIs(False, compare.cmp_rel(self._ctx, slots.EQ, tup2, tup1))
        self.assertIs(True, compare.cmp_rel(self._ctx, slots.NE, tup1, tup2))
        self.assertIs(True, compare.cmp_rel(self._ctx, slots.NE, tup2, tup1))
        self.assertIs(False, compare.cmp_rel(self._ctx, slots.GE, tup1, tup2))
        self.assertIs(True, compare.cmp_rel(self._ctx, slots.GE, tup2, tup1))
        self.assertIs(False, compare.cmp_rel(self._ctx, slots.GT, tup1, tup2))
        self.assertIs(True, compare.cmp_rel(self._ctx, slots.GT, tup2, tup1))

    def test_cmp_rel__unknown(self):
        # Comparing against a tuple instance with unknown contents is
        # undecidable: every operator yields None in both directions.
        tup1 = self._convert.constant_to_value((3, 1))
        tup2 = abstract.Instance(self._convert.tuple_type, self._ctx)
        for op in (slots.LT, slots.LE, slots.EQ, slots.NE, slots.GE, slots.GT):
            self.assertIsNone(compare.cmp_rel(self._ctx, op, tup1, tup2))
            self.assertIsNone(compare.cmp_rel(self._ctx, op, tup2, tup1))

    def test_cmp_rel__prefix_equal(self):
        # tup1 = (3, 1, <int>) extends tup2 = (3, 1): the longer tuple with
        # an equal prefix is strictly greater, regardless of the abstract tail.
        tup1 = self._ctx.convert.tuple_to_value((
            self._convert.constant_to_value(3).to_variable(self._node),
            self._convert.constant_to_value(1).to_variable(self._node),
            self._convert.primitive_instances[int].to_variable(self._node),
        ))
        tup2 = self._convert.constant_to_value((3, 1))
        self.assertIs(False, compare.cmp_rel(self._ctx, slots.LT, tup1, tup2))
        self.assertIs(True, compare.cmp_rel(self._ctx, slots.LT, tup2, tup1))
        self.assertIs(False, compare.cmp_rel(self._ctx, slots.LE, tup1, tup2))
        self.assertIs(True, compare.cmp_rel(self._ctx, slots.LE, tup2, tup1))
        self.assertIs(False, compare.cmp_rel(self._ctx, slots.EQ, tup1, tup2))
        self.assertIs(False, compare.cmp_rel(self._ctx, slots.EQ, tup2, tup1))
        self.assertIs(True, compare.cmp_rel(self._ctx, slots.NE, tup1, tup2))
        self.assertIs(True, compare.cmp_rel(self._ctx, slots.NE, tup2, tup1))
        self.assertIs(True, compare.cmp_rel(self._ctx, slots.GE, tup1, tup2))
        self.assertIs(False, compare.cmp_rel(self._ctx, slots.GE, tup2, tup1))
        self.assertIs(True, compare.cmp_rel(self._ctx, slots.GT, tup1, tup2))
        self.assertIs(False, compare.cmp_rel(self._ctx, slots.GT, tup2, tup1))

    def test_cmp_rel__prefix_not_equal(self):
        # tup1 = (3, 1, <int>) vs tup2 = (4, 2): the first concrete element
        # already differs, so the abstract tail never matters.
        tup1 = self._ctx.convert.tuple_to_value((
            self._convert.constant_to_value(3).to_variable(self._node),
            self._convert.constant_to_value(1).to_variable(self._node),
            self._convert.primitive_instances[int].to_variable(self._node),
        ))
        tup2 = self._convert.constant_to_value((4, 2))
        self.assertIs(True, compare.cmp_rel(self._ctx, slots.LT, tup1, tup2))
        self.assertIs(False, compare.cmp_rel(self._ctx, slots.LT, tup2, tup1))
        self.assertIs(True, compare.cmp_rel(self._ctx, slots.LE, tup1, tup2))
        self.assertIs(False, compare.cmp_rel(self._ctx, slots.LE, tup2, tup1))
        self.assertIs(False, compare.cmp_rel(self._ctx, slots.EQ, tup1, tup2))
        self.assertIs(False, compare.cmp_rel(self._ctx, slots.EQ, tup2, tup1))
        self.assertIs(True, compare.cmp_rel(self._ctx, slots.NE, tup1, tup2))
        self.assertIs(True, compare.cmp_rel(self._ctx, slots.NE, tup2, tup1))
        self.assertIs(False, compare.cmp_rel(self._ctx, slots.GE, tup1, tup2))
        self.assertIs(True, compare.cmp_rel(self._ctx, slots.GE, tup2, tup1))
        self.assertIs(False, compare.cmp_rel(self._ctx, slots.GT, tup1, tup2))
        self.assertIs(True, compare.cmp_rel(self._ctx, slots.GT, tup2, tup1))

    def test_cmp_rel__prefix_unknown(self):
        # tup1 = (3, <int>) vs tup2 = (3, 1): the decisive position is
        # abstract, so the comparison is undecidable (None) both ways.
        tup1 = self._ctx.convert.tuple_to_value((
            self._convert.constant_to_value(3).to_variable(self._node),
            self._convert.primitive_instances[int].to_variable(self._node),
        ))
        tup2 = self._convert.constant_to_value((3, 1))
        for op in (slots.LT, slots.LE, slots.EQ, slots.NE, slots.GE, slots.GT):
            self.assertIsNone(compare.cmp_rel(self._ctx, op, tup1, tup2))
            self.assertIsNone(compare.cmp_rel(self._ctx, op, tup2, tup1))
TupleTest
python
ray-project__ray
python/ray/_private/resource_isolation_config.py
{ "start": 333, "end": 13648 }
class ____:
    """Configuration for enabling resource isolation by reserving memory and cpu
    for ray system processes through cgroupv2.

    Validates configuration for resource isolation by enforcing types, correct
    combinations of values, applying default values, and sanity checking cpu
    and memory reservations. Also, converts system_reserved_cpu into
    cpu.weights for cgroupv2.

    NOTE(review): the class name is masked as ``____`` in this dataset row;
    the paired target column names it ``ResourceIsolationConfig``.

    Raises:
        ValueError: On invalid inputs.

    Attributes:
        enable_resource_isolation: True if cgroupv2 based isolation of ray
            system processes is enabled.
        cgroup_path: The path for the cgroup the raylet should use to enforce
            resource isolation.
        system_reserved_cpu: The amount of cores reserved for ray system
            processes. Must be >= ray_constants.MINIMUM_SYSTEM_RESERVED_CPU_CORES
            and < the total number of cores available.
        system_reserved_memory: The amount of memory in bytes reserved for ray
            system processes. Must be >=
            ray_constants.MINIMUM_SYSTEM_RESERVED_MEMORY_BYTES and
            system_reserved_cpu + object_store_bytes < the total memory
            available.

    TODO(54703): Link documentation when it's available.
    """

    def __init__(
        self,
        enable_resource_isolation: bool = False,
        cgroup_path: Optional[str] = None,
        system_reserved_cpu: Optional[float] = None,
        system_reserved_memory: Optional[int] = None,
    ):
        self._resource_isolation_enabled = enable_resource_isolation
        self.cgroup_path = cgroup_path
        self.system_reserved_memory = system_reserved_memory
        # Comma-separated pid list, populated later via add_system_pids().
        self.system_pids = ""
        # cgroupv2 cpu.weight calculated from system_reserved_cpu assumes ray uses all available cores.
        # None until validation below runs (only when isolation is enabled).
        self.system_reserved_cpu_weight: Optional[int] = None
        # TODO(irabbani): this is used to ensure that object_store_memory is not added twice
        # to self._system_reserved_memory. This should be refactored in the future so that ResourceIsolationConfig
        # can take object_store_memory as a constructor parameter and be constructed fully by the constructor.
        self._constructed = False
        # When isolation is disabled, every isolation-specific option must be
        # unset; reject each one with a targeted error message.
        if not enable_resource_isolation:
            if self.cgroup_path:
                raise ValueError(
                    "cgroup_path cannot be set when resource isolation is not enabled. "
                    "Set enable_resource_isolation to True if you're using ray.init or use the "
                    "--enable-resource-isolation flag if you're using the ray cli."
                )
            if system_reserved_cpu:
                raise ValueError(
                    "system_reserved_cpu cannot be set when resource isolation is not enabled. "
                    "Set enable_resource_isolation to True if you're using ray.init or use the "
                    "--enable-resource-isolation flag if you're using the ray cli."
                )
            if self.system_reserved_memory:
                raise ValueError(
                    "system_reserved_memory cannot be set when resource isolation is not enabled. "
                    "Set enable_resource_isolation to True if you're using ray.init or use the "
                    "--enable-resource-isolation flag if you're using the ray cli."
                )
            return
        # Isolation is enabled: validate inputs and apply defaults.
        self.system_reserved_cpu_weight = self._validate_and_get_system_reserved_cpu(
            system_reserved_cpu
        )
        self.system_reserved_memory = self._validate_and_get_system_reserved_memory(
            system_reserved_memory
        )
        self.cgroup_path = self._validate_and_get_cgroup_path(cgroup_path)

    def is_enabled(self) -> bool:
        """Return True if cgroupv2-based resource isolation is enabled."""
        return self._resource_isolation_enabled

    def add_object_store_memory(self, object_store_memory_bytes: int):
        """Adds object_store_memory to the memory reserved for system processes.

        Args:
            object_store_memory_bytes: The amount processes. Must be >=
                ray_constants.MINIMUM_SYSTEM_RESERVED_CPU_CORES and < the total
                number of cores available.

        Raises:
            AssertionError: If called with resource isolation not enabled or
                called more than once for the same instance.
            ValueError: If the input is not an integer or if the
                system_reserved_memory + object_store_memory is greater than
                the total memory available on the system.
        """
        assert self.is_enabled(), (
            "Cannot add object_store_memory to system_reserved_memory when "
            "enable_resource_isolation is False."
        )
        # One-shot: guard against double-counting the object store.
        assert not self._constructed, (
            "Cannot call add_object_store_memory more than once with an instance "
            "ResourceIsolationConfig. This is a bug in the ray code. "
        )
        self.system_reserved_memory += object_store_memory_bytes
        available_system_memory = ray._common.utils.get_system_memory()
        if self.system_reserved_memory > available_system_memory:
            raise ValueError(
                f"The total requested system_reserved_memory={self.system_reserved_memory}, calculated by "
                "object_store_bytes + system_reserved_memory, is greater than the total memory "
                f"available={available_system_memory}. Pick a smaller number of bytes for object_store_bytes "
                "or system_reserved_memory."
            )
        self._constructed = True

    def add_system_pids(self, system_pids: str):
        """A comma-separated list of pids to move into the system cgroup."""
        self.system_pids = system_pids

    @staticmethod
    def _validate_and_get_cgroup_path(cgroup_path: Optional[str]) -> str:
        """Returns the ray_constants.DEFAULT_CGROUP_PATH if cgroup_path is not
        specified.

        Args:
            cgroup_path: The path for the cgroup the raylet should use to
                enforce resource isolation.

        Returns:
            str: The validated cgroup path.

        Raises:
            ValueError: If cgroup_path is not a string.
        """
        if not cgroup_path:
            cgroup_path = ray_constants.DEFAULT_CGROUP_PATH
        if not isinstance(cgroup_path, str):
            raise ValueError(
                f"Invalid value={cgroup_path} for cgroup_path. "
                "Use a string to represent the path for the cgroup that the raylet should use "
                "to enable resource isolation."
            )
        return cgroup_path

    @staticmethod
    def _validate_and_get_system_reserved_cpu(
        system_reserved_cpu: Optional[float],
    ) -> int:
        """If system_reserved_cpu is specified, validates it, otherwise returns
        the default value.

        Validation entails checking the type, ensuring that the value is in
        range, and converts it into cpu.weights for cgroupv2. See
        https://docs.kernel.org/admin-guide/cgroup-v2.html#weights for more
        information.

        If system_reserved_cpu is not specified, returns a default value
        between [DEFAULT_MIN_SYSTEM_RESERVED_CPU_CORES,
        DEFAULT_MAX_SYSTEM_RESERVED_CPU_CORES].

        # TODO(54703): The errors from this method are user-facing and thus
        # need to be linked the user-facing documentation once it's available.

        Args:
            system_reserved_cpu: The amount of cores reserved for ray system
                processes. Must be >=
                ray_constants.MINIMUM_SYSTEM_RESERVED_CPU_CORES and < the total
                number of cores available.

        Raises:
            ValueError: If system_reserved_cpu is specified, but invalid or if
                the system does not have enough available cpus.
        """
        available_system_cpus = utils.get_num_cpus(truncate=False)
        # The host must have at least the minimum reservable core count.
        if available_system_cpus < ray_constants.DEFAULT_MIN_SYSTEM_RESERVED_CPU_CORES:
            raise ValueError(
                f"The available number of cpu cores on this system {available_system_cpus} is less than "
                f"the minimum amount that is required for ray's system processes. "
                f"Pick a number of cpu cores greater than or equal to {ray_constants.DEFAULT_MIN_SYSTEM_RESERVED_CPU_CORES}"
            )
        # Default: a proportion of available cores, clamped to [MIN, MAX].
        if not system_reserved_cpu:
            system_reserved_cpu = float(
                min(
                    max(
                        ray_constants.DEFAULT_MIN_SYSTEM_RESERVED_CPU_CORES,
                        ray_constants.DEFAULT_SYSTEM_RESERVED_CPU_PROPORTION
                        * available_system_cpus,
                    ),
                    ray_constants.DEFAULT_MAX_SYSTEM_RESERVED_CPU_CORES,
                )
            )
        if not (
            isinstance(system_reserved_cpu, float)
            or isinstance(system_reserved_cpu, int)
        ):
            raise ValueError(
                f"Invalid value={system_reserved_cpu} for system_reserved_cpu. "
                "Use a float to represent the number of cores that need to be reserved for "
                "ray system processes to enable resource isolation."
            )
        system_reserved_cpu = float(system_reserved_cpu)
        if system_reserved_cpu < ray_constants.DEFAULT_MIN_SYSTEM_RESERVED_CPU_CORES:
            raise ValueError(
                f"The requested system_reserved_cpu={system_reserved_cpu} is less than "
                f"the minimum number of cpus that can be used for resource isolation. "
                "Pick a number of cpu cores to reserve for ray system processes "
                f"greater than or equal to {ray_constants.DEFAULT_MIN_SYSTEM_RESERVED_CPU_CORES}"
            )
        if system_reserved_cpu >= available_system_cpus:
            raise ValueError(
                f"The requested system_reserved_cpu={system_reserved_cpu} is greater than or equal to "
                f"the number of cpus available={available_system_cpus}. "
                "Pick a smaller number of cpu cores to reserve for ray system processes."
            )
        # Converting the number of cores the user defined into cpu.weights
        # This assumes that ray is allowed to use all available CPU
        # cores and distribute them between system, worker and
        # user processes
        return int(
            (system_reserved_cpu / float(available_system_cpus))
            * _CGROUP_CPU_MAX_WEIGHT
        )

    @staticmethod
    def _validate_and_get_system_reserved_memory(
        system_reserved_memory: Optional[int],
    ) -> int:
        """If system_reserved_memory is not specified, returns the default
        value. Otherwise, checks the type, makes sure that the value is in
        range.

        Args:
            system_reserved_memory: The amount of memory in bytes reserved for
                ray system processes. Must be >=
                ray_constants.MINIMUM_SYSTEM_RESERVED_MEMORY_BYTES and < the
                total memory available.

        Returns:
            int: The validated system reserved memory in bytes.

        Raises:
            ValueError: If system_reserved_memory is specified, but invalid.
        """
        available_system_memory = ray._common.utils.get_system_memory()
        # The host must have at least the minimum reservable byte count.
        if (
            available_system_memory
            < ray_constants.DEFAULT_MIN_SYSTEM_RESERVED_MEMORY_BYTES
        ):
            raise ValueError(
                f"The available memory on this system {available_system_memory} is less than "
                f"the minimum amount that is required for ray's system processes. "
                f"Pick a number of bytes greater than or equal to {ray_constants.DEFAULT_MIN_SYSTEM_RESERVED_MEMORY_BYTES}"
            )
        # Default: a proportion of available memory, clamped to [MIN, MAX].
        if not system_reserved_memory:
            system_reserved_memory = int(
                min(
                    max(
                        ray_constants.DEFAULT_MIN_SYSTEM_RESERVED_MEMORY_BYTES,
                        ray_constants.DEFAULT_SYSTEM_RESERVED_MEMORY_PROPORTION
                        * available_system_memory,
                    ),
                    ray_constants.DEFAULT_MAX_SYSTEM_RESERVED_MEMORY_BYTES,
                )
            )
        if not isinstance(system_reserved_memory, int):
            raise ValueError(
                f"Invalid value {system_reserved_memory} for system_reserved_memory. "
                "Use an integer to represent the number bytes that need to be reserved for "
                "ray system processes to enable resource isolation."
            )
        if (
            system_reserved_memory
            < ray_constants.DEFAULT_MIN_SYSTEM_RESERVED_MEMORY_BYTES
        ):
            raise ValueError(
                f"The requested system_reserved_memory {system_reserved_memory} is less than "
                f"the minimum number of bytes that can be used for resource isolation. "
                "Pick a number of bytes to reserve for ray system processes "
                f"greater than or equal to {ray_constants.DEFAULT_MIN_SYSTEM_RESERVED_MEMORY_BYTES}"
            )
        if system_reserved_memory > available_system_memory:
            raise ValueError(
                f"The total requested system_reserved_memory={system_reserved_memory} is greater than "
                f"the amount of memory available={available_system_memory}."
            )
        return system_reserved_memory
ResourceIsolationConfig
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 936852, "end": 973933 }
class ____( sgqlc.types.Type, Node, ProjectV2Recent, ProjectOwner, PackageOwner, Subscribable, Starrable, UniformResourceLocatable, RepositoryInfo, ): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ( "allow_update_branch", "assignable_users", "auto_merge_allowed", "branch_protection_rules", "code_of_conduct", "codeowners", "collaborators", "commit_comments", "contact_links", "database_id", "default_branch_ref", "delete_branch_on_merge", "deploy_keys", "deployments", "discussion", "discussion_categories", "discussion_category", "discussions", "disk_usage", "environment", "environments", "forking_allowed", "forks", "funding_links", "interaction_ability", "is_blank_issues_enabled", "is_disabled", "is_empty", "is_security_policy_enabled", "is_user_configuration_repository", "issue", "issue_or_pull_request", "issue_templates", "issues", "label", "labels", "languages", "latest_release", "mentionable_users", "merge_commit_allowed", "merge_commit_message", "merge_commit_title", "milestone", "milestones", "object", "parent", "pinned_discussions", "pinned_issues", "primary_language", "project_next", "project_v2", "projects_next", "projects_v2", "pull_request", "pull_request_templates", "pull_requests", "rebase_merge_allowed", "ref", "refs", "release", "releases", "repository_topics", "security_policy_url", "squash_merge_allowed", "squash_merge_commit_message", "squash_merge_commit_title", "squash_pr_title_used_as_default", "ssh_url", "submodules", "temp_clone_token", "template_repository", "viewer_can_administer", "viewer_can_update_topics", "viewer_default_commit_email", "viewer_default_merge_method", "viewer_permission", "viewer_possible_commit_emails", "vulnerability_alerts", "watchers", "web_commit_signoff_required", ) allow_update_branch = sgqlc.types.Field( sgqlc.types.non_null(Boolean), graphql_name="allowUpdateBranch" ) assignable_users = sgqlc.types.Field( sgqlc.types.non_null(UserConnection), graphql_name="assignableUsers", 
args=sgqlc.types.ArgDict( ( ("query", sgqlc.types.Arg(String, graphql_name="query", default=None)), ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) auto_merge_allowed = sgqlc.types.Field( sgqlc.types.non_null(Boolean), graphql_name="autoMergeAllowed" ) branch_protection_rules = sgqlc.types.Field( sgqlc.types.non_null(BranchProtectionRuleConnection), graphql_name="branchProtectionRules", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) code_of_conduct = sgqlc.types.Field(CodeOfConduct, graphql_name="codeOfConduct") codeowners = sgqlc.types.Field( RepositoryCodeowners, graphql_name="codeowners", args=sgqlc.types.ArgDict( ( ( "ref_name", sgqlc.types.Arg(String, graphql_name="refName", default=None), ), ) ), ) collaborators = sgqlc.types.Field( RepositoryCollaboratorConnection, graphql_name="collaborators", args=sgqlc.types.ArgDict( ( ( "affiliation", sgqlc.types.Arg( CollaboratorAffiliation, graphql_name="affiliation", default=None, ), ), ("query", sgqlc.types.Arg(String, graphql_name="query", default=None)), ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) commit_comments = sgqlc.types.Field( sgqlc.types.non_null(CommitCommentConnection), graphql_name="commitComments", args=sgqlc.types.ArgDict( ( ("after", 
sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) contact_links = sgqlc.types.Field( sgqlc.types.list_of(sgqlc.types.non_null(RepositoryContactLink)), graphql_name="contactLinks", ) database_id = sgqlc.types.Field(Int, graphql_name="databaseId") default_branch_ref = sgqlc.types.Field(Ref, graphql_name="defaultBranchRef") delete_branch_on_merge = sgqlc.types.Field( sgqlc.types.non_null(Boolean), graphql_name="deleteBranchOnMerge" ) deploy_keys = sgqlc.types.Field( sgqlc.types.non_null(DeployKeyConnection), graphql_name="deployKeys", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) deployments = sgqlc.types.Field( sgqlc.types.non_null(DeploymentConnection), graphql_name="deployments", args=sgqlc.types.ArgDict( ( ( "environments", sgqlc.types.Arg( sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="environments", default=None, ), ), ( "order_by", sgqlc.types.Arg( DeploymentOrder, graphql_name="orderBy", default={"field": "CREATED_AT", "direction": "ASC"}, ), ), ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) discussion = sgqlc.types.Field( Discussion, graphql_name="discussion", args=sgqlc.types.ArgDict( ( ( "number", sgqlc.types.Arg( sgqlc.types.non_null(Int), graphql_name="number", default=None ), ), ) ), ) 
discussion_categories = sgqlc.types.Field( sgqlc.types.non_null(DiscussionCategoryConnection), graphql_name="discussionCategories", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ( "filter_by_assignable", sgqlc.types.Arg( Boolean, graphql_name="filterByAssignable", default=False ), ), ) ), ) discussion_category = sgqlc.types.Field( DiscussionCategory, graphql_name="discussionCategory", args=sgqlc.types.ArgDict( ( ( "slug", sgqlc.types.Arg( sgqlc.types.non_null(String), graphql_name="slug", default=None ), ), ) ), ) discussions = sgqlc.types.Field( sgqlc.types.non_null(DiscussionConnection), graphql_name="discussions", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ( "category_id", sgqlc.types.Arg(ID, graphql_name="categoryId", default=None), ), ( "order_by", sgqlc.types.Arg( DiscussionOrder, graphql_name="orderBy", default={"field": "UPDATED_AT", "direction": "DESC"}, ), ), ) ), ) disk_usage = sgqlc.types.Field(Int, graphql_name="diskUsage") environment = sgqlc.types.Field( Environment, graphql_name="environment", args=sgqlc.types.ArgDict( ( ( "name", sgqlc.types.Arg( sgqlc.types.non_null(String), graphql_name="name", default=None ), ), ) ), ) environments = sgqlc.types.Field( sgqlc.types.non_null(EnvironmentConnection), graphql_name="environments", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", 
sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) forking_allowed = sgqlc.types.Field( sgqlc.types.non_null(Boolean), graphql_name="forkingAllowed" ) forks = sgqlc.types.Field( sgqlc.types.non_null(RepositoryConnection), graphql_name="forks", args=sgqlc.types.ArgDict( ( ( "privacy", sgqlc.types.Arg( RepositoryPrivacy, graphql_name="privacy", default=None ), ), ( "order_by", sgqlc.types.Arg( RepositoryOrder, graphql_name="orderBy", default=None ), ), ( "affiliations", sgqlc.types.Arg( sgqlc.types.list_of(RepositoryAffiliation), graphql_name="affiliations", default=None, ), ), ( "owner_affiliations", sgqlc.types.Arg( sgqlc.types.list_of(RepositoryAffiliation), graphql_name="ownerAffiliations", default=("OWNER", "COLLABORATOR"), ), ), ( "is_locked", sgqlc.types.Arg(Boolean, graphql_name="isLocked", default=None), ), ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) funding_links = sgqlc.types.Field( sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null(FundingLink))), graphql_name="fundingLinks", ) interaction_ability = sgqlc.types.Field( RepositoryInteractionAbility, graphql_name="interactionAbility" ) is_blank_issues_enabled = sgqlc.types.Field( sgqlc.types.non_null(Boolean), graphql_name="isBlankIssuesEnabled" ) is_disabled = sgqlc.types.Field( sgqlc.types.non_null(Boolean), graphql_name="isDisabled" ) is_empty = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isEmpty") is_security_policy_enabled = sgqlc.types.Field( Boolean, graphql_name="isSecurityPolicyEnabled" ) is_user_configuration_repository = sgqlc.types.Field( sgqlc.types.non_null(Boolean), graphql_name="isUserConfigurationRepository" ) issue = sgqlc.types.Field( 
Issue, graphql_name="issue", args=sgqlc.types.ArgDict( ( ( "number", sgqlc.types.Arg( sgqlc.types.non_null(Int), graphql_name="number", default=None ), ), ) ), ) issue_or_pull_request = sgqlc.types.Field( "IssueOrPullRequest", graphql_name="issueOrPullRequest", args=sgqlc.types.ArgDict( ( ( "number", sgqlc.types.Arg( sgqlc.types.non_null(Int), graphql_name="number", default=None ), ), ) ), ) issue_templates = sgqlc.types.Field( sgqlc.types.list_of(sgqlc.types.non_null(IssueTemplate)), graphql_name="issueTemplates", ) issues = sgqlc.types.Field( sgqlc.types.non_null(IssueConnection), graphql_name="issues", args=sgqlc.types.ArgDict( ( ( "order_by", sgqlc.types.Arg(IssueOrder, graphql_name="orderBy", default=None), ), ( "labels", sgqlc.types.Arg( sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="labels", default=None, ), ), ( "states", sgqlc.types.Arg( sgqlc.types.list_of(sgqlc.types.non_null(IssueState)), graphql_name="states", default=None, ), ), ( "filter_by", sgqlc.types.Arg( IssueFilters, graphql_name="filterBy", default=None ), ), ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) label = sgqlc.types.Field( Label, graphql_name="label", args=sgqlc.types.ArgDict( ( ( "name", sgqlc.types.Arg( sgqlc.types.non_null(String), graphql_name="name", default=None ), ), ) ), ) labels = sgqlc.types.Field( LabelConnection, graphql_name="labels", args=sgqlc.types.ArgDict( ( ( "order_by", sgqlc.types.Arg( LabelOrder, graphql_name="orderBy", default={"field": "CREATED_AT", "direction": "ASC"}, ), ), ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", 
sgqlc.types.Arg(Int, graphql_name="last", default=None)), ("query", sgqlc.types.Arg(String, graphql_name="query", default=None)), ) ), ) languages = sgqlc.types.Field( LanguageConnection, graphql_name="languages", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ( "order_by", sgqlc.types.Arg( LanguageOrder, graphql_name="orderBy", default=None ), ), ) ), ) latest_release = sgqlc.types.Field(Release, graphql_name="latestRelease") mentionable_users = sgqlc.types.Field( sgqlc.types.non_null(UserConnection), graphql_name="mentionableUsers", args=sgqlc.types.ArgDict( ( ("query", sgqlc.types.Arg(String, graphql_name="query", default=None)), ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) merge_commit_allowed = sgqlc.types.Field( sgqlc.types.non_null(Boolean), graphql_name="mergeCommitAllowed" ) merge_commit_message = sgqlc.types.Field( sgqlc.types.non_null(MergeCommitMessage), graphql_name="mergeCommitMessage" ) merge_commit_title = sgqlc.types.Field( sgqlc.types.non_null(MergeCommitTitle), graphql_name="mergeCommitTitle" ) milestone = sgqlc.types.Field( Milestone, graphql_name="milestone", args=sgqlc.types.ArgDict( ( ( "number", sgqlc.types.Arg( sgqlc.types.non_null(Int), graphql_name="number", default=None ), ), ) ), ) milestones = sgqlc.types.Field( MilestoneConnection, graphql_name="milestones", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), 
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ( "states", sgqlc.types.Arg( sgqlc.types.list_of(sgqlc.types.non_null(MilestoneState)), graphql_name="states", default=None, ), ), ( "order_by", sgqlc.types.Arg( MilestoneOrder, graphql_name="orderBy", default=None ), ), ("query", sgqlc.types.Arg(String, graphql_name="query", default=None)), ) ), ) object = sgqlc.types.Field( GitObject, graphql_name="object", args=sgqlc.types.ArgDict( ( ("oid", sgqlc.types.Arg(GitObjectID, graphql_name="oid", default=None)), ( "expression", sgqlc.types.Arg(String, graphql_name="expression", default=None), ), ) ), ) parent = sgqlc.types.Field("Repository", graphql_name="parent") pinned_discussions = sgqlc.types.Field( sgqlc.types.non_null(PinnedDiscussionConnection), graphql_name="pinnedDiscussions", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) pinned_issues = sgqlc.types.Field( PinnedIssueConnection, graphql_name="pinnedIssues", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) primary_language = sgqlc.types.Field(Language, graphql_name="primaryLanguage") project_next = sgqlc.types.Field( ProjectNext, graphql_name="projectNext", args=sgqlc.types.ArgDict( ( ( "number", sgqlc.types.Arg( sgqlc.types.non_null(Int), graphql_name="number", default=None ), ), ) ), ) project_v2 = sgqlc.types.Field( ProjectV2, graphql_name="projectV2", args=sgqlc.types.ArgDict( ( ( 
"number", sgqlc.types.Arg( sgqlc.types.non_null(Int), graphql_name="number", default=None ), ), ) ), ) projects_next = sgqlc.types.Field( sgqlc.types.non_null(ProjectNextConnection), graphql_name="projectsNext", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ("query", sgqlc.types.Arg(String, graphql_name="query", default=None)), ( "sort_by", sgqlc.types.Arg( ProjectNextOrderField, graphql_name="sortBy", default="TITLE" ), ), ) ), ) projects_v2 = sgqlc.types.Field( sgqlc.types.non_null(ProjectV2Connection), graphql_name="projectsV2", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ("query", sgqlc.types.Arg(String, graphql_name="query", default=None)), ( "order_by", sgqlc.types.Arg( ProjectV2Order, graphql_name="orderBy", default={"field": "NUMBER", "direction": "DESC"}, ), ), ) ), ) pull_request = sgqlc.types.Field( PullRequest, graphql_name="pullRequest", args=sgqlc.types.ArgDict( ( ( "number", sgqlc.types.Arg( sgqlc.types.non_null(Int), graphql_name="number", default=None ), ), ) ), ) pull_request_templates = sgqlc.types.Field( sgqlc.types.list_of(sgqlc.types.non_null(PullRequestTemplate)), graphql_name="pullRequestTemplates", ) pull_requests = sgqlc.types.Field( sgqlc.types.non_null(PullRequestConnection), graphql_name="pullRequests", args=sgqlc.types.ArgDict( ( ( "states", sgqlc.types.Arg( sgqlc.types.list_of(sgqlc.types.non_null(PullRequestState)), graphql_name="states", default=None, ), ), ( "labels", sgqlc.types.Arg( 
sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="labels", default=None, ), ), ( "head_ref_name", sgqlc.types.Arg(String, graphql_name="headRefName", default=None), ), ( "base_ref_name", sgqlc.types.Arg(String, graphql_name="baseRefName", default=None), ), ( "order_by", sgqlc.types.Arg(IssueOrder, graphql_name="orderBy", default=None), ), ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) rebase_merge_allowed = sgqlc.types.Field( sgqlc.types.non_null(Boolean), graphql_name="rebaseMergeAllowed" ) ref = sgqlc.types.Field( Ref, graphql_name="ref", args=sgqlc.types.ArgDict( ( ( "qualified_name", sgqlc.types.Arg( sgqlc.types.non_null(String), graphql_name="qualifiedName", default=None, ), ), ) ), ) refs = sgqlc.types.Field( RefConnection, graphql_name="refs", args=sgqlc.types.ArgDict( ( ("query", sgqlc.types.Arg(String, graphql_name="query", default=None)), ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ( "ref_prefix", sgqlc.types.Arg( sgqlc.types.non_null(String), graphql_name="refPrefix", default=None, ), ), ( "direction", sgqlc.types.Arg( OrderDirection, graphql_name="direction", default=None ), ), ( "order_by", sgqlc.types.Arg(RefOrder, graphql_name="orderBy", default=None), ), ) ), ) release = sgqlc.types.Field( Release, graphql_name="release", args=sgqlc.types.ArgDict( ( ( "tag_name", sgqlc.types.Arg( sgqlc.types.non_null(String), graphql_name="tagName", default=None, ), ), ) ), ) releases = sgqlc.types.Field( sgqlc.types.non_null(ReleaseConnection), graphql_name="releases", 
args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ( "order_by", sgqlc.types.Arg(ReleaseOrder, graphql_name="orderBy", default=None), ), ) ), ) repository_topics = sgqlc.types.Field( sgqlc.types.non_null(RepositoryTopicConnection), graphql_name="repositoryTopics", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) security_policy_url = sgqlc.types.Field(URI, graphql_name="securityPolicyUrl") squash_merge_allowed = sgqlc.types.Field( sgqlc.types.non_null(Boolean), graphql_name="squashMergeAllowed" ) squash_merge_commit_message = sgqlc.types.Field( sgqlc.types.non_null(SquashMergeCommitMessage), graphql_name="squashMergeCommitMessage", ) squash_merge_commit_title = sgqlc.types.Field( sgqlc.types.non_null(SquashMergeCommitTitle), graphql_name="squashMergeCommitTitle", ) squash_pr_title_used_as_default = sgqlc.types.Field( sgqlc.types.non_null(Boolean), graphql_name="squashPrTitleUsedAsDefault" ) ssh_url = sgqlc.types.Field( sgqlc.types.non_null(GitSSHRemote), graphql_name="sshUrl" ) submodules = sgqlc.types.Field( sgqlc.types.non_null(SubmoduleConnection), graphql_name="submodules", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ) ), ) temp_clone_token = sgqlc.types.Field(String, 
graphql_name="tempCloneToken") template_repository = sgqlc.types.Field( "Repository", graphql_name="templateRepository" ) viewer_can_administer = sgqlc.types.Field( sgqlc.types.non_null(Boolean), graphql_name="viewerCanAdminister" ) viewer_can_update_topics = sgqlc.types.Field( sgqlc.types.non_null(Boolean), graphql_name="viewerCanUpdateTopics" ) viewer_default_commit_email = sgqlc.types.Field( String, graphql_name="viewerDefaultCommitEmail" ) viewer_default_merge_method = sgqlc.types.Field( sgqlc.types.non_null(PullRequestMergeMethod), graphql_name="viewerDefaultMergeMethod", ) viewer_permission = sgqlc.types.Field( RepositoryPermission, graphql_name="viewerPermission" ) viewer_possible_commit_emails = sgqlc.types.Field( sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="viewerPossibleCommitEmails", ) vulnerability_alerts = sgqlc.types.Field( RepositoryVulnerabilityAlertConnection, graphql_name="vulnerabilityAlerts", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ( "states", sgqlc.types.Arg( sgqlc.types.list_of( sgqlc.types.non_null(RepositoryVulnerabilityAlertState) ), graphql_name="states", default=None, ), ), ( "dependency_scopes", sgqlc.types.Arg( sgqlc.types.list_of( sgqlc.types.non_null( RepositoryVulnerabilityAlertDependencyScope ) ), graphql_name="dependencyScopes", default=None, ), ), ) ), ) watchers = sgqlc.types.Field( sgqlc.types.non_null(UserConnection), graphql_name="watchers", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", 
default=None)), ) ), ) web_commit_signoff_required = sgqlc.types.Field( sgqlc.types.non_null(Boolean), graphql_name="webCommitSignoffRequired" )
Repository
python
qdrant__qdrant-client
qdrant_client/http/models/models.py
{ "start": 45526, "end": 46463 }
class ____(BaseModel): """ Usage of the hardware resources, spent to process the request """ cpu: int = Field(..., description="Usage of the hardware resources, spent to process the request") payload_io_read: int = Field(..., description="Usage of the hardware resources, spent to process the request") payload_io_write: int = Field(..., description="Usage of the hardware resources, spent to process the request") payload_index_io_read: int = Field(..., description="Usage of the hardware resources, spent to process the request") payload_index_io_write: int = Field( ..., description="Usage of the hardware resources, spent to process the request" ) vector_io_read: int = Field(..., description="Usage of the hardware resources, spent to process the request") vector_io_write: int = Field(..., description="Usage of the hardware resources, spent to process the request")
HardwareUsage
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/initsubclass1.py
{ "start": 906, "end": 1097 }
class ____(ClassA, param1="hi", param2=3.4): def __init_subclass__(cls, param_alt1: int): super().__init_subclass__(param1="yo", param2=param_alt1) def func2(cls): pass
ClassF
python
PyCQA__pylint
doc/data/messages/u/using-final-decorator-in-unsupported-version/good.py
{ "start": 0, "end": 52 }
class ____(Animal): def lay_egg(self): ...
Playtypus
python
tqdm__tqdm
tqdm/rich.py
{ "start": 1376, "end": 2328 }
class ____(ProgressColumn): """Renders human readable transfer speed.""" def __init__(self, unit="", unit_scale=False, unit_divisor=1000): self.unit = unit self.unit_scale = unit_scale self.unit_divisor = unit_divisor super().__init__() def render(self, task): """Show data transfer speed.""" speed = task.speed if speed is None: return Text(f"? {self.unit}/s", style="progress.data.speed") if self.unit_scale: unit, suffix = filesize.pick_unit_and_suffix( speed, ["", "K", "M", "G", "T", "P", "E", "Z", "Y"], self.unit_divisor, ) else: unit, suffix = filesize.pick_unit_and_suffix(speed, [""], 1) precision = 0 if unit == 1 else 1 return Text(f"{speed/unit:,.{precision}f} {suffix}{self.unit}/s", style="progress.data.speed")
RateColumn
python
python-markdown__markdown
tests/test_syntax/extensions/test_md_in_html.py
{ "start": 1725, "end": 41971 }
class ____(TestCase): default_kwargs = {'extensions': ['md_in_html']} def test_md1_paragraph(self): self.assertMarkdownRenders( '<p markdown="1">*foo*</p>', '<p><em>foo</em></p>' ) def test_md1_p_linebreaks(self): self.assertMarkdownRenders( self.dedent( """ <p markdown="1"> *foo* </p> """ ), self.dedent( """ <p> <em>foo</em> </p> """ ) ) def test_md1_p_blank_lines(self): self.assertMarkdownRenders( self.dedent( """ <p markdown="1"> *foo* </p> """ ), self.dedent( """ <p> <em>foo</em> </p> """ ) ) def test_md1_div(self): self.assertMarkdownRenders( '<div markdown="1">*foo*</div>', self.dedent( """ <div> <p><em>foo</em></p> </div> """ ) ) def test_md1_div_linebreaks(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> *foo* </div> """ ), self.dedent( """ <div> <p><em>foo</em></p> </div> """ ) ) def test_md1_code_span(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> `<h1>code span</h1>` </div> """ ), self.dedent( """ <div> <p><code>&lt;h1&gt;code span&lt;/h1&gt;</code></p> </div> """ ) ) def test_md1_code_span_oneline(self): self.assertMarkdownRenders( '<div markdown="1">`<h1>code span</h1>`</div>', self.dedent( """ <div> <p><code>&lt;h1&gt;code span&lt;/h1&gt;</code></p> </div> """ ) ) def test_md1_code_span_unclosed(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> `<p>` </div> """ ), self.dedent( """ <div> <p><code>&lt;p&gt;</code></p> </div> """ ) ) def test_md1_code_span_script_tag(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> `<script>` </div> """ ), self.dedent( """ <div> <p><code>&lt;script&gt;</code></p> </div> """ ) ) def test_md1_div_blank_lines(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> *foo* </div> """ ), self.dedent( """ <div> <p><em>foo</em></p> </div> """ ) ) def test_md1_div_multi(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> *foo* __bar__ </div> """ ), self.dedent( """ <div> <p><em>foo</em></p> 
<p><strong>bar</strong></p> </div> """ ) ) def test_md1_div_nested(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> <div markdown="1"> *foo* </div> </div> """ ), self.dedent( """ <div> <div> <p><em>foo</em></p> </div> </div> """ ) ) def test_md1_div_multi_nest(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> <div markdown="1"> <p markdown="1">*foo*</p> </div> </div> """ ), self.dedent( """ <div> <div> <p><em>foo</em></p> </div> </div> """ ) ) def text_md1_details(self): self.assertMarkdownRenders( self.dedent( """ <details markdown="1"> <summary>Click to expand</summary> *foo* </details> """ ), self.dedent( """ <details> <summary>Click to expand</summary> <p><em>foo</em></p> </details> """ ) ) def test_md1_mix(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> A _Markdown_ paragraph before a raw child. <p markdown="1">A *raw* child.</p> A _Markdown_ tail to the raw child. </div> """ ), self.dedent( """ <div> <p>A <em>Markdown</em> paragraph before a raw child.</p> <p>A <em>raw</em> child.</p> <p>A <em>Markdown</em> tail to the raw child.</p> </div> """ ) ) def test_md1_deep_mix(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> A _Markdown_ paragraph before a raw child. A second Markdown paragraph with two lines. <div markdown="1"> A *raw* child. <p markdown="1">*foo*</p> Raw child tail. </div> A _Markdown_ tail to the raw child. A second tail item with two lines. 
<p markdown="1">More raw.</p> </div> """ ), self.dedent( """ <div> <p>A <em>Markdown</em> paragraph before a raw child.</p> <p>A second Markdown paragraph with two lines.</p> <div> <p>A <em>raw</em> child.</p> <p><em>foo</em></p> <p>Raw child tail.</p> </div> <p>A <em>Markdown</em> tail to the raw child.</p> <p>A second tail item with two lines.</p> <p>More raw.</p> </div> """ ) ) def test_md1_div_raw_inline(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> <em>foo</em> </div> """ ), self.dedent( """ <div> <p><em>foo</em></p> </div> """ ) ) def test_no_md1_paragraph(self): self.assertMarkdownRenders( '<p>*foo*</p>', '<p>*foo*</p>' ) def test_no_md1_nest(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> A _Markdown_ paragraph before a raw child. <p>A *raw* child.</p> A _Markdown_ tail to the raw child. </div> """ ), self.dedent( """ <div> <p>A <em>Markdown</em> paragraph before a raw child.</p> <p>A *raw* child.</p> <p>A <em>Markdown</em> tail to the raw child.</p> </div> """ ) ) def test_md1_nested_empty(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> A _Markdown_ paragraph before a raw empty tag. <img src="image.png" alt="An image" /> A _Markdown_ tail to the raw empty tag. </div> """ ), self.dedent( """ <div> <p>A <em>Markdown</em> paragraph before a raw empty tag.</p> <p><img src="image.png" alt="An image" /></p> <p>A <em>Markdown</em> tail to the raw empty tag.</p> </div> """ ) ) def test_md1_nested_empty_block(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> A _Markdown_ paragraph before a raw empty tag. <hr /> A _Markdown_ tail to the raw empty tag. 
</div> """ ), self.dedent( """ <div> <p>A <em>Markdown</em> paragraph before a raw empty tag.</p> <hr /> <p>A <em>Markdown</em> tail to the raw empty tag.</p> </div> """ ) ) def test_empty_tags(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> <div></div> </div> """ ), self.dedent( """ <div> <div></div> </div> """ ) ) def test_orphan_end_tag_in_raw_html(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> <div> Test </pre> Test </div> </div> """ ), self.dedent( """ <div> <div> Test </pre> Test </div> </div> """ ) ) def test_complex_nested_case(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> **test** <div> **test** <img src=""/> <code>Test</code> <span>**test**</span> <p>Test 2</p> </div> </div> """ ), self.dedent( """ <div> <p><strong>test</strong></p> <div> **test** <img src=""/> <code>Test</code> <span>**test**</span> <p>Test 2</p> </div> </div> """ ) ) def test_complex_nested_case_whitespace(self): self.assertMarkdownRenders( self.dedent( """ Text with space\t <div markdown="1">\t \t <div> **test** <img src=""/> <code>Test</code> <span>**test**</span> <div>With whitespace</div> <p>Test 2</p> </div> **test** </div> """ ), self.dedent( """ <p>Text with space </p> <div> <div> **test** <img src=""/> <code>Test</code> <span>**test**</span> <div>With whitespace</div> <p>Test 2</p> </div> <p><strong>test</strong></p> </div> """ ) ) def test_md1_intail_md1(self): self.assertMarkdownRenders( '<div markdown="1">*foo*</div><div markdown="1">*bar*</div>', self.dedent( """ <div> <p><em>foo</em></p> </div> <div> <p><em>bar</em></p> </div> """ ) ) def test_md1_no_blank_line_before(self): self.assertMarkdownRenders( self.dedent( """ A _Markdown_ paragraph with no blank line after. <div markdown="1"> A _Markdown_ paragraph in an HTML block with no blank line before. 
</div> """ ), self.dedent( """ <p>A <em>Markdown</em> paragraph with no blank line after.</p> <div> <p>A <em>Markdown</em> paragraph in an HTML block with no blank line before.</p> </div> """ ) ) def test_md1_no_line_break(self): # The div here is parsed as a span-level element. Bad input equals bad output! self.assertMarkdownRenders( 'A _Markdown_ paragraph with <div markdown="1">no _line break_.</div>', '<p>A <em>Markdown</em> paragraph with <div markdown="1">no <em>line break</em>.</div></p>' ) def test_md1_in_tail(self): self.assertMarkdownRenders( self.dedent( """ <div></div><div markdown="1"> A _Markdown_ paragraph in an HTML block in tail of previous element. </div> """ ), self.dedent( """ <div></div> <div> <p>A <em>Markdown</em> paragraph in an HTML block in tail of previous element.</p> </div> """ ) ) def test_md1_PI_oneliner(self): self.assertMarkdownRenders( '<div markdown="1"><?php print("foo"); ?></div>', self.dedent( """ <div> <?php print("foo"); ?> </div> """ ) ) def test_md1_PI_multiline(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> <?php print("foo"); ?> </div> """ ), self.dedent( """ <div> <?php print("foo"); ?> </div> """ ) ) def test_md1_PI_blank_lines(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> <?php print("foo"); ?> </div> """ ), self.dedent( """ <div> <?php print("foo"); ?> </div> """ ) ) def test_md_span_paragraph(self): self.assertMarkdownRenders( '<p markdown="span">*foo*</p>', '<p><em>foo</em></p>' ) def test_md_block_paragraph(self): self.assertMarkdownRenders( '<p markdown="block">*foo*</p>', self.dedent( """ <p> <p><em>foo</em></p> </p> """ ) ) def test_md_span_div(self): self.assertMarkdownRenders( '<div markdown="span">*foo*</div>', '<div><em>foo</em></div>' ) def test_md_block_div(self): self.assertMarkdownRenders( '<div markdown="block">*foo*</div>', self.dedent( """ <div> <p><em>foo</em></p> </div> """ ) ) def test_md_span_nested_in_block(self): self.assertMarkdownRenders( 
self.dedent( """ <div markdown="block"> <div markdown="span">*foo*</div> </div> """ ), self.dedent( """ <div> <div><em>foo</em></div> </div> """ ) ) def test_md_block_nested_in_span(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="span"> <div markdown="block">*foo*</div> </div> """ ), self.dedent( """ <div> <div><em>foo</em></div> </div> """ ) ) def test_md_block_after_span_nested_in_block(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="block"> <div markdown="span">*foo*</div> <div markdown="block">*bar*</div> </div> """ ), self.dedent( """ <div> <div><em>foo</em></div> <div> <p><em>bar</em></p> </div> </div> """ ) ) def test_nomd_nested_in_md1(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> *foo* <div> *foo* <p>*bar*</p> *baz* </div> *bar* </div> """ ), self.dedent( """ <div> <p><em>foo</em></p> <div> *foo* <p>*bar*</p> *baz* </div> <p><em>bar</em></p> </div> """ ) ) def test_md1_nested_in_nomd(self): self.assertMarkdownRenders( self.dedent( """ <div> <div markdown="1">*foo*</div> </div> """ ), self.dedent( """ <div> <div markdown="1">*foo*</div> </div> """ ) ) def test_md1_single_quotes(self): self.assertMarkdownRenders( "<p markdown='1'>*foo*</p>", '<p><em>foo</em></p>' ) def test_md1_no_quotes(self): self.assertMarkdownRenders( '<p markdown=1>*foo*</p>', '<p><em>foo</em></p>' ) def test_md_no_value(self): self.assertMarkdownRenders( '<p markdown>*foo*</p>', '<p><em>foo</em></p>' ) def test_md1_preserve_attrs(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1" id="parent"> <div markdown="1" class="foo"> <p markdown="1" class="bar baz">*foo*</p> </div> </div> """ ), self.dedent( """ <div id="parent"> <div class="foo"> <p class="bar baz"><em>foo</em></p> </div> </div> """ ) ) def test_md1_unclosed_div(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> _foo_ <div class="unclosed"> _bar_ </div> """ ), self.dedent( """ <div> <p><em>foo</em></p> <div class="unclosed"> 
_bar_ </div> </div> """ ) ) def test_md1_orphan_endtag(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> _foo_ </p> _bar_ </div> """ ), self.dedent( """ <div> <p><em>foo</em></p> </p> <p><em>bar</em></p> </div> """ ) ) def test_md1_unclosed_p(self): self.assertMarkdownRenders( self.dedent( """ <p markdown="1">_foo_ <p markdown="1">_bar_ """ ), self.dedent( """ <p><em>foo</em> </p> <p><em>bar</em> </p> """ ) ) def test_md1_nested_unclosed_p(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> <p markdown="1">_foo_ <p markdown="1">_bar_ </div> """ ), self.dedent( """ <div> <p><em>foo</em> </p> <p><em>bar</em> </p> </div> """ ) ) def test_md1_nested_comment(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> A *Markdown* paragraph. <!-- foobar --> A *Markdown* paragraph. </div> """ ), self.dedent( """ <div> <p>A <em>Markdown</em> paragraph.</p> <!-- foobar --> <p>A <em>Markdown</em> paragraph.</p> </div> """ ) ) def test_md1_nested_link_ref(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> [link]: http://example.com <div markdown="1"> [link][link] </div> </div> """ ), self.dedent( """ <div> <div> <p><a href="http://example.com">link</a></p> </div> </div> """ ) ) def test_md1_hr_only_start(self): self.assertMarkdownRenders( self.dedent( """ *emphasis1* <hr markdown="1"> *emphasis2* """ ), self.dedent( """ <p><em>emphasis1</em></p> <hr> <p><em>emphasis2</em></p> """ ) ) def test_md1_hr_self_close(self): self.assertMarkdownRenders( self.dedent( """ *emphasis1* <hr markdown="1" /> *emphasis2* """ ), self.dedent( """ <p><em>emphasis1</em></p> <hr> <p><em>emphasis2</em></p> """ ) ) def test_md1_hr_start_and_end(self): # Browsers ignore ending hr tags, so we don't try to do anything to handle them special. 
self.assertMarkdownRenders( self.dedent( """ *emphasis1* <hr markdown="1"></hr> *emphasis2* """ ), self.dedent( """ <p><em>emphasis1</em></p> <hr> <p></hr> <em>emphasis2</em></p> """ ) ) def test_md1_hr_only_end(self): # Browsers ignore ending hr tags, so we don't try to do anything to handle them special. self.assertMarkdownRenders( self.dedent( """ *emphasis1* </hr> *emphasis2* """ ), self.dedent( """ <p><em>emphasis1</em> </hr> <em>emphasis2</em></p> """ ) ) def test_md1_hr_with_content(self): # Browsers ignore ending hr tags, so we don't try to do anything to handle them special. # Content is not allowed and will be treated as normal content between two hr tags self.assertMarkdownRenders( self.dedent( """ *emphasis1* <hr markdown="1"> **content** </hr> *emphasis2* """ ), self.dedent( """ <p><em>emphasis1</em></p> <hr> <p><strong>content</strong> </hr> <em>emphasis2</em></p> """ ) ) def test_no_md1_hr_with_content(self): # Browsers ignore ending hr tags, so we don't try to do anything to handle them special. # Content is not allowed and will be treated as normal content between two hr tags self.assertMarkdownRenders( self.dedent( """ *emphasis1* <hr> **content** </hr> *emphasis2* """ ), self.dedent( """ <p><em>emphasis1</em></p> <hr> <p><strong>content</strong> </hr> <em>emphasis2</em></p> """ ) ) def test_md1_nested_abbr_ref(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> *[abbr]: Abbreviation <div markdown="1"> abbr </div> </div> """ ), self.dedent( """ <div> <div> <p><abbr title="Abbreviation">abbr</abbr></p> </div> </div> """ ), extensions=['md_in_html', 'abbr'] ) def test_md1_nested_footnote_ref(self): self.assertMarkdownRenders( self.dedent( """ <div markdown="1"> [^1]: The footnote. 
<div markdown="1"> Paragraph with a footnote.[^1] </div> </div> """ ), '<div>\n' '<div>\n' '<p>Paragraph with a footnote.<sup id="fnref:1"><a class="footnote-ref" href="#fn:1">1</a></sup></p>\n' '</div>\n' '</div>\n' '<div class="footnote">\n' '<hr />\n' '<ol>\n' '<li id="fn:1">\n' '<p>The footnote.&#160;' '<a class="footnote-backref" href="#fnref:1" title="Jump back to footnote 1 in the text">&#8617;</a>' '</p>\n' '</li>\n' '</ol>\n' '</div>', extensions=['md_in_html', 'footnotes'] ) def test_md1_code_void_tag(self): # https://github.com/Python-Markdown/markdown/issues/1075 self.assertMarkdownRenders( self.dedent( """ <div class="outer" markdown="1"> Code: `<label><input/></label>` </div> <div class="outer" markdown="1"> HTML: <label><input/></label> </div> """ ), '<div class="outer">\n' '<p>Code: <code>&lt;label&gt;&lt;input/&gt;&lt;/label&gt;</code></p>\n' '</div>\n' '<div class="outer">\n' '<p>HTML: <label><input/></label></p>\n' '</div>', extensions=['md_in_html'] ) def test_md1_code_void_tag_multiline(self): # https://github.com/Python-Markdown/markdown/issues/1075 self.assertMarkdownRenders( self.dedent( """ <div class="outer" markdown="1"> Code: ` <label> <input/> </label> ` </div> <div class="outer" markdown="1"> HTML: <label> <input/> </label> </div> """ ), '<div class="outer">\n' '<p>Code: <code>&lt;label&gt;\n' '&lt;input/&gt;\n' '&lt;/label&gt;</code></p>\n' '</div>\n' '<div class="outer">\n' '<p>HTML:\n' '<label>\n' '<input/>\n' '</label></p>\n' '</div>', extensions=['md_in_html'] ) def test_md1_oneliner_block(self): # https://github.com/Python-Markdown/markdown/issues/1074 self.assertMarkdownRenders( self.dedent( '<div class="outer" markdown="block"><div class="inner" markdown="block">*foo*</div></div>' ), '<div class="outer">\n' '<div class="inner">\n' '<p><em>foo</em></p>\n' '</div>\n' '</div>', extensions=['md_in_html'] ) def test_md1_oneliner_block_mixed(self): # https://github.com/Python-Markdown/markdown/issues/1074 self.assertMarkdownRenders( 
self.dedent( """ <div class="a" markdown="block"><div class="b" markdown="block"> <div class="c" markdown="block"><div class="d" markdown="block"> *foo* </div></div> </div></div> """ ), '<div class="a">\n' '<div class="b">\n' '<div class="c">\n' '<div class="d">\n' '<p><em>foo</em></p>\n' '</div>\n' '</div>\n' '</div>\n' '</div>', extensions=['md_in_html'] ) def test_md1_oneliner_block_tail(self): # https://github.com/Python-Markdown/markdown/issues/1074 self.assertMarkdownRenders( self.dedent( """ <div class="a" markdown="block"><div class="b" markdown="block"> **foo** </div><div class="c" markdown="block"><div class="d" markdown="block"> *bar* </div></div></div> """ ), '<div class="a">\n' '<div class="b">\n' '<p><strong>foo</strong></p>\n' '</div>\n' '<div class="c">\n' '<div class="d">\n' '<p><em>bar</em></p>\n' '</div>\n' '</div>\n' '</div>', extensions=['md_in_html'] ) def test_md1_oneliner_block_complex_start_tail(self): # https://github.com/Python-Markdown/markdown/issues/1074 self.assertMarkdownRenders( '<div class="a" markdown><div class="b" markdown>**foo**</div>' '<div class="c" markdown>*bar*</div><div class="d">*not md*</div></div>', '<div class="a">\n' '<div class="b">\n' '<p><strong>foo</strong></p>\n' '</div>\n' '<div class="c">\n' '<p><em>bar</em></p>\n' '</div>\n' '<div class="d">*not md*</div>\n' '</div>', extensions=['md_in_html'] ) def test_md1_oneliner_block_complex_fail(self): # https://github.com/Python-Markdown/markdown/issues/1074 # Nested will fail because an inline tag is only considered at the beginning if it is not preceded by text. 
self.assertMarkdownRenders( '<div class="a" markdown>**strong**<div class="b" markdown>**strong**</div></div>', '<div class="a">\n' '<p><strong>strong</strong><div class="b" markdown><strong>strong</strong></p>\n' '</div>\n' '</div>', extensions=['md_in_html'] ) def test_md1_oneliner_block_start(self): # https://github.com/Python-Markdown/markdown/issues/1074 self.assertMarkdownRenders( self.dedent( """ <div class="outer" markdown="block"><div class="inner" markdown="block"> *foo* </div></div> """ ), '<div class="outer">\n' '<div class="inner">\n' '<p><em>foo</em></p>\n' '</div>\n' '</div>', extensions=['md_in_html'] ) def test_md1_oneliner_block_span(self): # https://github.com/Python-Markdown/markdown/issues/1074 self.assertMarkdownRenders( self.dedent( '<div class="outer" markdown="block"><div class="inner" markdown="span">*foo*</div></div>' ), '<div class="outer">\n' '<div class="inner"><em>foo</em></div>\n' '</div>', extensions=['md_in_html'] ) def test_md1_oneliner_block_span_start(self): # https://github.com/Python-Markdown/markdown/issues/1074 self.assertMarkdownRenders( self.dedent( """ <div class="outer" markdown="block"><div class="inner" markdown="span"> *foo* </div></div> """ ), '<div class="outer">\n' '<div class="inner">\n' '<em>foo</em>\n' '</div>\n' '</div>', extensions=['md_in_html'] ) def test_md1_oneliner_span_block_start(self): # https://github.com/Python-Markdown/markdown/issues/1074 self.assertMarkdownRenders( self.dedent( """ <div class="outer" markdown="span"><div class="inner" markdown="block"> *foo* </div> *foo* </div> """ ), '<div class="outer">\n' '<div class="inner">\n' '<em>foo</em>\n' '</div>\n\n' '<em>foo</em></div>', extensions=['md_in_html'] ) def test_md1_code_comment(self): self.assertMarkdownRenders( self.dedent( """ <div class="outer" markdown="1"> Code: `<label><!-- **comment** --></label>` </div> <div class="outer" markdown="1"> HTML: <label><!-- **comment** --></label> </div> """ ), '<div class="outer">\n' '<p>Code: 
<code>&lt;label&gt;&lt;!-- **comment** --&gt;&lt;/label&gt;</code></p>\n' '</div>\n' '<div class="outer">\n' '<p>HTML: <label><!-- **comment** --></label></p>\n' '</div>', extensions=['md_in_html'] ) def test_md1_code_pi(self): self.assertMarkdownRenders( self.dedent( """ <div class="outer" markdown="1"> Code: `<label><?php # echo '**simple**';?></label>` </div> <div class="outer" markdown="1"> HTML: <label><?php # echo '**simple**';?></label> </div> """ ), '<div class="outer">\n' '<p>Code: <code>&lt;label&gt;&lt;?php # echo \'**simple**\';?&gt;&lt;/label&gt;</code></p>\n' '</div>\n' '<div class="outer">\n' '<p>HTML: <label><?php # echo \'**simple**\';?></label></p>\n' '</div>', extensions=['md_in_html'] ) def test_md1_code_cdata(self): self.assertMarkdownRenders( self.dedent( """ <div class="outer" markdown="1"> Code: `<label><![CDATA[some stuff]]></label>` </div> <div class="outer" markdown="1"> HTML: <label><![CDATA[some stuff]]></label> </div> """ ), '<div class="outer">\n' '<p>Code: <code>&lt;label&gt;&lt;![CDATA[some stuff]]&gt;&lt;/label&gt;</code></p>\n' '</div>\n' '<div class="outer">\n' '<p>HTML: <label><![CDATA[some stuff]]></label></p>\n' '</div>', extensions=['md_in_html'] ) def test_trailing_content_after_tag_in_md_block(self): # It should be noted that this is not the way `md_in_html` is intended to be used. # What we are specifically testing is an edge case where content was previously lost. # Lost content should not happen. self.assertMarkdownRenders( self.dedent( """ <div markdown> <div class="circle"></div>AAAAA<div class="circle"></div> </div> """ ), '<div>\n' '<div class="circle"></div>\n' '<p>AAAAA<div class="circle"></p>\n' '</div>\n' '</div>', extensions=['md_in_html'] ) def test_noname_tag(self): self.assertMarkdownRenders( self.dedent( """ <div markdown> </> </div> """ ), self.dedent( """ <div> <p>&lt;/&gt;</p> </div> """ ) ) def load_tests(loader, tests, pattern): """ Ensure `TestHTMLBlocks` doesn't get run twice by excluding it here. 
""" suite = TestSuite() for test_class in [TestDefaultwMdInHTML, TestMdInHTML, TestMarkdownInHTMLPostProcessor]: tests = loader.loadTestsFromTestCase(test_class) suite.addTests(tests) return suite
TestMdInHTML
python
coleifer__peewee
peewee.py
{ "start": 66888, "end": 68977 }
class ____(_HashableSource, Source, SelectQuery): def _get_hash(self): return hash((self.__class__, self._alias or id(self))) def _execute(self, database): if self._cursor_wrapper is None: cursor = database.execute(self) self._cursor_wrapper = self._get_cursor_wrapper(cursor) return self._cursor_wrapper @database_required def peek(self, database, n=1): rows = self.execute(database)[:n] if rows: return rows[0] if n == 1 else rows @database_required def first(self, database, n=1): if self._limit != n: self._limit = n self._cursor_wrapper = None return self.peek(database, n=n) @database_required def scalar(self, database, as_tuple=False, as_dict=False): if as_dict: return self.dicts().peek(database) row = self.tuples().peek(database) return row[0] if row and not as_tuple else row @database_required def scalars(self, database): for row in self.tuples().execute(database): yield row[0] @database_required def count(self, database, clear_limit=False): clone = self.order_by().alias('_wrapped') if clear_limit: clone._limit = clone._offset = None try: if clone._having is None and clone._group_by is None and \ clone._windows is None and clone._distinct is None and \ clone._simple_distinct is not True: clone = clone.select(SQL('1')) except AttributeError: pass return Select([clone], [fn.COUNT(SQL('1'))]).scalar(database) @database_required def exists(self, database): clone = self.columns(SQL('1')) clone._limit = 1 clone._offset = None return bool(clone.scalar()) @database_required def get(self, database): self._cursor_wrapper = None try: return self.execute(database)[0] except IndexError: pass # QUERY IMPLEMENTATIONS.
SelectBase
python
xlwings__xlwings
xlwings/constants.py
{ "start": 71617, "end": 71737 }
class ____: xlOutline = 1 # from enum XlLayoutFormType xlTabular = 0 # from enum XlLayoutFormType
LayoutFormType
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 589934, "end": 591114 }
class ____(sgqlc.types.Interface): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("repository_discussion_comments",) repository_discussion_comments = sgqlc.types.Field( sgqlc.types.non_null(DiscussionCommentConnection), graphql_name="repositoryDiscussionComments", args=sgqlc.types.ArgDict( ( ("after", sgqlc.types.Arg(String, graphql_name="after", default=None)), ( "before", sgqlc.types.Arg(String, graphql_name="before", default=None), ), ("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)), ("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)), ( "repository_id", sgqlc.types.Arg(ID, graphql_name="repositoryId", default=None), ), ( "only_answers", sgqlc.types.Arg(Boolean, graphql_name="onlyAnswers", default=False), ), ) ), )
RepositoryDiscussionCommentAuthor
python
django__django
tests/syndication_tests/feeds.py
{ "start": 2609, "end": 2732 }
class ____(TestRss2Feed): def item_guid_is_permalink(self, item): return True
TestRss2FeedWithGuidIsPermaLinkTrue
python
streamlit__streamlit
lib/tests/streamlit/runtime/state/query_params_test.py
{ "start": 19079, "end": 21165 }
class ____: """Tests for _set_item_in_dict helper function.""" @parameterized.expand( [ ("string_value", "bar", {"foo": "bar"}), ("int_to_string", 123, {"foo": "123"}), ("float_to_string", 1.5, {"foo": "1.5"}), ("list_of_strings", ["a", "b", "c"], {"foo": ["a", "b", "c"]}), ("list_of_ints_to_strings", [1, 2, 3], {"foo": ["1", "2", "3"]}), ] ) def test_sets_value(self, _name: str, value: str | list, expected: dict) -> None: """Test _set_item_in_dict sets and converts values correctly.""" target: dict[str, list[str] | str] = {} _set_item_in_dict(target, "foo", value) # type: ignore[arg-type] assert target == expected @parameterized.expand( [ ("dict_value", "foo", {"bar": "baz"}, "cannot be set to a dictionary"), ("embed_key", "embed", "true", "embed.*cannot be set"), ( "embed_options_key", "embed_options", "show_toolbar", "embed.*cannot be set", ), # Case-insensitive embed key checks ("embed_key_uppercase", "EMBED", "true", "embed.*cannot be set"), ("embed_key_mixed_case", "Embed", "true", "embed.*cannot be set"), ( "embed_options_key_uppercase", "EMBED_OPTIONS", "show_toolbar", "embed.*cannot be set", ), ( "embed_options_key_mixed_case", "Embed_Options", "show_toolbar", "embed.*cannot be set", ), ] ) def test_raises_on_invalid_input( self, _name: str, key: str, value: str | dict, match: str ) -> None: """Test _set_item_in_dict raises exception on invalid input.""" target: dict[str, list[str] | str] = {} with pytest.raises(StreamlitAPIException, match=match): _set_item_in_dict(target, key, value) # type: ignore[arg-type]
TestSetItemInDict
python
jpadilla__pyjwt
tests/test_algorithms.py
{ "start": 944, "end": 25616 }
class ____: def test_check_crypto_key_type_should_fail_when_not_using_crypto(self): """If has_crypto is False, or if _crypto_key_types is None, then this method should throw.""" algo = NoneAlgorithm() with pytest.raises(ValueError): algo.check_crypto_key_type("key") # type: ignore[arg-type] def test_none_algorithm_should_throw_exception_if_key_is_not_none(self): algo = NoneAlgorithm() with pytest.raises(InvalidKeyError): algo.prepare_key("123") def test_none_algorithm_should_throw_exception_on_to_jwk(self): algo = NoneAlgorithm() with pytest.raises(NotImplementedError): algo.to_jwk("dummy") # Using a dummy argument as is it not relevant def test_none_algorithm_should_throw_exception_on_from_jwk(self): algo = NoneAlgorithm() with pytest.raises(NotImplementedError): algo.from_jwk({}) # Using a dummy argument as is it not relevant def test_hmac_should_reject_nonstring_key(self): algo = HMACAlgorithm(HMACAlgorithm.SHA256) with pytest.raises(TypeError) as context: algo.prepare_key(object()) # type: ignore[arg-type] exception = context.value assert str(exception) == "Expected a string value" def test_hmac_should_accept_unicode_key(self): algo = HMACAlgorithm(HMACAlgorithm.SHA256) algo.prepare_key("awesome") @pytest.mark.parametrize( "key", [ "testkey2_rsa.pub.pem", "testkey2_rsa.pub.pem", "testkey_pkcs1.pub.pem", "testkey_rsa.cer", "testkey_rsa.pub", ], ) def test_hmac_should_throw_exception(self, key): algo = HMACAlgorithm(HMACAlgorithm.SHA256) with pytest.raises(InvalidKeyError): with open(key_path(key)) as keyfile: algo.prepare_key(keyfile.read()) def test_hmac_jwk_should_parse_and_verify(self): algo = HMACAlgorithm(HMACAlgorithm.SHA256) with open(key_path("jwk_hmac.json")) as keyfile: key = algo.from_jwk(keyfile.read()) signature = algo.sign(b"Hello World!", key) assert algo.verify(b"Hello World!", key, signature) @pytest.mark.parametrize("as_dict", (False, True)) def test_hmac_to_jwk_returns_correct_values(self, as_dict): algo = HMACAlgorithm(HMACAlgorithm.SHA256) 
key: Any = algo.to_jwk("secret", as_dict=as_dict) if not as_dict: key = json.loads(key) assert key == {"kty": "oct", "k": "c2VjcmV0"} def test_hmac_from_jwk_should_raise_exception_if_not_hmac_key(self): algo = HMACAlgorithm(HMACAlgorithm.SHA256) with open(key_path("jwk_rsa_pub.json")) as keyfile: with pytest.raises(InvalidKeyError): algo.from_jwk(keyfile.read()) def test_hmac_from_jwk_should_raise_exception_if_empty_json(self): algo = HMACAlgorithm(HMACAlgorithm.SHA256) with open(key_path("jwk_empty.json")) as keyfile: with pytest.raises(InvalidKeyError): algo.from_jwk(keyfile.read()) @crypto_required def test_rsa_should_parse_pem_public_key(self): algo = RSAAlgorithm(RSAAlgorithm.SHA256) with open(key_path("testkey2_rsa.pub.pem")) as pem_key: algo.prepare_key(pem_key.read()) @crypto_required def test_rsa_should_accept_pem_private_key_bytes(self): algo = RSAAlgorithm(RSAAlgorithm.SHA256) with open(key_path("testkey_rsa.priv"), "rb") as pem_key: algo.prepare_key(pem_key.read()) @crypto_required def test_rsa_should_accept_unicode_key(self): algo = RSAAlgorithm(RSAAlgorithm.SHA256) with open(key_path("testkey_rsa.priv")) as rsa_key: algo.prepare_key(rsa_key.read()) @crypto_required def test_rsa_should_reject_non_string_key(self): algo = RSAAlgorithm(RSAAlgorithm.SHA256) with pytest.raises(TypeError): algo.prepare_key(None) # type: ignore[arg-type] @crypto_required def test_rsa_verify_should_return_false_if_signature_invalid(self): algo = RSAAlgorithm(RSAAlgorithm.SHA256) message = b"Hello World!" 
sig = base64.b64decode( b"yS6zk9DBkuGTtcBzLUzSpo9gGJxJFOGvUqN01iLhWHrzBQ9ZEz3+Ae38AXp" b"10RWwscp42ySC85Z6zoN67yGkLNWnfmCZSEv+xqELGEvBJvciOKsrhiObUl" b"2mveSc1oeO/2ujkGDkkkJ2epn0YliacVjZF5+/uDmImUfAAj8lzjnHlzYix" b"sn5jGz1H07jYYbi9diixN8IUhXeTafwFg02IcONhum29V40Wu6O5tAKWlJX" b"fHJnNUzAEUOXS0WahHVb57D30pcgIji9z923q90p5c7E2cU8V+E1qe8NdCA" b"APCDzZZ9zQ/dgcMVaBrGrgimrcLbPjueOKFgSO+SSjIElKA==" ) sig += b"123" # Signature is now invalid with open(key_path("testkey_rsa.pub")) as keyfile: pub_key = cast(RSAPublicKey, algo.prepare_key(keyfile.read())) result = algo.verify(message, pub_key, sig) assert not result @crypto_required def test_ec_jwk_public_and_private_keys_should_parse_and_verify(self): tests = { "P-256": ECAlgorithm.SHA256, "P-384": ECAlgorithm.SHA384, "P-521": ECAlgorithm.SHA512, "secp256k1": ECAlgorithm.SHA256, } for curve, hash in tests.items(): algo = ECAlgorithm(hash) with open(key_path(f"jwk_ec_pub_{curve}.json")) as keyfile: pub_key = cast(EllipticCurvePublicKey, algo.from_jwk(keyfile.read())) with open(key_path(f"jwk_ec_key_{curve}.json")) as keyfile: priv_key = cast(EllipticCurvePrivateKey, algo.from_jwk(keyfile.read())) signature = algo.sign(b"Hello World!", priv_key) assert algo.verify(b"Hello World!", pub_key, signature) @crypto_required def test_ec_jwk_fails_on_invalid_json(self): algo = ECAlgorithm(ECAlgorithm.SHA512) valid_points = { "P-256": { "x": "PTTjIY84aLtaZCxLTrG_d8I0G6YKCV7lg8M4xkKfwQ4", "y": "ank6KA34vv24HZLXlChVs85NEGlpg2sbqNmR_BcgyJU", }, "P-384": { "x": "IDC-5s6FERlbC4Nc_4JhKW8sd51AhixtMdNUtPxhRFP323QY6cwWeIA3leyZhz-J", "y": "eovmN9ocANS8IJxDAGSuC1FehTq5ZFLJU7XSPg36zHpv4H2byKGEcCBiwT4sFJsy", }, "P-521": { "x": "AHKZLLOsCOzz5cY97ewNUajB957y-C-U88c3v13nmGZx6sYl_oJXu9A5RkTKqjqvjyekWF-7ytDyRXYgCF5cj0Kt", "y": "AdymlHvOiLxXkEhayXQnNCvDX4h9htZaCJN34kfmC6pV5OhQHiraVySsUdaQkAgDPrwQrJmbnX9cwlGfP-HqHZR1", }, "secp256k1": { "x": "MLnVyPDPQpNm0KaaO4iEh0i8JItHXJE0NcIe8GK1SYs", "y": "7r8d-xF7QAgT5kSRdly6M8xeg4Jz83Gs_CQPQRH65QI", }, } # Invalid JSON 
with pytest.raises(InvalidKeyError): algo.from_jwk("<this isn't json>") # Bad key type with pytest.raises(InvalidKeyError): algo.from_jwk('{"kty": "RSA"}') # Missing data with pytest.raises(InvalidKeyError): algo.from_jwk('{"kty": "EC"}') with pytest.raises(InvalidKeyError): algo.from_jwk('{"kty": "EC", "x": "1"}') with pytest.raises(InvalidKeyError): algo.from_jwk('{"kty": "EC", "y": "1"}') # Missing curve with pytest.raises(InvalidKeyError): algo.from_jwk('{"kty": "EC", "x": "dGVzdA==", "y": "dGVzdA=="}') # EC coordinates not equally long with pytest.raises(InvalidKeyError): algo.from_jwk('{"kty": "EC", "x": "dGVzdHRlc3Q=", "y": "dGVzdA=="}') # EC coordinates length invalid for curve in ("P-256", "P-384", "P-521", "secp256k1"): with pytest.raises(InvalidKeyError): algo.from_jwk( f'{{"kty": "EC", "crv": "{curve}", "x": "dGVzdA==", "y": "dGVzdA=="}}' ) # EC private key length invalid for curve, point in valid_points.items(): with pytest.raises(InvalidKeyError): algo.from_jwk( f'{{"kty": "EC", "crv": "{curve}", "x": "{point["x"]}", "y": "{point["y"]}", "d": "dGVzdA=="}}' ) @crypto_required def test_ec_private_key_to_jwk_works_with_from_jwk(self): algo = ECAlgorithm(ECAlgorithm.SHA256) with open(key_path("testkey_ec.priv")) as ec_key: orig_key = cast(EllipticCurvePrivateKey, algo.prepare_key(ec_key.read())) parsed_key = cast(EllipticCurvePrivateKey, algo.from_jwk(algo.to_jwk(orig_key))) assert parsed_key.private_numbers() == orig_key.private_numbers() assert ( parsed_key.private_numbers().public_numbers == orig_key.private_numbers().public_numbers ) @crypto_required def test_ec_public_key_to_jwk_works_with_from_jwk(self): algo = ECAlgorithm(ECAlgorithm.SHA256) with open(key_path("testkey_ec.pub")) as ec_key: orig_key = cast(EllipticCurvePublicKey, algo.prepare_key(ec_key.read())) parsed_key = cast(EllipticCurvePublicKey, algo.from_jwk(algo.to_jwk(orig_key))) assert parsed_key.public_numbers() == orig_key.public_numbers() @crypto_required 
@pytest.mark.parametrize("as_dict", (False, True)) def test_ec_to_jwk_returns_correct_values_for_public_key(self, as_dict): algo = ECAlgorithm(ECAlgorithm.SHA256) with open(key_path("testkey_ec.pub")) as keyfile: pub_key = algo.prepare_key(keyfile.read()) key: Any = algo.to_jwk(pub_key, as_dict=as_dict) if not as_dict: key = json.loads(key) expected = { "kty": "EC", "crv": "P-256", "x": "HzAcUWSlGBHcuf3y3RiNrWI-pE6-dD2T7fIzg9t6wEc", "y": "t2G02kbWiOqimYfQAfnARdp2CTycsJPhwA8rn1Cn0SQ", } assert key == expected @crypto_required @pytest.mark.parametrize("as_dict", (False, True)) def test_ec_to_jwk_returns_correct_values_for_private_key(self, as_dict): algo = ECAlgorithm(ECAlgorithm.SHA256) with open(key_path("testkey_ec.priv")) as keyfile: priv_key = algo.prepare_key(keyfile.read()) key: Any = algo.to_jwk(priv_key, as_dict=as_dict) if not as_dict: key = json.loads(key) expected = { "kty": "EC", "crv": "P-256", "x": "HzAcUWSlGBHcuf3y3RiNrWI-pE6-dD2T7fIzg9t6wEc", "y": "t2G02kbWiOqimYfQAfnARdp2CTycsJPhwA8rn1Cn0SQ", "d": "2nninfu2jMHDwAbn9oERUhRADS6duQaJEadybLaa0YQ", } assert key == expected @crypto_required def test_ec_to_jwk_raises_exception_on_invalid_key(self): algo = ECAlgorithm(ECAlgorithm.SHA256) with pytest.raises(InvalidKeyError): algo.to_jwk({"not": "a valid key"}) # type: ignore[call-overload] @crypto_required @pytest.mark.parametrize("as_dict", (False, True)) def test_ec_to_jwk_with_valid_curves(self, as_dict): tests = { "P-256": ECAlgorithm.SHA256, "P-384": ECAlgorithm.SHA384, "P-521": ECAlgorithm.SHA512, "secp256k1": ECAlgorithm.SHA256, } for curve, hash in tests.items(): algo = ECAlgorithm(hash) with open(key_path(f"jwk_ec_pub_{curve}.json")) as keyfile: pub_key = algo.from_jwk(keyfile.read()) jwk: Any = algo.to_jwk(pub_key, as_dict=as_dict) if not as_dict: jwk = json.loads(jwk) assert jwk["crv"] == curve with open(key_path(f"jwk_ec_key_{curve}.json")) as keyfile: priv_key = algo.from_jwk(keyfile.read()) jwk = algo.to_jwk(priv_key, as_dict=as_dict) if not 
as_dict: jwk = json.loads(jwk) assert jwk["crv"] == curve @crypto_required def test_ec_to_jwk_with_invalid_curve(self): algo = ECAlgorithm(ECAlgorithm.SHA256) with open(key_path("testkey_ec_secp192r1.priv")) as keyfile: priv_key = algo.prepare_key(keyfile.read()) with pytest.raises(InvalidKeyError): algo.to_jwk(priv_key) @crypto_required def test_rsa_jwk_public_and_private_keys_should_parse_and_verify(self): algo = RSAAlgorithm(RSAAlgorithm.SHA256) with open(key_path("jwk_rsa_pub.json")) as keyfile: pub_key = cast(RSAPublicKey, algo.from_jwk(keyfile.read())) with open(key_path("jwk_rsa_key.json")) as keyfile: priv_key = cast(RSAPrivateKey, algo.from_jwk(keyfile.read())) signature = algo.sign(b"Hello World!", priv_key) assert algo.verify(b"Hello World!", pub_key, signature) @crypto_required def test_rsa_private_key_to_jwk_works_with_from_jwk(self): algo = RSAAlgorithm(RSAAlgorithm.SHA256) with open(key_path("testkey_rsa.priv")) as rsa_key: orig_key = cast(RSAPrivateKey, algo.prepare_key(rsa_key.read())) parsed_key = cast(RSAPrivateKey, algo.from_jwk(algo.to_jwk(orig_key))) assert parsed_key.private_numbers() == orig_key.private_numbers() assert ( parsed_key.private_numbers().public_numbers == orig_key.private_numbers().public_numbers ) @crypto_required def test_rsa_public_key_to_jwk_works_with_from_jwk(self): algo = RSAAlgorithm(RSAAlgorithm.SHA256) with open(key_path("testkey_rsa.pub")) as rsa_key: orig_key = cast(RSAPublicKey, algo.prepare_key(rsa_key.read())) parsed_key = cast(RSAPublicKey, algo.from_jwk(algo.to_jwk(orig_key))) assert parsed_key.public_numbers() == orig_key.public_numbers() @crypto_required def test_rsa_jwk_private_key_with_other_primes_is_invalid(self): algo = RSAAlgorithm(RSAAlgorithm.SHA256) with open(key_path("jwk_rsa_key.json")) as keyfile: with pytest.raises(InvalidKeyError): keydata = json.loads(keyfile.read()) keydata["oth"] = [] algo.from_jwk(json.dumps(keydata)) @crypto_required def 
test_rsa_jwk_private_key_with_missing_values_is_invalid(self): algo = RSAAlgorithm(RSAAlgorithm.SHA256) with open(key_path("jwk_rsa_key.json")) as keyfile: with pytest.raises(InvalidKeyError): keydata = json.loads(keyfile.read()) del keydata["p"] algo.from_jwk(json.dumps(keydata)) @crypto_required def test_rsa_jwk_private_key_can_recover_prime_factors(self): algo = RSAAlgorithm(RSAAlgorithm.SHA256) with open(key_path("jwk_rsa_key.json")) as keyfile: keybytes = keyfile.read() control_key = cast(RSAPrivateKey, algo.from_jwk(keybytes)).private_numbers() keydata = json.loads(keybytes) delete_these = ["p", "q", "dp", "dq", "qi"] for field in delete_these: del keydata[field] parsed_key = cast( RSAPrivateKey, algo.from_jwk(json.dumps(keydata)) ).private_numbers() assert control_key.d == parsed_key.d assert control_key.p == parsed_key.p assert control_key.q == parsed_key.q assert control_key.dmp1 == parsed_key.dmp1 assert control_key.dmq1 == parsed_key.dmq1 assert control_key.iqmp == parsed_key.iqmp @crypto_required def test_rsa_jwk_private_key_with_missing_required_values_is_invalid(self): algo = RSAAlgorithm(RSAAlgorithm.SHA256) with open(key_path("jwk_rsa_key.json")) as keyfile: with pytest.raises(InvalidKeyError): keydata = json.loads(keyfile.read()) del keydata["p"] algo.from_jwk(json.dumps(keydata)) @crypto_required def test_rsa_jwk_raises_exception_if_not_a_valid_key(self): algo = RSAAlgorithm(RSAAlgorithm.SHA256) # Invalid JSON with pytest.raises(InvalidKeyError): algo.from_jwk("{not-a-real-key") # Missing key parts with pytest.raises(InvalidKeyError): algo.from_jwk('{"kty": "RSA"}') @crypto_required @pytest.mark.parametrize("as_dict", (False, True)) def test_rsa_to_jwk_returns_correct_values_for_public_key(self, as_dict): algo = RSAAlgorithm(RSAAlgorithm.SHA256) with open(key_path("testkey_rsa.pub")) as keyfile: pub_key = algo.prepare_key(keyfile.read()) key: Any = algo.to_jwk(pub_key, as_dict=as_dict) if not as_dict: key = json.loads(key) expected = { "e": 
"AQAB", "key_ops": ["verify"], "kty": "RSA", "n": ( "1HgzBfJv2cOjQryCwe8NEelriOTNFWKZUivevUrRhlqcmZJdCvuCJRr-xCN-" "OmO8qwgJJR98feNujxVg-J9Ls3_UOA4HcF9nYH6aqVXELAE8Hk_ALvxi96ms" "1DDuAvQGaYZ-lANxlvxeQFOZSbjkz_9mh8aLeGKwqJLp3p-OhUBQpwvAUAPg" "82-OUtgTW3nSljjeFr14B8qAneGSc_wl0ni--1SRZUXFSovzcqQOkla3W27r" "rLfrD6LXgj_TsDs4vD1PnIm1zcVenKT7TfYI17bsG_O_Wecwz2Nl19pL7gDo" "sNruF3ogJWNq1Lyn_ijPQnkPLpZHyhvuiycYcI3DiQ" ), } assert key == expected @crypto_required @pytest.mark.parametrize("as_dict", (False, True)) def test_rsa_to_jwk_returns_correct_values_for_private_key(self, as_dict): algo = RSAAlgorithm(RSAAlgorithm.SHA256) with open(key_path("testkey_rsa.priv")) as keyfile: priv_key = algo.prepare_key(keyfile.read()) key: Any = algo.to_jwk(priv_key, as_dict=as_dict) if not as_dict: key = json.loads(key) expected = { "key_ops": ["sign"], "kty": "RSA", "e": "AQAB", "n": ( "1HgzBfJv2cOjQryCwe8NEelriOTNFWKZUivevUrRhlqcmZJdCvuCJRr-xCN-" "OmO8qwgJJR98feNujxVg-J9Ls3_UOA4HcF9nYH6aqVXELAE8Hk_ALvxi96ms" "1DDuAvQGaYZ-lANxlvxeQFOZSbjkz_9mh8aLeGKwqJLp3p-OhUBQpwvAUAPg" "82-OUtgTW3nSljjeFr14B8qAneGSc_wl0ni--1SRZUXFSovzcqQOkla3W27r" "rLfrD6LXgj_TsDs4vD1PnIm1zcVenKT7TfYI17bsG_O_Wecwz2Nl19pL7gDo" "sNruF3ogJWNq1Lyn_ijPQnkPLpZHyhvuiycYcI3DiQ" ), "d": ( "rfbs8AWdB1RkLJRlC51LukrAvYl5UfU1TE6XRa4o-DTg2-03OXLNEMyVpMr" "a47weEnu14StypzC8qXL7vxXOyd30SSFTffLfleaTg-qxgMZSDw-Fb_M-pU" "HMPMEDYG-lgGma4l4fd1yTX2ATtoUo9BVOQgWS1LMZqi0ASEOkUfzlBgL04" "UoaLhPSuDdLygdlDzgruVPnec0t1uOEObmrcWIkhwU2CGQzeLtuzX6OVgPh" "k7xcnjbDurTTVpWH0R0gbZ5ukmQ2P-YuCX8T9iWNMGjPNSkb7h02s2Oe9ZR" "zP007xQ0VF-Z7xyLuxk6ASmoX1S39ujSbk2WF0eXNPRgFwQ" ), "q": ( "47hlW2f1ARuWYJf9Dl6MieXjdj2dGx9PL2UH0unVzJYInd56nqXNPrQrc5k" "ZU65KApC9n9oKUwIxuqwAAbh8oGNEQDqnuTj-powCkdC6bwA8KH1Y-wotpq" "_GSjxkNzjWRm2GArJSzZc6Fb8EuObOrAavKJ285-zMPCEfus1WZG0" ), "p": ( "7tr0z929Lp4OHIRJjIKM_rDrWMPtRgnV-51pgWsN6qdpDzns_PgFwrHcoyY" "sWIO-4yCdVWPxFOgEZ8xXTM_uwOe4VEmdZhw55Tx7axYZtmZYZbO_RIP4CG" "mlJlOFTiYnxpr-2Cx6kIeQmd-hf7fA3tL018aEzwYMbFMcnAGnEg0" ), "qi": ( 
"djo95mB0LVYikNPa-NgyDwLotLqrueb9IviMmn6zKHCwiOXReqXDX9slB8" "RA15uv56bmN04O__NyVFcgJ2ef169GZHiRFIgIy0Pl8LYkMhCYKKhyqM7g" "xN-SqGqDTKDC22j00S7jcvCaa1qadn1qbdfukZ4NXv7E2d_LO0Y2Kkc" ), "dp": ( "tgZ2-tJpEdWxu1m1EzeKa644LHVjpTRptk7H0LDc8i6SieADEuWQvkb9df" "fpY6tDFaQNQr3fQ6dtdAztmsP7l1b_ynwvT1nDZUcqZvl4ruBgDWFmKbjI" "lOCt0v9jX6MEPP5xqBx9axdkw18BnGtUuHrbzHSlUX-yh_rumpVH1SE" ), "dq": ( "xxCIuhD0YlWFbUcwFgGdBWcLIm_WCMGj7SB6aGu1VDTLr4Wu10TFWM0TNu" "hc9YPker2gpj5qzAmdAzwcfWSSvXpJTYR43jfulBTMoj8-2o3wCM0anclW" "AuKhin-kc4mh9ssDXRQZwlMymZP0QtaxUDw_nlfVrUCZgO7L1_ZsUTk" ), } assert key == expected @crypto_required def test_rsa_to_jwk_raises_exception_on_invalid_key(self): algo = RSAAlgorithm(RSAAlgorithm.SHA256) with pytest.raises(InvalidKeyError): algo.to_jwk({"not": "a valid key"}) # type: ignore[call-overload] @crypto_required def test_rsa_from_jwk_raises_exception_on_invalid_key(self): algo = RSAAlgorithm(RSAAlgorithm.SHA256) with open(key_path("jwk_hmac.json")) as keyfile: with pytest.raises(InvalidKeyError): algo.from_jwk(keyfile.read()) @crypto_required def test_ec_should_reject_non_string_key(self): algo = ECAlgorithm(ECAlgorithm.SHA256) with pytest.raises(TypeError): algo.prepare_key(None) # type: ignore[arg-type] @crypto_required def test_ec_should_accept_pem_private_key_bytes(self): algo = ECAlgorithm(ECAlgorithm.SHA256) with open(key_path("testkey_ec.priv"), "rb") as ec_key: algo.prepare_key(ec_key.read()) @crypto_required def test_ec_should_accept_ssh_public_key_bytes(self): algo = ECAlgorithm(ECAlgorithm.SHA256) with open(key_path("testkey_ec_ssh.pub")) as ec_key: algo.prepare_key(ec_key.read()) @crypto_required def test_ec_verify_should_return_false_if_signature_invalid(self): algo = ECAlgorithm(ECAlgorithm.SHA256) message = b"Hello World!" 
# Mess up the signature by replacing a known byte sig = base64.b64decode( b"AC+m4Jf/xI3guAC6w0w37t5zRpSCF6F4udEz5LiMiTIjCS4vcVe6dDOxK+M" b"mvkF8PxJuvqxP2CO3TR3okDPCl/NjATTO1jE+qBZ966CRQSSzcCM+tzcHzw" b"LZS5kbvKu0Acd/K6Ol2/W3B1NeV5F/gjvZn/jOwaLgWEUYsg0o4XVrAg65".replace( b"r", b"s" ) ) with open(key_path("testkey_ec.pub")) as keyfile: pub_key = algo.prepare_key(keyfile.read()) result = algo.verify(message, pub_key, sig) assert not result @crypto_required def test_ec_verify_should_return_false_if_signature_wrong_length(self): algo = ECAlgorithm(ECAlgorithm.SHA256) message = b"Hello World!" sig = base64.b64decode(b"AC+m4Jf/xI3guAC6w0w3") with open(key_path("testkey_ec.pub")) as keyfile: pub_key = algo.prepare_key(keyfile.read()) result = algo.verify(message, pub_key, sig) assert not result @crypto_required def test_ec_should_throw_exception_on_wrong_key(self): algo = ECAlgorithm(ECAlgorithm.SHA256) with pytest.raises(InvalidKeyError): with open(key_path("testkey_rsa.priv")) as keyfile: algo.prepare_key(keyfile.read()) with pytest.raises(InvalidKeyError): with open(key_path("testkey2_rsa.pub.pem")) as pem_key: algo.prepare_key(pem_key.read()) @crypto_required def test_rsa_pss_sign_then_verify_should_return_true(self): algo = RSAPSSAlgorithm(RSAPSSAlgorithm.SHA256) message = b"Hello World!" with open(key_path("testkey_rsa.priv")) as keyfile: priv_key = cast(RSAPrivateKey, algo.prepare_key(keyfile.read())) sig = algo.sign(message, priv_key) with open(key_path("testkey_rsa.pub")) as keyfile: pub_key = cast(RSAPublicKey, algo.prepare_key(keyfile.read())) result = algo.verify(message, pub_key, sig) assert result @crypto_required def test_rsa_pss_verify_should_return_false_if_signature_invalid(self): algo = RSAPSSAlgorithm(RSAPSSAlgorithm.SHA256) jwt_message = b"Hello World!" 
jwt_sig = base64.b64decode( b"ywKAUGRIDC//6X+tjvZA96yEtMqpOrSppCNfYI7NKyon3P7doud5v65oWNu" b"vQsz0fzPGfF7mQFGo9Cm9Vn0nljm4G6PtqZRbz5fXNQBH9k10gq34AtM02c" b"/cveqACQ8gF3zxWh6qr9jVqIpeMEaEBIkvqG954E0HT9s9ybHShgHX9mlWk" b"186/LopP4xe5c/hxOQjwhv6yDlTiwJFiqjNCvj0GyBKsc4iECLGIIO+4mC4" b"daOCWqbpZDuLb1imKpmm8Nsm56kAxijMLZnpCcnPgyb7CqG+B93W9GHglA5" b"drUeR1gRtO7vqbZMsCAQ4bpjXxwbYyjQlEVuMl73UL6sOWg==" ) jwt_sig += b"123" # Signature is now invalid with open(key_path("testkey_rsa.pub")) as keyfile: jwt_pub_key = cast(RSAPublicKey, algo.prepare_key(keyfile.read())) result = algo.verify(jwt_message, jwt_pub_key, jwt_sig) assert not result
TestAlgorithms
python
google__jax
jax/experimental/pallas/ops/gpu/blackwell_matmul_mgpu.py
{ "start": 984, "end": 1075 }
class ____(enum.IntEnum): M = 0 N = 1 @dataclasses.dataclass(frozen=True)
MatmulDimension
python
pexpect__pexpect
tests/test_performance.py
{ "start": 1184, "end": 4481 }
class ____ (PexpectTestCase.PexpectTestCase): '''Testing the performance of expect, with emphasis on wading through long inputs. ''' if sys.version_info[0] >= 3: @staticmethod def _iter_n(n): s = 'for n in range(1, %d+1): print(n)' % n return s.encode('ascii') else: @staticmethod def _iter_n(n): return 'for n in range(1, %d+1): print(n)' % n def plain_range(self, n): e = pexpect.spawn(sys.executable, timeout=100) self.assertEqual(e.expect(b'>>>'), 0) e.sendline(self._iter_n(n)) self.assertEqual(e.expect(br'\.{3}'), 0) e.sendline(b'') self.assertEqual(e.expect([b'inquisition', '%d' % n]), 1) def window_range(self, n): e = pexpect.spawn(sys.executable, timeout=100) self.assertEqual(e.expect(b'>>>'), 0) e.sendline(self._iter_n(n)) self.assertEqual(e.expect(r'\.{3}'), 0) e.sendline(b'') self.assertEqual(e.expect([b'inquisition', '%d' % n], searchwindowsize=20), 1) def exact_range(self, n): e = pexpect.spawn(sys.executable, timeout=100) self.assertEqual(e.expect_exact([b'>>>']), 0) e.sendline(self._iter_n(n)) self.assertEqual(e.expect_exact([b'...']), 0) e.sendline(b'') self.assertEqual(e.expect_exact([b'inquisition', '%d' % n],timeout=520), 1) def ewin_range(self, n): e = pexpect.spawn(sys.executable, timeout=100) self.assertEqual(e.expect_exact([b'>>>']), 0) e.sendline(self._iter_n(n)) self.assertEqual(e.expect_exact([b'...']), 0) e.sendline(b'') self.assertEqual(e.expect_exact([b'inquisition', '%d' % n], searchwindowsize=20), 1) def faster_range(self, n): e = pexpect.spawn(sys.executable, timeout=100) self.assertEqual(e.expect(b'>>>'), 0) e.sendline(('list(range(1, %d+1))' % n).encode('ascii')) self.assertEqual(e.expect([b'inquisition', '%d' % n]), 1) def test_100000(self): if platform.python_implementation() == 'PyPy': raise unittest.SkipTest("This test fails on PyPy because of REPL differences") print() start_time = time.time() self.plain_range (100000) print("100000 calls to plain_range:", (time.time() - start_time)) start_time = time.time() 
self.window_range(100000) print("100000 calls to window_range:", (time.time() - start_time)) start_time = time.time() self.exact_range (100000) print("100000 calls to exact_range:", (time.time() - start_time)) start_time = time.time() self.ewin_range (100000) print("100000 calls to ewin_range:", (time.time() - start_time)) start_time = time.time() self.faster_range(100000) print("100000 calls to faster_range:", (time.time() - start_time)) def test_large_stdout_stream(self): e = pexpect.spawn('openssl rand -base64 {}'.format(1024*1024*25), searchwindowsize=1000) resp = e.expect(['Password:', pexpect.EOF, pexpect.TIMEOUT]) assert resp == 1 # index 1 == EOF if __name__ == "__main__": unittest.main() suite = unittest.TestLoader().loadTestsFromTestCase(PerformanceTestCase)
PerformanceTestCase
python
PyCQA__pylint
tests/functional/u/unhashable_member.py
{ "start": 598, "end": 643 }
class ____(dict): ... {FromDict: 1}
FromDict
python
PyCQA__pylint
tests/functional/a/arguments_differ.py
{ "start": 4273, "end": 4360 }
class ____: def meth(self, arg, arg1): raise NotImplementedError
ParentClass
python
tensorflow__tensorflow
tensorflow/python/data/kernel_tests/tf_record_dataset_test.py
{ "start": 7756, "end": 10848 }
class ____(tf_record_test_base.TFRecordTestBase, checkpoint_test_base.CheckpointTestBase, parameterized.TestCase): def make_dataset(self, num_epochs, compression_type=None, buffer_size=None, symbolic_checkpoint=False): filenames = self._createFiles() if compression_type == "ZLIB": zlib_files = [] for i, fn in enumerate(filenames): with open(fn, "rb") as f: cdata = zlib.compress(f.read()) zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i) with open(zfn, "wb") as f: f.write(cdata) zlib_files.append(zfn) filenames = zlib_files elif compression_type == "GZIP": gzip_files = [] for i, fn in enumerate(self._filenames): with open(fn, "rb") as f: gzfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i) with gzip.GzipFile(gzfn, "wb") as gzf: gzf.write(f.read()) gzip_files.append(gzfn) filenames = gzip_files dataset = readers.TFRecordDataset( filenames, compression_type, buffer_size=buffer_size).repeat(num_epochs) if symbolic_checkpoint: options = options_lib.Options() options.experimental_symbolic_checkpoint = symbolic_checkpoint dataset = dataset.with_options(options) return dataset @combinations.generate( combinations.times( test_base.default_test_combinations(), checkpoint_test_base.default_test_combinations(), combinations.combine(symbolic_checkpoint=[True, False]))) def test(self, verify_fn, symbolic_checkpoint): num_epochs = 5 num_outputs = num_epochs * self._num_files * self._num_records verify_fn( self, lambda: self.make_dataset( num_epochs, symbolic_checkpoint=symbolic_checkpoint ), num_outputs, ) @combinations.generate( combinations.times( test_base.default_test_combinations(), checkpoint_test_base.default_test_combinations(), combinations.combine(buffer_size=[0, 5]))) def testBufferSize(self, verify_fn, buffer_size): num_epochs = 5 num_outputs = num_epochs * self._num_files * self._num_records verify_fn(self, lambda: self.make_dataset(num_epochs, buffer_size=buffer_size), num_outputs) @combinations.generate( combinations.times( 
test_base.default_test_combinations(), checkpoint_test_base.default_test_combinations(), combinations.combine(compression_type=[None, "GZIP", "ZLIB"]))) def testCompressionTypes(self, verify_fn, compression_type): num_epochs = 5 num_outputs = num_epochs * self._num_files * self._num_records # pylint: disable=g-long-lambda verify_fn( self, lambda: self.make_dataset( num_epochs, compression_type=compression_type), num_outputs) if __name__ == "__main__": test.main()
TFRecordDatasetCheckpointTest
python
python-openxml__python-docx
tests/test_drawing.py
{ "start": 461, "end": 2523 }
class ____: """Unit-test suite for `docx.drawing.Drawing` objects.""" @pytest.mark.parametrize( ("cxml", "expected_value"), [ ("w:drawing/wp:inline/a:graphic/a:graphicData/pic:pic", True), ("w:drawing/wp:anchor/a:graphic/a:graphicData/pic:pic", True), ("w:drawing/wp:inline/a:graphic/a:graphicData/a:grpSp", False), ("w:drawing/wp:anchor/a:graphic/a:graphicData/a:chart", False), ], ) def it_knows_when_it_contains_a_Picture( self, cxml: str, expected_value: bool, document_part_: Mock ): drawing = Drawing(cast(CT_Drawing, element(cxml)), document_part_) assert drawing.has_picture == expected_value def it_provides_access_to_the_image_in_a_Picture_drawing( self, document_part_: Mock, image_part_: Mock, image_: Mock ): image_part_.image = image_ document_part_.part.related_parts = {"rId1": image_part_} cxml = ( "w:drawing/wp:inline/a:graphic/a:graphicData/pic:pic/pic:blipFill/a:blip{r:embed=rId1}" ) drawing = Drawing(cast(CT_Drawing, element(cxml)), document_part_) image = drawing.image assert image is image_ def but_it_raises_when_the_drawing_does_not_contain_a_Picture(self, document_part_: Mock): drawing = Drawing( cast(CT_Drawing, element("w:drawing/wp:inline/a:graphic/a:graphicData/a:grpSp")), document_part_, ) with pytest.raises(ValueError, match="drawing does not contain a picture"): drawing.image # -- fixtures -------------------------------------------------------------------------------- @pytest.fixture def document_part_(self, request: FixtureRequest): return instance_mock(request, DocumentPart) @pytest.fixture def image_(self, request: FixtureRequest): return instance_mock(request, Image) @pytest.fixture def image_part_(self, request: FixtureRequest): return instance_mock(request, ImagePart)
DescribeDrawing
python
kamyu104__LeetCode-Solutions
Python/coin-change-2.py
{ "start": 33, "end": 378 }
class ____(object): def change(self, amount, coins): """ :type amount: int :type coins: List[int] :rtype: int """ dp = [0] * (amount+1) dp[0] = 1 for coin in coins: for i in xrange(coin, amount+1): dp[i] += dp[i-coin] return dp[amount]
Solution
python
huggingface__transformers
tests/models/idefics3/test_modeling_idefics3.py
{ "start": 13708, "end": 21900 }
class ____(GenerationTesterMixin, ModelTesterMixin, unittest.TestCase): """ Model tester for `Idefics3ForConditionalGeneration`. """ all_model_classes = (Idefics3ForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = {"image-text-to-text": Idefics3ForConditionalGeneration} if is_torch_available() else () test_resize_embeddings = True def setUp(self): self.model_tester = Idefics3VisionText2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=Idefics3Config, has_text_modality=False) @unittest.skip(reason="input_embeds cannot be passed in without input_ids") def test_inputs_embeds(): pass @unittest.skip(reason="Model does not support padding right") def test_flash_attn_2_inference_padding_right(self): pass @pytest.mark.generate @slow @unittest.skip( reason="Idefics3 doesn't support SDPA for all backbones, vision backbones has only eager/FA2 attention" ) def test_eager_matches_sdpa_generate(self): pass @unittest.skip(reason="Compile not yet supported in Idefics3 models end-to-end") @pytest.mark.torch_compile_test def test_sdpa_can_compile_dynamic(self): pass # We need to override as we need to prepare such that the image token is the last token def test_resize_tokens_embeddings(self): (original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) model_vocab_size = config.text_config.vocab_size # Retrieve the embeddings and clone theme model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix 
self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary - 1 and the image token should be the last token inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 2) n_images = self.model_tester.num_images * self.model_tester.seq_length model.model.image_token_id = model_vocab_size - 15 - 1 inputs_dict["input_ids"][:, -n_images:] = model.model.image_token_id model(**self._prepare_for_class(inputs_dict, model_class)) # Check that adding and removing tokens has not modified the first part of the embedding matrix. 
models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) model_vocab_size = config.text_config.vocab_size model.resize_token_embeddings(model_vocab_size + 10, pad_to_multiple_of=1) self.assertTrue(model.config.text_config.vocab_size + 10, model_vocab_size) model_embed = model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0] // 64, 0) self.assertTrue(model_embed.weight.shape[0], model.config.text_config.vocab_size) self.assertTrue(model.config.text_config.vocab_size, model.vocab_size) model_embed = model.resize_token_embeddings(model_vocab_size + 13, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0] // 64, 0) # Check that resizing a model to a multiple of pad_to_multiple leads to a model of exactly that size target_dimension = 128 model_embed = model.resize_token_embeddings(target_dimension, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0], target_dimension) with self.assertRaisesRegex( ValueError, "Asking to pad the embedding matrix to a multiple of `1.3`, which is not and integer. 
Please make sure to pass an integer", ): model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=1.3) # We need to override as we need to prepare such that the image token is the last token def test_resize_embeddings_untied(self): (original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common() original_config.tie_word_embeddings = False for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) model.eval() # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.text_config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary - 1 and the image token should be the last token 
inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 2) n_images = self.model_tester.num_images * self.model_tester.seq_length model.model.image_token_id = model_vocab_size - 15 - 1 inputs_dict["input_ids"][:, -n_images:] = model.model.image_token_id # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) @require_torch
Idefics3ForConditionalGenerationModelTest
python
facebook__pyre-check
source/interprocedural_analyses/taint/test/integration/attributes.py
{ "start": 2539, "end": 3192 }
class ____: def __init__(self): # TODO(T78211867): pyre does not infer List[Any], leading to a False Negative. self.buffer = [] def append(self, row): self.buffer.append(row) def sink(self): _test_sink(self.buffer) def test_no_list_inference(): o = NoListInference() o.append(_test_source()) o.sink() def tito_copy_dict(d: Any): return d.copy() def test_issue_with_tito_copy_dict(): d = {"tainted": _test_source()} copied_d = tito_copy_dict(d) # TODO(T184001071): This should be caught, but it's not. _test_sink(copied_d["tainted"]) @dataclass(frozen=True)
NoListInference
python
pandas-dev__pandas
pandas/tests/series/methods/test_get_numeric_data.py
{ "start": 82, "end": 915 }
class ____: def test_get_numeric_data_preserve_dtype(self): # get the numeric data obj = Series([1, 2, 3]) result = obj._get_numeric_data() tm.assert_series_equal(result, obj) # returned object is a shallow copy result.iloc[0] = 0 assert obj.iloc[0] == 1 obj = Series([1, "2", 3.0]) result = obj._get_numeric_data() expected = Series([], dtype=object) tm.assert_series_equal(result, expected) obj = Series([True, False, True]) result = obj._get_numeric_data() tm.assert_series_equal(result, obj) obj = Series(date_range("20130101", periods=3, unit="ns")) result = obj._get_numeric_data() expected = Series([], dtype="M8[ns]") tm.assert_series_equal(result, expected)
TestGetNumericData
python
django__django
tests/admin_views/models.py
{ "start": 29578, "end": 29895 }
class ____(models.Model): side = models.IntegerField() area = models.GeneratedField( db_persist=True, expression=models.F("side") * models.F("side"), output_field=models.BigIntegerField(), ) class Meta: required_db_features = {"supports_stored_generated_columns"}
Square
python
ray-project__ray
python/ray/_private/thirdparty/pynvml/pynvml.py
{ "start": 91274, "end": 91488 }
class ____(_PrintableStructure): _fields_ = [ ('schedulerPolicy', c_uint), ('arrMode', c_uint), ('schedulerParams', c_nvmlVgpuSchedulerParams_t), ]
c_nvmlVgpuSchedulerGetState_t
python
gevent__gevent
src/gevent/tests/test__server.py
{ "start": 18840, "end": 20329 }
class ____(TestCase): def _create_server(self): # pylint:disable=arguments-differ return self.ServerSubClass((greentest.DEFAULT_BIND_ADDR, 0), keyfile=_file('server.key'), certfile=_file('server.crt')) def get_spawn(self): return gevent.spawn def test_certificate(self): # Issue 801 from gevent import monkey, ssl # only broken if *not* monkey patched self.assertFalse(monkey.is_module_patched('ssl')) self.assertFalse(monkey.is_module_patched('socket')) self.init_server() server_host, server_port, _family = self.get_server_host_port_family() ssl.get_server_certificate((server_host, server_port)) # pylint:disable=no-member def test_wrap_socket_and_handle_wrap_failure(self): # A failure to wrap the socket doesn't have follow on effects # like failing with a UnboundLocalError. # See https://github.com/gevent/gevent/issues/1236 self.init_server() def bad_wrap(_client_socket, **_wrap_args): raise BadWrapException() self.server.wrap_socket = bad_wrap with self.assertRaises(BadWrapException): self.server._handle(None, None) # test non-socket.error exception in accept call: fatal # test error in spawn(): non-fatal # test error in spawned handler: non-fatal if __name__ == '__main__': greentest.main()
TestSSLGetCertificate
python
PyCQA__pylint
tests/functional/p/postponed/postponed_evaluation_not_activated.py
{ "start": 114, "end": 329 }
class ____: @classmethod def from_string(cls, source) -> Class: # <3.14:[undefined-variable] ... def validate_b(self, obj: OtherClass) -> bool: # <3.14:[used-before-assignment] ...
Class
python
conda__conda
conda/exceptions.py
{ "start": 41057, "end": 41579 }
class ____(CondaError): def __init__(self, file_types: dict[str, str], *args, **kwargs): type_groups = defaultdict(list) for file, file_type in file_types.items(): type_groups[file_type].append(file) lines = ["Cannot mix environment file formats.\n"] for file_type, files in type_groups.items(): lines.extend(f"'{file}' is a {file_type} format file" for file in files) super().__init__("\n".join(lines), *args, **kwargs)
EnvironmentFileTypeMismatchError
python
pallets__werkzeug
tests/test_datastructures.py
{ "start": 25952, "end": 27972 }
class ____: storage_class = ds.EnvironHeaders def test_basic_interface(self): # this happens in multiple WSGI servers because they # use a vary naive way to convert the headers; broken_env = { "HTTP_CONTENT_TYPE": "text/html", "CONTENT_TYPE": "text/html", "HTTP_CONTENT_LENGTH": "0", "CONTENT_LENGTH": "0", "HTTP_ACCEPT": "*", "wsgi.version": (1, 0), } headers = self.storage_class(broken_env) assert headers assert len(headers) == 3 assert sorted(headers) == [ ("Accept", "*"), ("Content-Length", "0"), ("Content-Type", "text/html"), ] assert not self.storage_class({"wsgi.version": (1, 0)}) assert len(self.storage_class({"wsgi.version": (1, 0)})) == 0 assert 42 not in headers def test_skip_empty_special_vars(self): env = {"HTTP_X_FOO": "42", "CONTENT_TYPE": "", "CONTENT_LENGTH": ""} headers = self.storage_class(env) assert dict(headers) == {"X-Foo": "42"} env = {"HTTP_X_FOO": "42", "CONTENT_TYPE": "", "CONTENT_LENGTH": "0"} headers = self.storage_class(env) assert dict(headers) == {"X-Foo": "42", "Content-Length": "0"} def test_return_type_is_str(self): headers = self.storage_class({"HTTP_FOO": "\xe2\x9c\x93"}) assert headers["Foo"] == "\xe2\x9c\x93" assert next(iter(headers)) == ("Foo", "\xe2\x9c\x93") def test_or(self) -> None: headers = ds.EnvironHeaders({"x": "1"}) with pytest.raises(TypeError): headers | {"y": "2"} def test_ior(self) -> None: headers = ds.EnvironHeaders({}) with pytest.raises(TypeError): headers |= {"y": "2"} def test_str(self) -> None: headers = ds.EnvironHeaders({"CONTENT_LENGTH": "50", "HTTP_HOST": "test"}) assert str(headers) == "Content-Length: 50\r\nHost: test\r\n\r\n"
TestEnvironHeaders
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/testing/assertsql.py
{ "start": 9128, "end": 11677 }
class ____(CompiledSQL): def _compile_dialect(self, execute_observed): return execute_observed.context.dialect def _compare_no_space(self, real_stmt, received_stmt): stmt = re.sub(r"[\n\t]", "", real_stmt) return received_stmt == stmt def _received_statement(self, execute_observed): received_stmt, received_params = super()._received_statement( execute_observed ) # TODO: why do we need this part? for real_stmt in execute_observed.statements: if self._compare_no_space(real_stmt.statement, received_stmt): break else: raise AssertionError( "Can't locate compiled statement %r in list of " "statements actually invoked" % received_stmt ) return received_stmt, execute_observed.context.compiled_parameters def _dialect_adjusted_statement(self, dialect): paramstyle = dialect.paramstyle stmt = re.sub(r"[\n\t]", "", self.statement) # temporarily escape out PG double colons stmt = stmt.replace("::", "!!") if paramstyle == "pyformat": stmt = re.sub(r":([\w_]+)", r"%(\1)s", stmt) else: # positional params repl = None if paramstyle == "qmark": repl = "?" elif paramstyle == "format": repl = r"%s" elif paramstyle.startswith("numeric"): counter = itertools.count(1) num_identifier = "$" if paramstyle == "numeric_dollar" else ":" def repl(m): return f"{num_identifier}{next(counter)}" stmt = re.sub(r":([\w_]+)", repl, stmt) # put them back stmt = stmt.replace("!!", "::") return stmt def _compare_sql(self, execute_observed, received_statement): stmt = self._dialect_adjusted_statement( execute_observed.context.dialect ) return received_statement == stmt def _failure_message(self, execute_observed, expected_params): return ( "Testing for compiled statement\n%r partial params %s, " "received\n%%(received_statement)r with params " "%%(received_parameters)r" % ( self._dialect_adjusted_statement( execute_observed.context.dialect ).replace("%", "%%"), repr(expected_params).replace("%", "%%"), ) )
DialectSQL
python
arrow-py__arrow
arrow/locales.py
{ "start": 27467, "end": 28887 }
class ____(Locale): names = ["nl", "nl-nl"] past = "{0} geleden" future = "over {0}" timeframes = { "now": "nu", "second": "een seconde", "seconds": "{0} seconden", "minute": "een minuut", "minutes": "{0} minuten", "hour": "een uur", "hours": "{0} uur", "day": "een dag", "days": "{0} dagen", "week": "een week", "weeks": "{0} weken", "month": "een maand", "months": "{0} maanden", "year": "een jaar", "years": "{0} jaar", } # In Dutch names of months and days are not starting with a capital letter # like in the English language. month_names = [ "", "januari", "februari", "maart", "april", "mei", "juni", "juli", "augustus", "september", "oktober", "november", "december", ] month_abbreviations = [ "", "jan", "feb", "mrt", "apr", "mei", "jun", "jul", "aug", "sep", "okt", "nov", "dec", ] day_names = [ "", "maandag", "dinsdag", "woensdag", "donderdag", "vrijdag", "zaterdag", "zondag", ] day_abbreviations = ["", "ma", "di", "wo", "do", "vr", "za", "zo"]
DutchLocale
python
ApeWorX__ape
src/ape/plugins/__init__.py
{ "start": 583, "end": 2486 }
class ____( Config, AccountPlugin, CompilerPlugin, ConversionPlugin, DependencyPlugin, EcosystemPlugin, ExplorerPlugin, NetworkPlugin, ProjectPlugin, ProviderPlugin, QueryPlugin, ): pass # All hookspecs are registered pluggy_manager.add_hookspecs(AllPluginHooks) def get_hooks(plugin_type): return [name for name, method in plugin_type.__dict__.items() if hasattr(method, "ape_spec")] def register(plugin_type: type[PluginType], **hookimpl_kwargs) -> Callable: """ Register your plugin to ape. You must call this decorator to get your plugins included in ape's plugin ecosystem. Usage example:: @plugins.register(plugins.AccountPlugin) # 'register()' example def account_types(): return AccountContainer, KeyfileAccount Args: plugin_type (type[:class:`~ape.plugins.pluggy_patch.PluginType`]): The plugin type to register. hookimpl_kwargs: Return-values required by the plugin type. Returns: Callable """ # NOTE: we are basically checking that `plugin_type` # is one of the parent classes of `Plugins` if not issubclass(AllPluginHooks, plugin_type): raise PluginError("Not a valid plugin type to register.") def check_hook(plugin_type, hookimpl_kwargs, fn): fn = hookimpl(fn, **hookimpl_kwargs) if not hasattr(plugin_type, fn.__name__): hooks = get_hooks(plugin_type) raise PluginError( f"Registered function `{fn.__name__}` is not" f" a valid hook for {plugin_type.__name__}, must be one of:" f" {hooks}." ) return fn # NOTE: Get around issue with using `plugin_type` raw in `check_hook` return functools.partial(check_hook, plugin_type, hookimpl_kwargs) __all__ = [ "register", ]
AllPluginHooks
python
joblib__joblib
joblib/numpy_pickle.py
{ "start": 1713, "end": 11368 }
class ____(object): """An object to be persisted instead of numpy arrays. This object is used to hack into the pickle machinery and read numpy array data from our custom persistence format. More precisely, this object is used for: * carrying the information of the persisted array: subclass, shape, order, dtype. Those ndarray metadata are used to correctly reconstruct the array with low level numpy functions. * determining if memmap is allowed on the array. * reading the array bytes from a file. * reading the array using memorymap from a file. * writing the array bytes to a file. Attributes ---------- subclass: numpy.ndarray subclass Determine the subclass of the wrapped array. shape: numpy.ndarray shape Determine the shape of the wrapped array. order: {'C', 'F'} Determine the order of wrapped array data. 'C' is for C order, 'F' is for fortran order. dtype: numpy.ndarray dtype Determine the data type of the wrapped array. allow_mmap: bool Determine if memory mapping is allowed on the wrapped array. Default: False. """ def __init__( self, subclass, shape, order, dtype, allow_mmap=False, numpy_array_alignment_bytes=NUMPY_ARRAY_ALIGNMENT_BYTES, ): """Constructor. Store the useful information for later.""" self.subclass = subclass self.shape = shape self.order = order self.dtype = dtype self.allow_mmap = allow_mmap # We make numpy_array_alignment_bytes an instance attribute to allow us # to change our mind about the default alignment and still load the old # pickles (with the previous alignment) correctly self.numpy_array_alignment_bytes = numpy_array_alignment_bytes def safe_get_numpy_array_alignment_bytes(self): # NumpyArrayWrapper instances loaded from joblib <= 1.1 pickles don't # have an numpy_array_alignment_bytes attribute return getattr(self, "numpy_array_alignment_bytes", None) def write_array(self, array, pickler): """Write array bytes to pickler file handle. 
This function is an adaptation of the numpy write_array function available in version 1.10.1 in numpy/lib/format.py. """ # Set buffer size to 16 MiB to hide the Python loop overhead. buffersize = max(16 * 1024**2 // array.itemsize, 1) if array.dtype.hasobject: # We contain Python objects so we cannot write out the data # directly. Instead, we will pickle it out with version 5 of the # pickle protocol. pickle.dump(array, pickler.file_handle, protocol=5) else: numpy_array_alignment_bytes = self.safe_get_numpy_array_alignment_bytes() if numpy_array_alignment_bytes is not None: current_pos = pickler.file_handle.tell() pos_after_padding_byte = current_pos + 1 padding_length = numpy_array_alignment_bytes - ( pos_after_padding_byte % numpy_array_alignment_bytes ) # A single byte is written that contains the padding length in # bytes padding_length_byte = int.to_bytes( padding_length, length=1, byteorder="little" ) pickler.file_handle.write(padding_length_byte) if padding_length != 0: padding = b"\xff" * padding_length pickler.file_handle.write(padding) for chunk in pickler.np.nditer( array, flags=["external_loop", "buffered", "zerosize_ok"], buffersize=buffersize, order=self.order, ): pickler.file_handle.write(chunk.tobytes("C")) def read_array(self, unpickler, ensure_native_byte_order): """Read array from unpickler file handle. This function is an adaptation of the numpy read_array function available in version 1.10.1 in numpy/lib/format.py. """ if len(self.shape) == 0: count = 1 else: # joblib issue #859: we cast the elements of self.shape to int64 to # prevent a potential overflow when computing their product. shape_int64 = [unpickler.np.int64(x) for x in self.shape] count = unpickler.np.multiply.reduce(shape_int64) # Now read the actual data. if self.dtype.hasobject: # The array contained Python objects. We need to unpickle the data. 
array = pickle.load(unpickler.file_handle) else: numpy_array_alignment_bytes = self.safe_get_numpy_array_alignment_bytes() if numpy_array_alignment_bytes is not None: padding_byte = unpickler.file_handle.read(1) padding_length = int.from_bytes(padding_byte, byteorder="little") if padding_length != 0: unpickler.file_handle.read(padding_length) # This is not a real file. We have to read it the # memory-intensive way. # crc32 module fails on reads greater than 2 ** 32 bytes, # breaking large reads from gzip streams. Chunk reads to # BUFFER_SIZE bytes to avoid issue and reduce memory overhead # of the read. In non-chunked case count < max_read_count, so # only one read is performed. max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, self.dtype.itemsize) array = unpickler.np.empty(count, dtype=self.dtype) for i in range(0, count, max_read_count): read_count = min(max_read_count, count - i) read_size = int(read_count * self.dtype.itemsize) data = _read_bytes(unpickler.file_handle, read_size, "array data") array[i : i + read_count] = unpickler.np.frombuffer( data, dtype=self.dtype, count=read_count ) del data if self.order == "F": array.shape = self.shape[::-1] array = array.transpose() else: array.shape = self.shape if ensure_native_byte_order: # Detect byte order mismatch and swap as needed. 
array = _ensure_native_byte_order(array) return array def read_mmap(self, unpickler): """Read an array using numpy memmap.""" current_pos = unpickler.file_handle.tell() offset = current_pos numpy_array_alignment_bytes = self.safe_get_numpy_array_alignment_bytes() if numpy_array_alignment_bytes is not None: padding_byte = unpickler.file_handle.read(1) padding_length = int.from_bytes(padding_byte, byteorder="little") # + 1 is for the padding byte offset += padding_length + 1 if unpickler.mmap_mode == "w+": unpickler.mmap_mode = "r+" marray = make_memmap( unpickler.filename, dtype=self.dtype, shape=self.shape, order=self.order, mode=unpickler.mmap_mode, offset=offset, ) # update the offset so that it corresponds to the end of the read array unpickler.file_handle.seek(offset + marray.nbytes) if ( numpy_array_alignment_bytes is None and current_pos % NUMPY_ARRAY_ALIGNMENT_BYTES != 0 ): message = ( f"The memmapped array {marray} loaded from the file " f"{unpickler.file_handle.name} is not byte aligned. " "This may cause segmentation faults if this memmapped array " "is used in some libraries like BLAS or PyTorch. " "To get rid of this warning, regenerate your pickle file " "with joblib >= 1.2.0. " "See https://github.com/joblib/joblib/issues/563 " "for more details" ) warnings.warn(message) return marray def read(self, unpickler, ensure_native_byte_order): """Read the array corresponding to this wrapper. Use the unpickler to get all information to correctly read the array. Parameters ---------- unpickler: NumpyUnpickler ensure_native_byte_order: bool If true, coerce the array to use the native endianness of the host system. Returns ------- array: numpy.ndarray """ # When requested, only use memmap mode if allowed. if unpickler.mmap_mode is not None and self.allow_mmap: assert not ensure_native_byte_order, ( "Memmaps cannot be coerced to a given byte order, " "this code path is impossible." 
) array = self.read_mmap(unpickler) else: array = self.read_array(unpickler, ensure_native_byte_order) # Manage array subclass case if hasattr(array, "__array_prepare__") and self.subclass not in ( unpickler.np.ndarray, unpickler.np.memmap, ): # We need to reconstruct another subclass new_array = _reconstruct(self.subclass, (0,), "b") return new_array.__array_prepare__(array) else: return array ############################################################################### # Pickler classes
NumpyArrayWrapper
python
gevent__gevent
src/greentest/3.10/test_socket.py
{ "start": 169332, "end": 169722 }
class ____(SendrecvmsgSCTPFlagsBase, SendrecvmsgConnectedBase, ConnectedStreamTestMixin, SCTPStreamBase): pass @requireAttrs(socket.socket, "sendmsg") @unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX") @requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
SendrecvmsgSCTPStreamTestBase
python
django-haystack__django-haystack
test_haystack/test_app_using_appconfig/models.py
{ "start": 48, "end": 113 }
class ____(Model): text = CharField(max_length=140)
MicroBlogPost
python
pyca__cryptography
src/cryptography/x509/extensions.py
{ "start": 54914, "end": 56117 }
class ____(ExtensionType): oid = CRLEntryExtensionOID.INVALIDITY_DATE def __init__(self, invalidity_date: datetime.datetime) -> None: if not isinstance(invalidity_date, datetime.datetime): raise TypeError("invalidity_date must be a datetime.datetime") self._invalidity_date = invalidity_date def __repr__(self) -> str: return f"<InvalidityDate(invalidity_date={self._invalidity_date})>" def __eq__(self, other: object) -> bool: if not isinstance(other, InvalidityDate): return NotImplemented return self.invalidity_date == other.invalidity_date def __hash__(self) -> int: return hash(self.invalidity_date) @property def invalidity_date(self) -> datetime.datetime: return self._invalidity_date @property def invalidity_date_utc(self) -> datetime.datetime: if self._invalidity_date.tzinfo is None: return self._invalidity_date.replace(tzinfo=datetime.timezone.utc) else: return self._invalidity_date.astimezone(tz=datetime.timezone.utc) def public_bytes(self) -> bytes: return rust_x509.encode_extension_value(self)
InvalidityDate
python
huggingface__transformers
src/transformers/generation/continuous_batching/cache.py
{ "start": 2647, "end": 20311 }
class ____: """ Manages the cache for a paged attention mechanism, inspired by VLLM's hybrid allocator. The cache relies on making groups of layers to reduce the complexity of cache management and fragmentation. The cache uses a three-level hierarchy: - Pages: The smallest unit of cache, a page has a size of [num_heads, head_size], which is the space needed to store the key or value states for one token and one layer. For a model with only full-attention layers, to store the KV cache of one token, we need `2 * num_layers` pages: key and values each take `num_layers` pages. Pages are grouped into blocks: - Blocks: A block is a collection of `block_size` pages, serving as the allocation unit to reduce management complexity and fragmentation. Cache is allocated and freed block by block, not page by page. One block is allocated to one layer group, which only has one attention type, like full-attention or sliding-attention. If all layers in the model have the same attention type, then all layers will be in the same group. There is more than one group if and only if the model has a mixed attention types, like layers with full-attention and layers with sliding-attention. - Cache tensors: The physical supports for the cache. There are as many cache tensors as there are layer in a layer group, and the shape of the cache tensor is `[num_blocks * block_size, num_heads, head_size]`. Grouping layers into groups is useful because when we allocate one block to a group N, the block allocated is the same for all layers in group N, equivalently it is allocated across all cache tensors. This allows us to efficiently allocate and free blocks, and to efficiently read and write key and value states. For instance, imagine we have 8 blocks of cache and a model with two layer groups: a full-attention group with 3 layers and a sliding-attention group with 3 layers. 
At creation time, the physical cache tensors look like this: cache_tensor_0: □ □ □ □ □ □ □ □ cache_tensor_1: □ □ □ □ □ □ □ □ cache_tensor_2: □ □ □ □ □ □ □ □ where □ means the blocks is not allocated to any layer group yet. We have 3 cache tensors because there are 3 layers per group. We allocate 1 block to each group, after allocation, the cache tensors look like this: cache_tensor_0: ✖ ◉ □ □ □ □ □ □ cache_tensor_1: ✖ ◉ □ □ □ □ □ □ cache_tensor_2: ✖ ◉ □ □ □ □ □ □ where ✖ means the block is allocated to the full-attention group, and ◉ means the block is allocated to the sliding-attention group. Now, if we continue to generate, and the sliding window has been reached, we only need to allocate a new block for the full-attention group, and the cache tensors look like this: cache_tensor_0: ✖ ◉ ✖ □ □ □ □ □ cache_tensor_1: ✖ ◉ ✖ □ □ □ □ □ cache_tensor_2: ✖ ◉ ✖ □ □ □ □ □ And after further generation, when we need a new block allocated: cache_tensor_0: ✖ ◉ ✖ ✖ □ □ □ □ cache_tensor_1: ✖ ◉ ✖ ✖ □ □ □ □ cache_tensor_2: ✖ ◉ ✖ ✖ □ □ □ □ This would not have been possible if all layers were in the same group: we would have had to allocate a new block for the sliding-attention group, although it is not needed. """ def __init__( self, config: PreTrainedConfig, generation_config: GenerationConfig, device: torch.device, dtype: torch.dtype = torch.float16, tp_size: int | None = None, allow_prefix_sharing: bool = True, ) -> None: """Initialize a paged attention cache for efficient memory usage. Also turns in prefix sharing if the model has only full attention layers. Args: config: Model configuration generation_config: Generation configuration containing cache parameters device: Device for the cache tensors dtype: Data type of the cache tp_size: Tensor parallelism size allow_prefix_sharing: A flag to allow prefix sharing if the model has only full attention layers. 
""" self.config = config self.dtype = dtype self.device = device # Extract model dimensions kv_heads = getattr(config, "num_key_value_heads", None) self.num_key_value_heads: int = kv_heads if kv_heads is not None else config.num_attention_heads head_dim = getattr(config, "head_dim", None) self.head_dim: int = head_dim if head_dim is not None else config.hidden_size // config.num_attention_heads # Extract cache dimensions self.block_size = getattr(generation_config, "block_size", 32) # Group layers depending on the attention mix layer_groups, group_types = group_layers_by_attn_type(config) group_size = len(layer_groups[0]) self.num_groups = len(layer_groups) self.sliding_windows = {} self.layer_index_to_group_indices = {} for i, group in enumerate(layer_groups): sliding_window = config.sliding_window if group_types[i] == "sliding_attention" else 1 for j, layer in enumerate(group): self.layer_index_to_group_indices[layer] = (i, j) self.sliding_windows[layer] = sliding_window # Handle TP (or dont) if tp_size is not None and tp_size > 1: if self.num_key_value_heads % tp_size != 0: raise ValueError( f"Number of key value heads {self.num_key_value_heads} must be divisible by tensor parallel size {tp_size}." ) # If the model is using tensor parallelism, we need to adjust the number of heads accordingly. # self.num_key_value_heads //= tp_size # TODO: why is this commented out? 
# Infer number of blocks and max batch tokens page_size = self.head_dim * self.num_key_value_heads if "flash" in self.config._attn_implementation: num_attention_masks = 0 # only used to compute the default memory footprint args elif "sliding_attention" in group_types: # TODO: when we generalize to allow for block-attn, we can use `num_attention_masks=sum(set(group_types))` num_attention_masks = 2 else: num_attention_masks = 1 memory_handler = PagedAttentionMemoryHandler( block_size=self.block_size, page_size=page_size, num_groups=self.num_groups, group_size=group_size, peak_activation_per_token=(config.hidden_size + config.vocab_size), num_attention_masks=num_attention_masks, ) num_blocks, max_batch_tokens = memory_handler.infer_num_blocks_and_max_batch_tokens( num_blocks=getattr(generation_config, "num_blocks", None), max_batch_tokens=getattr(generation_config, "max_batch_tokens", None), max_memory_percent=getattr( generation_config, "max_memory", 0.8 ), # FIXME: it seems we overcommit memory, was changed from 0.9 which caused OOMs in our benchmarking CI cache_dtype=self.dtype, ) # Add the inferred attributes to the class self.num_blocks = num_blocks self.max_batch_tokens = max_batch_tokens logger.info( f"PagedAttentionCache initialized with {self.num_blocks = }, {self.block_size = }, {page_size = }, " f"{self.max_batch_tokens = } {num_attention_masks = }" ) # Initialize the cache self.key_cache: list[torch.Tensor] = [] self.value_cache: list[torch.Tensor] = [] # We add two extra tokens to the cache to handle padding and generally discard unwanted tokens self.cache_shape = (num_blocks * self.block_size + 2, self.num_key_value_heads, self.head_dim) for _ in range(group_size): new_layer_key_cache = torch.empty(self.cache_shape, dtype=self.dtype, device=self.device) new_layer_value_cache = torch.empty(self.cache_shape, dtype=self.dtype, device=self.device) torch._dynamo.mark_static_address(new_layer_key_cache) torch._dynamo.mark_static_address(new_layer_value_cache) 
self.key_cache.append(new_layer_key_cache) self.value_cache.append(new_layer_value_cache) logger.info(f"{self.cache_shape = } {self.key_cache[0].shape = } {self.key_cache[0].numel() = }") # Block management data structures self.group_cache_managers: list[CacheAllocator] = [] for i, group_type in enumerate(group_types): if group_type == "full_attention": cm = FullAttentionCacheAllocator(i, self.block_size) elif group_type == "sliding_attention": cm = SlidingAttentionCacheAllocator(i, self.block_size, config.sliding_window) else: raise ValueError(f"Invalid group type: {group_type}") self.group_cache_managers.append(cm) # We only use prefix sharing if the whole model has only full attention layers self.use_prefix_sharing = allow_prefix_sharing and group_types == ["full_attention"] self._block_manager = BlockManager(num_blocks, self.block_size, self.use_prefix_sharing) self.blocks_to_complete: dict[str, int] = {} self._total_prefix_length: int = 0 # a counter to measure the impact of prefix sharing, also used in tests @traced def allocate_blocks(self, n_blocks: int, state: RequestState) -> int: """Allocate cache blocks across all layer groups for a given request. Actual allocation is done by the cache managers, and this method only returns the maximum number of blocks actually allocated across all managers.""" max_allocated = 0 for cm in self.group_cache_managers: allocated = cm.allocate_blocks(n_blocks, state.request_id, self._block_manager) if allocated is None: return None max_allocated = max(max_allocated, allocated) return max_allocated @traced def free_blocks(self, request_id: str) -> None: """Free all allocated cache blocks for a given request across all layer groups. 
Actual deallocation is done by the cache managers.""" for cm in self.group_cache_managers: cm.free_blocks(request_id, self._block_manager) def get_num_free_blocks(self) -> int: """Get the current number of unallocated blocks available for new requests.""" return self._block_manager.num_free_blocks @traced def extend_read_indices( self, request_id: str, past_length: int, query_length: int, read_index: list[list[int]] ) -> None: """Retrieve physical cache indices for reading KV states in the cache across all layer groups. This method coordinates with all cache managers to build the complete set of read indices needed for attention computation. """ for cm, read_indices in zip(self.group_cache_managers, read_index): indices = cm.get_read_indices(request_id, past_length, query_length) read_indices.extend(indices) @traced def extend_write_indices( self, request_id: str, past_length: int, query_length: int, write_index: list[list[int]] ) -> None: """Retrieve physical cache indices for writing new KV states to the cache across all layer groups. This method coordinates with all cache managers to build the complete set of write indices needed to store computed KV states.""" for cm, write_indices in zip(self.group_cache_managers, write_index): indices = cm.get_write_indices(request_id, past_length, query_length) write_indices.extend(indices) @traced def get_seqlens_k(self, request_id: str, past_length: int, query_length: int) -> dict[str, int]: """Retrieve the key sequence length for the given request_id across all layer types. 
Returns a dictionary of layer types to their corresponding key sequence lengths.""" seqlens_k = {} for cm in self.group_cache_managers: attn_type, seqlen_k = cm.get_seqlens_k(request_id, past_length, query_length) seqlens_k[attn_type] = seqlen_k return seqlens_k @traced def update( self, key_states: torch.Tensor, # shape [1, num_kv_heads, seqlen_kv, head_dim] value_states: torch.Tensor, # shape [1, num_kv_heads, seqlen_kv, head_dim] layer_idx: int, read_index: list[torch.Tensor], # shape [num_layer_groups, seqlen_kv + past_length] write_index: list[torch.Tensor], # shape [num_layer_groups, seqlen_q] ) -> tuple[torch.Tensor, torch.Tensor]: # shape [seqlen_kv + past_length, num_kv_heads, head_dim] """Update the cache with new key-value states for a specific layer. This method writes new KV states to the appropriate cache locations. The behavior differs based on the layer's attention type: - Full attention: New KV states are written to cache, then complete sequence is read from cache - Sliding window: Old KV is read from cache along with extra spaces for the new KV, then new KV is written to cache. This is because new KV might overwrite the old KV, so we need to read the old KV first. Returns the complete KV states (cached + new) for attention computation. 
""" # Retrieve the layer read and write indices, and if there is a sliding window group_idx, layer_idx_in_group = self.layer_index_to_group_indices[layer_idx] layer_read_index = read_index[group_idx] layer_write_index = write_index[group_idx] # Select the correct cache k_cache = self.key_cache[layer_idx_in_group] v_cache = self.value_cache[layer_idx_in_group] # Transpose the key and value states to match the cache shape, after which shape is [seqlen_kv, num_kv_heads, head_dim] key_states = key_states.transpose(1, 2).squeeze(0) value_states = value_states.transpose(1, 2).squeeze(0) # Case: full attention sliding_window = self.sliding_windows[layer_idx] if sliding_window == 1: k_cache[layer_write_index, :, :] = key_states v_cache[layer_write_index, :, :] = value_states key_states_with_cache = k_cache[layer_read_index, :, :] value_states_with_cache = v_cache[layer_read_index, :, :] # Case: sliding window -- we need to be careful of read/write order because of chunked prefill, because it's # the only case where you may write over cache you need to use else: # Add the cache to the key and value states mask = (layer_read_index == -1).unsqueeze(-1).unsqueeze(-1) # TODO: should this be precomputed? key_states_with_cache = k_cache[layer_read_index, :, :] key_states_with_cache.masked_scatter_(mask, key_states) value_states_with_cache = v_cache[layer_read_index, :, :] value_states_with_cache.masked_scatter_(mask, value_states) # Write new KV values to the cache k_cache[layer_write_index, :, :] = key_states v_cache[layer_write_index, :, :] = value_states # Return the new KV values return key_states_with_cache, value_states_with_cache def search_prefix_match(self, request_id: str, prompt_ids: list[int]) -> int: """Searches for a prefix match in the cache for the given (prompts_ids). If one is found, we reference the matching blocks in the (request_id), increase the reference count of the blocks and return the number of blocks that match. 
If no prefix match is found, we return 0.""" current_hash = None allocated_blocks = [] for b in range(len(prompt_ids) // self.block_size): tokens = prompt_ids[b * self.block_size : (b + 1) * self.block_size] current_hash = self._block_manager.compute_hash(current_hash, tokens) block_id = self._block_manager._hash_to_id.get(current_hash) if block_id is not None: allocated_blocks.append(block_id) self._block_manager.increase_ref_count(block_id) else: break # If we found a matching prefix, we reference the blocks in the request if allocated_blocks: logger.debug(f"Found prefix match for request {request_id} with {len(allocated_blocks)} blocks") cm = self.group_cache_managers[0] cm.block_table[request_id] = allocated_blocks prefix_length = len(allocated_blocks) * self.block_size self._total_prefix_length += prefix_length return prefix_length def mark_blocks_as_complete(self, state: RequestState) -> None: """Marks the blocks that have been computed in the forward pass as complete. If prefix sharing is off, this is a no-op.""" num_complete_blocks = 0 if not self.use_prefix_sharing else self.blocks_to_complete.pop(state.request_id) if num_complete_blocks == 0: return None cm = self.group_cache_managers[0] # if prefix sharing is on, there is only one group self._block_manager.mark_blocks_as_complete( num_complete_blocks=num_complete_blocks, allocated_blocks=cm.block_table[state.request_id], prompt_ids=(state.initial_tokens + state.generated_tokens), ) # TODO: rework computation with the groups and their sizes
PagedAttentionCache
python
numba__numba
numba/tests/npyufunc/test_ufuncbuilding.py
{ "start": 13606, "end": 15493 }
class ____(TestCase): def test_all(self): # note: no signatures specified @vectorize(nopython=True) def new_ufunc(hundreds, tens, ones): return 100*hundreds + 10*tens + ones # give it integers a = np.array([1, 2, 3], dtype=np.int64) b = np.array([4, 5, 6], dtype=np.int64) c = np.array([7, 8, 9], dtype=np.int64) all_np = new_ufunc(a, b, c) self.assertIsInstance(all_np, np.ndarray) self.assertEqual(all_np.tolist(), [147, 258, 369]) nep13_1 = new_ufunc(NEP13Array(a), b, c) self.assertIsInstance(nep13_1, NEP13Array) self.assertEqual(nep13_1.tolist(), [147, 258, 369]) nep13_2 = new_ufunc(a, NEP13Array(b), c) self.assertIsInstance(nep13_2, NEP13Array) self.assertEqual(nep13_2.tolist(), [147, 258, 369]) nep13_3 = new_ufunc(a, b, NEP13Array(c)) self.assertIsInstance(nep13_3, NEP13Array) self.assertEqual(nep13_3.tolist(), [147, 258, 369]) # give it floats a = np.array([1.1, 2.2, 3.3], dtype=np.float64) b = np.array([4.4, 5.5, 6.6], dtype=np.float64) c = np.array([7.7, 8.8, 9.9], dtype=np.float64) all_np = new_ufunc(a, b, c) self.assertIsInstance(all_np, np.ndarray) self.assertEqual(all_np.tolist(), [161.7, 283.8, 405.9]) nep13_1 = new_ufunc(NEP13Array(a), b, c) self.assertIsInstance(nep13_1, NEP13Array) self.assertEqual(nep13_1.tolist(), [161.7, 283.8, 405.9]) nep13_2 = new_ufunc(a, NEP13Array(b), c) self.assertIsInstance(nep13_2, NEP13Array) self.assertEqual(nep13_2.tolist(), [161.7, 283.8, 405.9]) nep13_3 = new_ufunc(a, b, NEP13Array(c)) self.assertIsInstance(nep13_3, NEP13Array) self.assertEqual(nep13_3.tolist(), [161.7, 283.8, 405.9])
TestNEP13WithoutSignature
python
bokeh__bokeh
src/bokeh/models/glyphs.py
{ "start": 60940, "end": 62330 }
class ____(Glyph, LineGlyph, FillGlyph, HatchGlyph): """ Vertical strips of infinite height. """ # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) __example__ = "examples/reference/models/VStrip.py" _args = ("x0", "x1") x0 = NumberSpec(default=field("x0"), help=""" The x-coordinates of the coordinates of one side of the strips. """) x1 = NumberSpec(default=field("x1"), help=""" The x-coordinates of the coordinates of the other side of the strips. """) line_props = Include(LineProps, help=""" The {prop} values for the strips. """) fill_props = Include(FillProps, help=""" The {prop} values for the strips. """) hatch_props = Include(HatchProps, help=""" The {prop} values for the strips. """) #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
VStrip
python
huggingface__transformers
src/transformers/models/deepseek_v2/modular_deepseek_v2.py
{ "start": 21763, "end": 21821 }
class ____(LlamaForCausalLM): pass
DeepseekV2ForCausalLM
python
PrefectHQ__prefect
tests/server/orchestration/api/test_flow_runs.py
{ "start": 17858, "end": 21356 }
class ____: async def test_read_flow_run(self, flow, flow_run, client): # make sure we we can read the flow run correctly response = await client.get(f"/flow_runs/{flow_run.id}") assert response.status_code == status.HTTP_200_OK, response.text assert response.json()["id"] == str(flow_run.id) assert response.json()["flow_id"] == str(flow.id) assert response.json()["deployment_version"] is None @pytest.fixture async def flow_run_with_deployment_version(self, flow, session): flow_run = await models.flow_runs.create_flow_run( session=session, flow_run=schemas.core.FlowRun( flow_id=flow.id, flow_version="1.0", deployment_version="Deployment Version 1.0", state=schemas.states.Pending(), ), ) await session.commit() return flow_run async def test_read_flow_run_with_deployment_version( self, flow, flow_run_with_deployment_version, client ): # make sure we we can read the flow run correctly response = await client.get(f"/flow_runs/{flow_run_with_deployment_version.id}") assert response.status_code == status.HTTP_200_OK, response.text assert response.json()["id"] == str(flow_run_with_deployment_version.id) assert response.json()["flow_id"] == str(flow.id) assert response.json()["deployment_version"] == "Deployment Version 1.0" async def test_read_flow_run_like_the_engine_does(self, flow, flow_run, client): """Regression test for the hex format of UUIDs in `PREFECT__FLOW_RUN_ID` The only route that is requested in this way is `GET /flow_runs/{id}`; other methods aren't affected because they are based on prior requests for flow runs and will use a fully-formatted UUID with dashes. 
""" flow_run_id = flow_run.id.hex assert "-" not in flow_run_id assert len(flow_run_id) == 32 response = await client.get(f"/flow_runs/{flow_run.id}") assert response.status_code == status.HTTP_200_OK, response.text assert response.json()["id"] == str(flow_run.id) assert response.json()["flow_id"] == str(flow.id) async def test_read_flow_run_with_invalid_id_is_rejected(self, client): """Additional safety check with for the above regression test to confirm that we're not attempting query with any old string as a flow run ID.""" with mock.patch("prefect.server.models.flow_runs.read_flow_run") as mock_read: response = await client.get("/flow_runs/THISAINTIT") assert response.status_code == 404, response.text mock_read.assert_not_called() async def test_read_flow_run_with_state(self, flow_run, client, session): state_id = uuid4() ( await models.flow_runs.set_flow_run_state( session=session, flow_run_id=flow_run.id, state=schemas.states.State(id=state_id, type="RUNNING"), ) ).state await client.get(f"/flow_runs/{flow_run.id}") assert flow_run.state.type.value == "RUNNING" assert flow_run.state.id == state_id async def test_read_flow_run_returns_404_if_does_not_exist(self, client): response = await client.get(f"/flow_runs/{uuid4()}") assert response.status_code == status.HTTP_404_NOT_FOUND, response.text
TestReadFlowRun
python
getsentry__sentry
src/sentry/users/services/user/model.py
{ "start": 638, "end": 853 }
class ____(RpcModel): id: int = 0 user_id: int = -1 created_at: datetime.datetime = DEFAULT_DATE last_used_at: datetime.datetime | None = None type: int = -1 config: Any = None
RpcAuthenticator
python
numba__numba
numba/core/typing/enumdecl.py
{ "start": 1401, "end": 1467 }
class ____(EnumCompare): pass @infer_global(operator.ne)
EnumEq
python
huggingface__transformers
src/transformers/models/video_llama_3/modeling_video_llama_3.py
{ "start": 15046, "end": 15496 }
class ____(PreTrainedModel): config: VideoLlama3Config base_model_prefix = "model" input_modalities = ("image", "video", "text") supports_gradient_checkpointing = True _no_split_modules = ["VideoLlama3VisionEncoderLayer"] _skip_keys_device_placement = "past_key_values" _supports_flash_attn = True _supports_sdpa = True _can_compile_fullgraph = True _supports_attention_backend = True
VideoLlama3PreTrainedModel
python
langchain-ai__langchain
libs/core/langchain_core/language_models/fake_chat_models.py
{ "start": 6940, "end": 12918 }
class ____(BaseChatModel): """Generic fake chat model that can be used to test the chat model interface. * Chat model should be usable in both sync and async tests * Invokes `on_llm_new_token` to allow for testing of callback related code for new tokens. * Includes logic to break messages into message chunk to facilitate testing of streaming. """ messages: Iterator[AIMessage | str] """Get an iterator over messages. This can be expanded to accept other types like Callables / dicts / strings to make the interface more generic if needed. !!! note if you want to pass a list, you can use `iter` to convert it to an iterator. !!! warning Streaming is not implemented yet. We should try to implement it in the future by delegating to invoke and then breaking the resulting output into message chunks. """ @override def _generate( self, messages: list[BaseMessage], stop: list[str] | None = None, run_manager: CallbackManagerForLLMRun | None = None, **kwargs: Any, ) -> ChatResult: message = next(self.messages) message_ = AIMessage(content=message) if isinstance(message, str) else message generation = ChatGeneration(message=message_) return ChatResult(generations=[generation]) def _stream( self, messages: list[BaseMessage], stop: list[str] | None = None, run_manager: CallbackManagerForLLMRun | None = None, **kwargs: Any, ) -> Iterator[ChatGenerationChunk]: chat_result = self._generate( messages, stop=stop, run_manager=run_manager, **kwargs ) if not isinstance(chat_result, ChatResult): msg = ( f"Expected generate to return a ChatResult, " f"but got {type(chat_result)} instead." ) raise ValueError(msg) # noqa: TRY004 message = chat_result.generations[0].message if not isinstance(message, AIMessage): msg = ( f"Expected invoke to return an AIMessage, " f"but got {type(message)} instead." 
) raise ValueError(msg) # noqa: TRY004 content = message.content if content: # Use a regular expression to split on whitespace with a capture group # so that we can preserve the whitespace in the output. if not isinstance(content, str): msg = "Expected content to be a string." raise ValueError(msg) content_chunks = cast("list[str]", re.split(r"(\s)", content)) for idx, token in enumerate(content_chunks): chunk = ChatGenerationChunk( message=AIMessageChunk(content=token, id=message.id) ) if ( idx == len(content_chunks) - 1 and isinstance(chunk.message, AIMessageChunk) and not message.additional_kwargs ): chunk.message.chunk_position = "last" if run_manager: run_manager.on_llm_new_token(token, chunk=chunk) yield chunk if message.additional_kwargs: for key, value in message.additional_kwargs.items(): # We should further break down the additional kwargs into chunks # Special case for function call if key == "function_call": for fkey, fvalue in value.items(): if isinstance(fvalue, str): # Break function call by `,` fvalue_chunks = cast("list[str]", re.split(r"(,)", fvalue)) for fvalue_chunk in fvalue_chunks: chunk = ChatGenerationChunk( message=AIMessageChunk( id=message.id, content="", additional_kwargs={ "function_call": {fkey: fvalue_chunk} }, ) ) if run_manager: run_manager.on_llm_new_token( "", chunk=chunk, # No token for function call ) yield chunk else: chunk = ChatGenerationChunk( message=AIMessageChunk( id=message.id, content="", additional_kwargs={"function_call": {fkey: fvalue}}, ) ) if run_manager: run_manager.on_llm_new_token( "", chunk=chunk, # No token for function call ) yield chunk else: chunk = ChatGenerationChunk( message=AIMessageChunk( id=message.id, content="", additional_kwargs={key: value} ) ) if run_manager: run_manager.on_llm_new_token( "", chunk=chunk, # No token for function call ) yield chunk @property def _llm_type(self) -> str: return "generic-fake-chat-model"
GenericFakeChatModel
python
walkccc__LeetCode
solutions/2088. Count Fertile Pyramids in a Land/2088.py
{ "start": 0, "end": 559 }
class ____: def countPyramids(self, grid: list[list[int]]) -> int: # dp[i][j] := the maximum height of the pyramid for which it is the apex def count(dp: list[list[int]]) -> int: ans = 0 for i in range(len(dp) - 2, -1, -1): for j in range(1, len(dp[0]) - 1): if dp[i][j] == 1: dp[i][j] = min(dp[i + 1][j - 1], dp[i + 1][j], dp[i + 1][j + 1]) + 1 ans += dp[i][j] - 1 return ans return count(deepcopy(grid)[::-1]) + count(grid)
Solution
python
davidhalter__parso
parso/python/errors.py
{ "start": 37450, "end": 43617 }
class ____(SyntaxRule): def _check_assignment(self, node, is_deletion=False, is_namedexpr=False, is_aug_assign=False): error = None type_ = node.type if type_ == 'lambdef': error = 'lambda' elif type_ == 'atom': first, second = node.children[:2] error = _get_comprehension_type(node) if error is None: if second.type == 'dictorsetmaker': if self._normalizer.version < (3, 8): error = 'literal' else: if second.children[1] == ':': if self._normalizer.version < (3, 10): error = 'dict display' else: error = 'dict literal' else: error = 'set display' elif first == "{" and second == "}": if self._normalizer.version < (3, 8): error = 'literal' else: if self._normalizer.version < (3, 10): error = "dict display" else: error = "dict literal" elif first == "{" and len(node.children) > 2: if self._normalizer.version < (3, 8): error = 'literal' else: error = "set display" elif first in ('(', '['): if second.type == 'yield_expr': error = 'yield expression' elif second.type == 'testlist_comp': # ([a, b] := [1, 2]) # ((a, b) := [1, 2]) if is_namedexpr: if first == '(': error = 'tuple' elif first == '[': error = 'list' # This is not a comprehension, they were handled # further above. for child in second.children[::2]: self._check_assignment(child, is_deletion, is_namedexpr, is_aug_assign) else: # Everything handled, must be useless brackets. self._check_assignment(second, is_deletion, is_namedexpr, is_aug_assign) elif type_ == 'keyword': if node.value == "yield": error = "yield expression" elif self._normalizer.version < (3, 8): error = 'keyword' else: error = str(node.value) elif type_ == 'operator': if node.value == '...': if self._normalizer.version < (3, 10): error = 'Ellipsis' else: error = 'ellipsis' elif type_ == 'comparison': error = 'comparison' elif type_ in ('string', 'number', 'strings'): error = 'literal' elif type_ == 'yield_expr': # This one seems to be a slightly different warning in Python. 
message = 'assignment to yield expression not possible' self.add_issue(node, message=message) elif type_ == 'test': error = 'conditional expression' elif type_ in ('atom_expr', 'power'): if node.children[0] == 'await': error = 'await expression' elif node.children[-2] == '**': if self._normalizer.version < (3, 10): error = 'operator' else: error = 'expression' else: # Has a trailer trailer = node.children[-1] assert trailer.type == 'trailer' if trailer.children[0] == '(': error = 'function call' elif is_namedexpr and trailer.children[0] == '[': error = 'subscript' elif is_namedexpr and trailer.children[0] == '.': error = 'attribute' elif type_ == "fstring": if self._normalizer.version < (3, 8): error = 'literal' else: error = "f-string expression" elif type_ in ('testlist_star_expr', 'exprlist', 'testlist'): for child in node.children[::2]: self._check_assignment(child, is_deletion, is_namedexpr, is_aug_assign) elif ('expr' in type_ and type_ != 'star_expr' # is a substring or '_test' in type_ or type_ in ('term', 'factor')): if self._normalizer.version < (3, 10): error = 'operator' else: error = 'expression' elif type_ == "star_expr": if is_deletion: if self._normalizer.version >= (3, 9): error = "starred" else: self.add_issue(node, message="can't use starred expression here") else: if self._normalizer.version >= (3, 9): ancestor = node.parent else: ancestor = _skip_parens_bottom_up(node) if ancestor.type not in _STAR_EXPR_PARENTS and not is_aug_assign \ and not (ancestor.type == 'atom' and ancestor.children[0] == '['): message = "starred assignment target must be in a list or tuple" self.add_issue(node, message=message) self._check_assignment(node.children[1]) if error is not None: if is_namedexpr: message = 'cannot use assignment expressions with %s' % error else: cannot = "can't" if self._normalizer.version < (3, 8) else "cannot" message = ' '.join([cannot, "delete" if is_deletion else "assign to", error]) self.add_issue(node, message=message) 
@ErrorFinder.register_rule(type='sync_comp_for')
_CheckAssignmentRule
python
huggingface__transformers
tests/models/clvp/test_feature_extraction_clvp.py
{ "start": 3642, "end": 10717 }
class ____(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = ClvpFeatureExtractor def setUp(self): self.feat_extract_tester = ClvpFeatureExtractionTester(self) def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch cleanup(torch_device) # Copied from transformers.tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_feat_extract_from_and_save_pretrained def test_feat_extract_from_and_save_pretrained(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] check_json_file_has_correct_format(saved_file) feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() mel_1 = feat_extract_first.mel_filters mel_2 = feat_extract_second.mel_filters self.assertTrue(np.allclose(mel_1, mel_2)) self.assertEqual(dict_first, dict_second) # Copied from transformers.tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_feat_extract_to_json_file def test_feat_extract_to_json_file(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "feat_extract.json") feat_extract_first.to_json_file(json_file_path) feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) dict_first = feat_extract_first.to_dict() dict_second = feat_extract_second.to_dict() mel_1 = feat_extract_first.mel_filters mel_2 = feat_extract_second.mel_filters self.assertTrue(np.allclose(mel_1, mel_2)) self.assertEqual(dict_first, dict_second) def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus feature_extractor = 
self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 1200 speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] # Test feature size input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features self.assertTrue(input_features.ndim == 3) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size) # Test not batched input encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) # Test batched encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) # Test 2-D numpy arrays are batched. 
speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] np_speech_inputs = np.asarray(speech_inputs) encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) # Test truncation required speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs] np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated] encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) # Copied from transformers.tests.models.whisper.test_feature_extraction_whisper.WhisperFeatureExtractionTest.test_double_precision_pad def test_double_precision_pad(self): import torch feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_speech_inputs = np.random.rand(100, 32).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np") self.assertTrue(np_processed.input_features.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_features.dtype == torch.float32) def _load_datasamples(self, num_samples): ds = 
load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") ds = ds.cast_column("audio", Audio(sampling_rate=22050)) # automatic decoding with librispeech speech_samples = ds.sort("id")[:num_samples]["audio"] return [x["array"] for x in speech_samples], [x["sampling_rate"] for x in speech_samples] @slow def test_integration(self): # fmt: off EXPECTED_INPUT_FEATURES = torch.tensor( [ 0.9271, 1.1405, 1.4419, 1.2470, 1.2438, 1.1787, 1.0595, 1.0570, 1.1070, 1.2205, 1.2376, 1.2997, 1.1131, 1.0843, 1.0459, 1.1858, 1.2323, 1.3582, 1.3401, 1.3770, 1.4173, 1.3381, 1.2291, 1.0854, 1.2116, 1.1873, 1.2178, 1.2137, 1.3001, 1.4274 ] ) # fmt: on input_speech, sr = self._load_datasamples(1) feature_extractor = ClvpFeatureExtractor.from_pretrained("susnato/clvp_dev") input_features = feature_extractor(input_speech, sampling_rate=sr[0], return_tensors="pt").input_features self.assertEqual(input_features.shape, (1, 80, 517)) torch.testing.assert_close(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, rtol=1e-4, atol=1e-4)
ClvpFeatureExtractionTest
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/roles.py
{ "start": 3011, "end": 3300 }
class ____(AllowsLambdaRole, UsesInspection, ByOfRole): __slots__ = () # note there's a special case right now where you can pass a whole # ORM entity to group_by() and it splits out. we may not want to keep # this around _role_name = "GROUP BY expression"
GroupByRole
python
walkccc__LeetCode
solutions/25. Reverse Nodes in k-Group/25.py
{ "start": 0, "end": 694 }
class ____: def reverseKGroup(self, head: ListNode | None, k: int) -> ListNode | None: if not head: return None tail = head for _ in range(k): # There are less than k nodes in the list, do nothing. if not tail: return head tail = tail.next newHead = self._reverse(head, tail) head.next = self.reverseKGroup(tail, k) return newHead def _reverse( self, head: ListNode | None, tail: ListNode | None, ) -> ListNode | None: """Reverses [head, tail).""" prev = None curr = head while curr != tail: next = curr.next curr.next = prev prev = curr curr = next return prev
Solution
python
doocs__leetcode
solution/1900-1999/1935.Maximum Number of Words You Can Type/Solution.py
{ "start": 0, "end": 185 }
class ____: def canBeTypedWords(self, text: str, brokenLetters: str) -> int: s = set(brokenLetters) return sum(all(c not in s for c in w) for w in text.split())
Solution
python
mwaskom__seaborn
seaborn/_core/properties.py
{ "start": 19592, "end": 19780 }
class ____(TextAlignment): def _default_values(self, n: int) -> list: vals = itertools.cycle(["left", "right"]) return [next(vals) for _ in range(n)]
HorizontalAlignment
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/links/base.py
{ "start": 1361, "end": 4641 }
class ____(BaseOperatorLink): """ Base class for all Google links. When you inherit this class in a Link class; - You can call the persist method to push data to the XCom to use it later in the get_link method. - If you have an operator which inherit the GoogleCloudBaseOperator or BaseSensorOperator You can define extra_links_params method in the operator to pass the operator properties to the get_link method. :meta private: """ name: ClassVar[str] key: ClassVar[str] format_str: ClassVar[str] @property def xcom_key(self) -> str: # NOTE: in Airflow 3 we need to have xcom_key property in the Link class. # Since we have the key property already, this is just a proxy property method to use same # key as in Airflow 2. return self.key @classmethod def persist(cls, context: Context, **value): """ Push arguments to the XCom to use later for link formatting at the `get_link` method. Note: for Airflow 2 we need to call this function with context variable only where we have the extra_links_params property method defined """ params = {} # TODO: remove after Airflow v2 support dropped if not AIRFLOW_V_3_0_PLUS: common_params = getattr(context["task"], "extra_links_params", None) if common_params: params.update(common_params) context["ti"].xcom_push( key=cls.key, value={ **params, **value, }, ) def get_config(self, operator, ti_key): conf = {} conf.update(getattr(operator, "extra_links_params", {})) conf.update(XCom.get_value(key=self.key, ti_key=ti_key) or {}) # if the config did not define, return None to stop URL formatting if not conf: return None # Add a default value for the 'namespace' parameter for backward compatibility. 
# This is for datafusion conf.setdefault("namespace", "default") return conf def get_link( self, operator: BaseOperator, *, ti_key: TaskInstanceKey, ) -> str: if TYPE_CHECKING: assert isinstance(operator, (GoogleCloudBaseOperator, BaseSensorOperator)) # In cases when worker passes execution to trigger, the value that is put to XCom # already contains link to the object in string format. In this case we don't want to execute # get_config() again. Instead we can leave this value without any changes link_value = XCom.get_value(key=self.key, ti_key=ti_key) if link_value and isinstance(link_value, str): if urlparse(link_value).scheme in ("http", "https"): return link_value conf = self.get_config(operator, ti_key) if not conf: return "" return self._format_link(**conf) def _format_link(self, **kwargs): try: formatted_str = self.format_str.format(**kwargs) if formatted_str.startswith("http"): return formatted_str return BASE_LINK + formatted_str except KeyError: return ""
BaseGoogleLink
python
allegroai__clearml
clearml/backend_api/services/v2_13/tasks.py
{ "start": 119559, "end": 121411 }
class ____(Response): """ Response of tasks.close endpoint. :param updated: Number of tasks updated (0 or 1) :type updated: int :param fields: Updated fields names and values :type fields: dict """ _service = "tasks" _action = "close" _version = "2.13" _schema = { "definitions": {}, "properties": { "fields": { "additionalProperties": True, "description": "Updated fields names and values", "type": ["object", "null"], }, "updated": { "description": "Number of tasks updated (0 or 1)", "enum": [0, 1], "type": ["integer", "null"], }, }, "type": "object", } def __init__(self, updated: Optional[int] = None, fields: Optional[dict] = None, **kwargs: Any) -> None: super(CloseResponse, self).__init__(**kwargs) self.updated = updated self.fields = fields @schema_property("updated") def updated(self) -> Optional[int]: return self._property_updated @updated.setter def updated(self, value: Optional[int]) -> None: if value is None: self._property_updated = None return if isinstance(value, float) and value.is_integer(): value = int(value) self.assert_isinstance(value, "updated", six.integer_types) self._property_updated = value @schema_property("fields") def fields(self) -> Optional[dict]: return self._property_fields @fields.setter def fields(self, value: Optional[dict]) -> None: if value is None: self._property_fields = None return self.assert_isinstance(value, "fields", (dict,)) self._property_fields = value
CloseResponse
python
yaml__pyyaml
tests/legacy_tests/test_emitter.py
{ "start": 3058, "end": 4417 }
class ____(yaml.Loader): def construct_event(self, node): if isinstance(node, yaml.ScalarNode): mapping = {} else: mapping = self.construct_mapping(node) class_name = str(node.tag[1:])+'Event' if class_name in ['AliasEvent', 'ScalarEvent', 'SequenceStartEvent', 'MappingStartEvent']: mapping.setdefault('anchor', None) if class_name in ['ScalarEvent', 'SequenceStartEvent', 'MappingStartEvent']: mapping.setdefault('tag', None) if class_name in ['SequenceStartEvent', 'MappingStartEvent']: mapping.setdefault('implicit', True) if class_name == 'ScalarEvent': mapping.setdefault('implicit', (False, True)) mapping.setdefault('value', '') value = getattr(yaml, class_name)(**mapping) return value EventsLoader.add_constructor(None, EventsLoader.construct_event) def test_emitter_events(events_filename, verbose=False): with open(events_filename, 'rb') as file: events = list(yaml.load(file, Loader=EventsLoader)) output = yaml.emit(events) if verbose: print("OUTPUT:") print(output) new_events = list(yaml.parse(output)) _compare_events(events, new_events) if __name__ == '__main__': import test_appliance test_appliance.run(globals())
EventsLoader
python
hyperopt__hyperopt
hyperopt/tests/unit/test_tpe.py
{ "start": 7208, "end": 10164 }
class ____(unittest.TestCase): def setUp(self): self.rng = np.random.default_rng(234) self.weights = [0.1, 0.3, 0.4, 0.2] self.mus = [1.0, 2.0, 3.0, 4.0] self.sigmas = [0.1, 0.4, 0.8, 2.0] self.low = None self.high = None self.n_samples = 1001 self.show = DO_SHOW # or put a string # -- triggers error if test case forgets to call work() self.worked = False def tearDown(self): assert self.worked def work(self, **kwargs): self.__dict__.update(kwargs) del kwargs self.worked = True gkwargs = dict( weights=self.weights, mus=self.mus, sigmas=self.sigmas, low=self.low, high=self.high, q=self.q, ) samples = GMM1(rng=self.rng, size=(self.n_samples,), **gkwargs) / self.q print("drew", len(samples), "samples") assert np.all(samples == samples.astype("int")) min_max = int(samples.min()), int(samples.max()) counts = np.bincount(samples.astype("int") - min_max[0]) print(counts) xcoords = np.arange(min_max[0], min_max[1] + 1) * self.q prob = np.exp(GMM1_lpdf(xcoords, **gkwargs)) assert counts.sum() == self.n_samples y = counts / float(self.n_samples) if self.show: plt.scatter(xcoords, y, c="r", label="empirical") plt.scatter(xcoords, prob, c="b", label="predicted") plt.legend() plt.title(str(self.show)) plt.show() err = (prob - y) ** 2 print(np.max(err)) print(np.mean(err)) print(np.median(err)) if self.show: pytest.skip() else: assert np.max(err) < 0.1 assert np.mean(err) < 0.01 assert np.median(err) < 0.01 def test_basic_1(self): self.work(q=1) def test_basic_2(self): self.work(q=2) def test_basic_pt5(self): self.work(q=0.5) def test_bounded_1(self): self.work(q=1, low=2, high=4) def test_bounded_2(self): self.work(q=2, low=2, high=4) def test_bounded_1b(self): self.work(q=1, low=1, high=4.1) def test_bounded_2b(self): self.work(q=2, low=1, high=4.1) def test_bounded_3(self): self.work( weights=[0.14285714, 0.28571429, 0.28571429, 0.28571429], mus=[5.505, 7.0, 2.0, 10.0], sigmas=[8.99, 5.0, 8.0, 8.0], q=1, low=1.01, high=10, n_samples=10000, # show='bounded_3', ) def 
test_bounded_3b(self): self.work( weights=[0.33333333, 0.66666667], mus=[5.505, 5.0], sigmas=[8.99, 5.19], q=1, low=1.01, high=10, n_samples=10000, # show='bounded_3b', )
TestQGMM1Math
python
kamyu104__LeetCode-Solutions
Python/detect-squares.py
{ "start": 1124, "end": 1843 }
class ____(object): def __init__(self): self.__points = [] self.__point_counts = collections.defaultdict(int) def add(self, point): """ :type point: List[int] :rtype: None """ self.__points.append(point) self.__point_counts[tuple(point)] += 1 def count(self, point): """ :type point: List[int] :rtype: int """ result = 0 for x, y in self.__points: if not (point[0] != x and point[1] != y and (abs(point[0]-x) == abs(point[1]-y))): continue result += self.__point_counts[(point[0], y)]*self.__point_counts[(x, point[1])] return result
DetectSquares2
python
catalyst-team__catalyst
catalyst/contrib/losses/gan.py
{ "start": 413, "end": 1894 }
class ____(nn.Module): """Criterion to compute gradient penalty. WARN: SHOULD NOT BE RUN WITH CriterionCallback, use special GradientPenaltyCallback instead """ def forward(self, fake_data, real_data, critic, critic_condition_args): """Compute gradient penalty. @TODO: Docs. Contribution is welcome. """ device = real_data.device # Random weight term for interpolation between real and fake samples alpha = torch.rand((real_data.size(0), 1, 1, 1), device=device) # Get random interpolation between real and fake samples interpolates = (alpha * real_data + ((1 - alpha) * fake_data)).detach() interpolates.requires_grad_(True) with torch.set_grad_enabled(True): # to compute in validation mode d_interpolates = critic(interpolates, *critic_condition_args) fake = torch.ones((real_data.size(0), 1), device=device, requires_grad=False) # Get gradient w.r.t. interpolates gradients = torch.autograd.grad( outputs=d_interpolates, inputs=interpolates, grad_outputs=fake, create_graph=True, retain_graph=True, only_inputs=True, )[0] gradients = gradients.view(gradients.size(0), -1) gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() return gradient_penalty __all__ = ["MeanOutputLoss", "GradientPenaltyLoss"]
GradientPenaltyLoss
python
tensorflow__tensorflow
tensorflow/python/distribute/strategy_test_lib.py
{ "start": 20077, "end": 24862 }
class ____(test.TestCase): """Some tests that should work with any one-device DistributionStrategy.""" def _test_run(self, strategy): out1 = strategy.run(lambda: array_ops.identity(4.)) self.assertAllEqual([4.], self.evaluate(strategy.unwrap(out1))) out2 = strategy.run(lambda x: {"a": x * 2, "b": x * x}, args=(out1,)) out2_vals = self.evaluate(nest.map_structure(strategy.unwrap, out2)) self.assertAllEqual([8.], out2_vals["a"]) self.assertAllEqual([16.], out2_vals["b"]) out3 = strategy.run(lambda b, a: a + 2 * b + 2, kwargs=out2) self.assertAllEqual([42.], self.evaluate(strategy.unwrap(out3))) def _test_all_reduce_sum(self, strategy): self._test_collective_comms( strategy, _all_sum, inputs=(4., [42., 43.]), expected=(4., [42., 43.])) def _test_all_reduce_sum_gradients(self, strategy): self._test_collective_comms_gradients( strategy, _all_sum, inputs=[4.], expected_grads=[4.]) def _test_all_reduce_sum_gradient_tape(self, strategy): self._test_collective_comms_gradient_tape( strategy, _all_sum, inputs=[4.], expected_grads=[4.]) def _test_all_reduce_mean(self, strategy): self._test_collective_comms( strategy, _all_mean, inputs=(2., [21., 22.]), expected=(2., [21., 22.])) def _test_all_reduce_mean_gradients(self, strategy): self._test_collective_comms_gradients( strategy, _all_mean, inputs=[5.], expected_grads=[5.]) def _test_all_reduce_mean_gradient_tape(self, strategy): self._test_collective_comms_gradient_tape( strategy, _all_mean, inputs=[5.], expected_grads=[5.]) def _test_collective_comms(self, strategy, comm_fn, inputs, expected): inputs = strategy.make_input_fn_iterator( lambda _: dataset_ops.Dataset.from_tensors(inputs)) self.evaluate(inputs.initialize()) outputs = self.evaluate( list( map(strategy.experimental_local_results, strategy.experimental_run(comm_fn, inputs)))) self.assertAllEqual([expected[0]], outputs[0]) self.assertAllEqual([expected[1]], outputs[1]) def _test_collective_comms_gradients(self, strategy, comm_fn, inputs, expected_grads): if 
context.executing_eagerly(): self.skipTest("`tf.gradients` is not supported with eager execution.") def step(c): x = array_ops.identity(42.) y = comm_fn(x) * c return gradients_impl.gradients(y, [x])[0] inputs = strategy.make_input_fn_iterator( lambda _: dataset_ops.Dataset.from_tensors(inputs)) self.evaluate(inputs.initialize()) self.assertAllEqual( expected_grads, self.evaluate( strategy.experimental_local_results( strategy.experimental_run(step, inputs)))) def _test_collective_comms_gradient_tape(self, strategy, comm_fn, inputs, expected_grads): def step(c): x = array_ops.identity(42.) with backprop.GradientTape() as tape: tape.watch(x) y = comm_fn(x) * c return tape.gradient(y, x) inputs = strategy.make_input_fn_iterator( lambda _: dataset_ops.Dataset.from_tensors(inputs)) self.evaluate(inputs.initialize()) self.assertAllEqual( expected_grads, self.evaluate( strategy.experimental_local_results( strategy.experimental_run(step, inputs)))) def _test_device_and_input_device_are_colocated(self, strategy): if context.executing_eagerly(): self.skipTest( "cross-device tests are not supported with eager execution.") workers, _ = test_util.create_local_cluster(2, 0) inputs = strategy.make_input_fn_iterator( lambda _: dataset_ops.Dataset.range(5)) comm_fn = lambda x: x + 1 run_op = strategy.experimental_run(comm_fn, inputs) with session_lib.Session(target=workers[1].target) as sess: sess.run(inputs.initialize()) sess.run(run_op) def _test_device_and_input_device_are_colocated_with_function(self, strategy): if context.executing_eagerly(): self.skipTest( "cross-device tests are not supported with eager execution.") workers, _ = test_util.create_local_cluster(2, 0) inputs = strategy.make_input_fn_iterator( lambda _: dataset_ops.Dataset.range(5)) comm_fn = lambda x: x + 1 experimental_run = def_function.function()(strategy.experimental_run) with ops.device("/job:worker/replica:0/task:1/device:CPU:0"): # The tf.function must be defined on the right device as well. 
run_op = experimental_run(comm_fn, inputs) with session_lib.Session(target=workers[1].target) as sess: sess.run(inputs.initialize()) sess.run(run_op)
OneDeviceDistributionTestBase
python
django__django
django/forms/widgets.py
{ "start": 21231, "end": 25739 }
class ____(Widget): allow_multiple_selected = False input_type = None template_name = None option_template_name = None add_id_index = True checked_attribute = {"checked": True} option_inherits_attrs = True def __init__(self, attrs=None, choices=()): super().__init__(attrs) self.choices = choices def __deepcopy__(self, memo): obj = copy.copy(self) obj.attrs = self.attrs.copy() obj.choices = copy.copy(self.choices) memo[id(self)] = obj return obj def subwidgets(self, name, value, attrs=None): """ Yield all "subwidgets" of this widget. Used to enable iterating options from a BoundField for choice widgets. """ value = self.format_value(value) yield from self.options(name, value, attrs) def options(self, name, value, attrs=None): """Yield a flat list of options for this widget.""" for group in self.optgroups(name, value, attrs): yield from group[1] def optgroups(self, name, value, attrs=None): """Return a list of optgroups for this widget.""" groups = [] has_selected = False for index, (option_value, option_label) in enumerate(self.choices): if option_value is None: option_value = "" subgroup = [] if isinstance(option_label, (list, tuple)): group_name = option_value subindex = 0 choices = option_label else: group_name = None subindex = None choices = [(option_value, option_label)] groups.append((group_name, subgroup, index)) for subvalue, sublabel in choices: selected = (not has_selected or self.allow_multiple_selected) and str( subvalue ) in value has_selected |= selected subgroup.append( self.create_option( name, subvalue, sublabel, selected, index, subindex=subindex, attrs=attrs, ) ) if subindex is not None: subindex += 1 return groups def create_option( self, name, value, label, selected, index, subindex=None, attrs=None ): index = str(index) if subindex is None else "%s_%s" % (index, subindex) option_attrs = ( self.build_attrs(self.attrs, attrs) if self.option_inherits_attrs else {} ) if selected: option_attrs.update(self.checked_attribute) if "id" in option_attrs: 
option_attrs["id"] = self.id_for_label(option_attrs["id"], index) return { "name": name, "value": value, "label": label, "selected": selected, "index": index, "attrs": option_attrs, "type": self.input_type, "template_name": self.option_template_name, "wrap_label": True, } def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) context["widget"]["optgroups"] = self.optgroups( name, context["widget"]["value"], attrs ) return context def id_for_label(self, id_, index="0"): """ Use an incremented id for each option where the main widget references the zero index. """ if id_ and self.add_id_index: id_ = "%s_%s" % (id_, index) return id_ def value_from_datadict(self, data, files, name): getter = data.get if self.allow_multiple_selected: try: getter = data.getlist except AttributeError: pass return getter(name) def format_value(self, value): """Return selected values as a list.""" if value is None and self.allow_multiple_selected: return [] if not isinstance(value, (tuple, list)): value = [value] return [str(v) if v is not None else "" for v in value] @property def choices(self): return self._choices @choices.setter def choices(self, value): self._choices = normalize_choices(value)
ChoiceWidget
python
python-openxml__python-docx
tests/oxml/test_table.py
{ "start": 520, "end": 1444 }
class ____: @pytest.mark.parametrize( ("tr_cxml", "expected_cxml"), [ ("w:tr", "w:tr/w:trPr"), ("w:tr/w:tblPrEx", "w:tr/(w:tblPrEx,w:trPr)"), ("w:tr/w:tc", "w:tr/(w:trPr,w:tc)"), ("w:tr/(w:sdt,w:del,w:tc)", "w:tr/(w:trPr,w:sdt,w:del,w:tc)"), ], ) def it_can_add_a_trPr(self, tr_cxml: str, expected_cxml: str): tr = cast(CT_Row, element(tr_cxml)) tr._add_trPr() assert tr.xml == xml(expected_cxml) @pytest.mark.parametrize(("snippet_idx", "row_idx", "col_idx"), [(0, 0, 3), (1, 0, 1)]) def it_raises_on_tc_at_grid_col(self, snippet_idx: int, row_idx: int, col_idx: int): tr = cast(CT_Tbl, parse_xml(snippet_seq("tbl-cells")[snippet_idx])).tr_lst[row_idx] with pytest.raises(ValueError, match=f"no `tc` element at grid_offset={col_idx}"): tr.tc_at_grid_offset(col_idx)
DescribeCT_Row
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/auto_materialize_rule_evaluation.py
{ "start": 3124, "end": 3466 }
class ____(NamedTupleSerializer): """Unpacks an arbitrary object into None.""" def unpack( # pyright: ignore[reportIncompatibleMethodOverride] self, unpacked_dict: dict[str, UnpackedValue], whitelist_map: WhitelistMap, context: UnpackContext, ) -> None: return None
BackcompatNullSerializer
python
prompt-toolkit__python-prompt-toolkit
src/prompt_toolkit/layout/processors.py
{ "start": 25004, "end": 30030 }
class ____(Processor): """ Process to display the "(reverse-i-search)`...`:..." stuff around the search buffer. Note: This processor is meant to be applied to the BufferControl that contains the search buffer, it's not meant for the original input. """ _excluded_input_processors: list[type[Processor]] = [ HighlightSearchProcessor, HighlightSelectionProcessor, BeforeInput, AfterInput, ] def _get_main_buffer(self, buffer_control: BufferControl) -> BufferControl | None: from prompt_toolkit.layout.controls import BufferControl prev_control = get_app().layout.search_target_buffer_control if ( isinstance(prev_control, BufferControl) and prev_control.search_buffer_control == buffer_control ): return prev_control return None def _content( self, main_control: BufferControl, ti: TransformationInput ) -> UIContent: from prompt_toolkit.layout.controls import BufferControl # Emulate the BufferControl through which we are searching. # For this we filter out some of the input processors. excluded_processors = tuple(self._excluded_input_processors) def filter_processor(item: Processor) -> Processor | None: """Filter processors from the main control that we want to disable here. This returns either an accepted processor or None.""" # For a `_MergedProcessor`, check each individual processor, recursively. if isinstance(item, _MergedProcessor): accepted_processors = [filter_processor(p) for p in item.processors] return merge_processors( [p for p in accepted_processors if p is not None] ) # For a `ConditionalProcessor`, check the body. elif isinstance(item, ConditionalProcessor): p = filter_processor(item.processor) if p: return ConditionalProcessor(p, item.filter) # Otherwise, check the processor itself. 
else: if not isinstance(item, excluded_processors): return item return None filtered_processor = filter_processor( merge_processors(main_control.input_processors or []) ) highlight_processor = HighlightIncrementalSearchProcessor() if filtered_processor: new_processors = [filtered_processor, highlight_processor] else: new_processors = [highlight_processor] from .controls import SearchBufferControl assert isinstance(ti.buffer_control, SearchBufferControl) buffer_control = BufferControl( buffer=main_control.buffer, input_processors=new_processors, include_default_input_processors=False, lexer=main_control.lexer, preview_search=True, search_buffer_control=ti.buffer_control, ) return buffer_control.create_content(ti.width, ti.height, preview_search=True) def apply_transformation(self, ti: TransformationInput) -> Transformation: from .controls import SearchBufferControl assert isinstance(ti.buffer_control, SearchBufferControl), ( "`ReverseSearchProcessor` should be applied to a `SearchBufferControl` only." ) source_to_display: SourceToDisplay | None display_to_source: DisplayToSource | None main_control = self._get_main_buffer(ti.buffer_control) if ti.lineno == 0 and main_control: content = self._content(main_control, ti) # Get the line from the original document for this search. 
line_fragments = content.get_line(content.cursor_position.y) if main_control.search_state.direction == SearchDirection.FORWARD: direction_text = "i-search" else: direction_text = "reverse-i-search" fragments_before: StyleAndTextTuples = [ ("class:prompt.search", "("), ("class:prompt.search", direction_text), ("class:prompt.search", ")`"), ] fragments = ( fragments_before + [ ("class:prompt.search.text", fragment_list_to_text(ti.fragments)), ("", "': "), ] + line_fragments ) shift_position = fragment_list_len(fragments_before) source_to_display = lambda i: i + shift_position display_to_source = lambda i: i - shift_position else: source_to_display = None display_to_source = None fragments = ti.fragments return Transformation( fragments, source_to_display=source_to_display, display_to_source=display_to_source, )
ReverseSearchProcessor
python
pypa__pipenv
pipenv/patched/pip/_vendor/rich/console.py
{ "start": 18085, "end": 100651 }
class ____: """A high level console interface. Args: color_system (str, optional): The color system supported by your terminal, either ``"standard"``, ``"256"`` or ``"truecolor"``. Leave as ``"auto"`` to autodetect. force_terminal (Optional[bool], optional): Enable/disable terminal control codes, or None to auto-detect terminal. Defaults to None. force_jupyter (Optional[bool], optional): Enable/disable Jupyter rendering, or None to auto-detect Jupyter. Defaults to None. force_interactive (Optional[bool], optional): Enable/disable interactive mode, or None to auto detect. Defaults to None. soft_wrap (Optional[bool], optional): Set soft wrap default on print method. Defaults to False. theme (Theme, optional): An optional style theme object, or ``None`` for default theme. stderr (bool, optional): Use stderr rather than stdout if ``file`` is not specified. Defaults to False. file (IO, optional): A file object where the console should write to. Defaults to stdout. quiet (bool, Optional): Boolean to suppress all output. Defaults to False. width (int, optional): The width of the terminal. Leave as default to auto-detect width. height (int, optional): The height of the terminal. Leave as default to auto-detect height. style (StyleType, optional): Style to apply to all output, or None for no style. Defaults to None. no_color (Optional[bool], optional): Enabled no color mode, or None to auto detect. Defaults to None. tab_size (int, optional): Number of spaces used to replace a tab character. Defaults to 8. record (bool, optional): Boolean to enable recording of terminal output, required to call :meth:`export_html`, :meth:`export_svg`, and :meth:`export_text`. Defaults to False. markup (bool, optional): Boolean to enable :ref:`console_markup`. Defaults to True. emoji (bool, optional): Enable emoji code. Defaults to True. emoji_variant (str, optional): Optional emoji variant, either "text" or "emoji". Defaults to None. highlight (bool, optional): Enable automatic highlighting. 
Defaults to True. log_time (bool, optional): Boolean to enable logging of time by :meth:`log` methods. Defaults to True. log_path (bool, optional): Boolean to enable the logging of the caller by :meth:`log`. Defaults to True. log_time_format (Union[str, TimeFormatterCallable], optional): If ``log_time`` is enabled, either string for strftime or callable that formats the time. Defaults to "[%X] ". highlighter (HighlighterType, optional): Default highlighter. legacy_windows (bool, optional): Enable legacy Windows mode, or ``None`` to auto detect. Defaults to ``None``. safe_box (bool, optional): Restrict box options that don't render on legacy Windows. get_datetime (Callable[[], datetime], optional): Callable that gets the current time as a datetime.datetime object (used by Console.log), or None for datetime.now. get_time (Callable[[], time], optional): Callable that gets the current time in seconds, default uses time.monotonic. """ _environ: Mapping[str, str] = os.environ def __init__( self, *, color_system: Optional[ Literal["auto", "standard", "256", "truecolor", "windows"] ] = "auto", force_terminal: Optional[bool] = None, force_jupyter: Optional[bool] = None, force_interactive: Optional[bool] = None, soft_wrap: bool = False, theme: Optional[Theme] = None, stderr: bool = False, file: Optional[IO[str]] = None, quiet: bool = False, width: Optional[int] = None, height: Optional[int] = None, style: Optional[StyleType] = None, no_color: Optional[bool] = None, tab_size: int = 8, record: bool = False, markup: bool = True, emoji: bool = True, emoji_variant: Optional[EmojiVariant] = None, highlight: bool = True, log_time: bool = True, log_path: bool = True, log_time_format: Union[str, FormatTimeCallable] = "[%X]", highlighter: Optional["HighlighterType"] = ReprHighlighter(), legacy_windows: Optional[bool] = None, safe_box: bool = True, get_datetime: Optional[Callable[[], datetime]] = None, get_time: Optional[Callable[[], float]] = None, _environ: Optional[Mapping[str, 
str]] = None, ): # Copy of os.environ allows us to replace it for testing if _environ is not None: self._environ = _environ self.is_jupyter = _is_jupyter() if force_jupyter is None else force_jupyter if self.is_jupyter: if width is None: jupyter_columns = self._environ.get("JUPYTER_COLUMNS") if jupyter_columns is not None and jupyter_columns.isdigit(): width = int(jupyter_columns) else: width = JUPYTER_DEFAULT_COLUMNS if height is None: jupyter_lines = self._environ.get("JUPYTER_LINES") if jupyter_lines is not None and jupyter_lines.isdigit(): height = int(jupyter_lines) else: height = JUPYTER_DEFAULT_LINES self.tab_size = tab_size self.record = record self._markup = markup self._emoji = emoji self._emoji_variant: Optional[EmojiVariant] = emoji_variant self._highlight = highlight self.legacy_windows: bool = ( (detect_legacy_windows() and not self.is_jupyter) if legacy_windows is None else legacy_windows ) if width is None: columns = self._environ.get("COLUMNS") if columns is not None and columns.isdigit(): width = int(columns) - self.legacy_windows if height is None: lines = self._environ.get("LINES") if lines is not None and lines.isdigit(): height = int(lines) self.soft_wrap = soft_wrap self._width = width self._height = height self._color_system: Optional[ColorSystem] self._force_terminal = None if force_terminal is not None: self._force_terminal = force_terminal self._file = file self.quiet = quiet self.stderr = stderr if color_system is None: self._color_system = None elif color_system == "auto": self._color_system = self._detect_color_system() else: self._color_system = COLOR_SYSTEMS[color_system] self._lock = threading.RLock() self._log_render = LogRender( show_time=log_time, show_path=log_path, time_format=log_time_format, ) self.highlighter: HighlighterType = highlighter or _null_highlighter self.safe_box = safe_box self.get_datetime = get_datetime or datetime.now self.get_time = get_time or monotonic self.style = style self.no_color = ( no_color if 
no_color is not None else self._environ.get("NO_COLOR", "") != "" ) self.is_interactive = ( (self.is_terminal and not self.is_dumb_terminal) if force_interactive is None else force_interactive ) self._record_buffer_lock = threading.RLock() self._thread_locals = ConsoleThreadLocals( theme_stack=ThemeStack(themes.DEFAULT if theme is None else theme) ) self._record_buffer: List[Segment] = [] self._render_hooks: List[RenderHook] = [] self._live: Optional["Live"] = None self._is_alt_screen = False def __repr__(self) -> str: return f"<console width={self.width} {self._color_system!s}>" @property def file(self) -> IO[str]: """Get the file object to write to.""" file = self._file or (sys.stderr if self.stderr else sys.stdout) file = getattr(file, "rich_proxied_file", file) if file is None: file = NULL_FILE return file @file.setter def file(self, new_file: IO[str]) -> None: """Set a new file object.""" self._file = new_file @property def _buffer(self) -> List[Segment]: """Get a thread local buffer.""" return self._thread_locals.buffer @property def _buffer_index(self) -> int: """Get a thread local buffer.""" return self._thread_locals.buffer_index @_buffer_index.setter def _buffer_index(self, value: int) -> None: self._thread_locals.buffer_index = value @property def _theme_stack(self) -> ThemeStack: """Get the thread local theme stack.""" return self._thread_locals.theme_stack def _detect_color_system(self) -> Optional[ColorSystem]: """Detect color system from env vars.""" if self.is_jupyter: return ColorSystem.TRUECOLOR if not self.is_terminal or self.is_dumb_terminal: return None if WINDOWS: # pragma: no cover if self.legacy_windows: # pragma: no cover return ColorSystem.WINDOWS windows_console_features = get_windows_console_features() return ( ColorSystem.TRUECOLOR if windows_console_features.truecolor else ColorSystem.EIGHT_BIT ) else: color_term = self._environ.get("COLORTERM", "").strip().lower() if color_term in ("truecolor", "24bit"): return ColorSystem.TRUECOLOR 
term = self._environ.get("TERM", "").strip().lower() _term_name, _hyphen, colors = term.rpartition("-") color_system = _TERM_COLORS.get(colors, ColorSystem.STANDARD) return color_system def _enter_buffer(self) -> None: """Enter in to a buffer context, and buffer all output.""" self._buffer_index += 1 def _exit_buffer(self) -> None: """Leave buffer context, and render content if required.""" self._buffer_index -= 1 self._check_buffer() def set_live(self, live: "Live") -> None: """Set Live instance. Used by Live context manager. Args: live (Live): Live instance using this Console. Raises: errors.LiveError: If this Console has a Live context currently active. """ with self._lock: if self._live is not None: raise errors.LiveError("Only one live display may be active at once") self._live = live def clear_live(self) -> None: """Clear the Live instance.""" with self._lock: self._live = None def push_render_hook(self, hook: RenderHook) -> None: """Add a new render hook to the stack. Args: hook (RenderHook): Render hook instance. """ with self._lock: self._render_hooks.append(hook) def pop_render_hook(self) -> None: """Pop the last renderhook from the stack.""" with self._lock: self._render_hooks.pop() def __enter__(self) -> "Console": """Own context manager to enter buffer context.""" self._enter_buffer() return self def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: """Exit buffer context.""" self._exit_buffer() def begin_capture(self) -> None: """Begin capturing console output. Call :meth:`end_capture` to exit capture mode and return output.""" self._enter_buffer() def end_capture(self) -> str: """End capture mode and return captured string. Returns: str: Console output. """ render_result = self._render_buffer(self._buffer) del self._buffer[:] self._exit_buffer() return render_result def push_theme(self, theme: Theme, *, inherit: bool = True) -> None: """Push a new theme on to the top of the stack, replacing the styles from the previous theme. 
Generally speaking, you should call :meth:`~rich.console.Console.use_theme` to get a context manager, rather than calling this method directly. Args: theme (Theme): A theme instance. inherit (bool, optional): Inherit existing styles. Defaults to True. """ self._theme_stack.push_theme(theme, inherit=inherit) def pop_theme(self) -> None: """Remove theme from top of stack, restoring previous theme.""" self._theme_stack.pop_theme() def use_theme(self, theme: Theme, *, inherit: bool = True) -> ThemeContext: """Use a different theme for the duration of the context manager. Args: theme (Theme): Theme instance to user. inherit (bool, optional): Inherit existing console styles. Defaults to True. Returns: ThemeContext: [description] """ return ThemeContext(self, theme, inherit) @property def color_system(self) -> Optional[str]: """Get color system string. Returns: Optional[str]: "standard", "256" or "truecolor". """ if self._color_system is not None: return _COLOR_SYSTEMS_NAMES[self._color_system] else: return None @property def encoding(self) -> str: """Get the encoding of the console file, e.g. ``"utf-8"``. Returns: str: A standard encoding string. """ return (getattr(self.file, "encoding", "utf-8") or "utf-8").lower() @property def is_terminal(self) -> bool: """Check if the console is writing to a terminal. Returns: bool: True if the console writing to a device capable of understanding escape sequences, otherwise False. 
""" # If dev has explicitly set this value, return it if self._force_terminal is not None: return self._force_terminal # Fudge for Idle if hasattr(sys.stdin, "__module__") and sys.stdin.__module__.startswith( "idlelib" ): # Return False for Idle which claims to be a tty but can't handle ansi codes return False if self.is_jupyter: # return False for Jupyter, which may have FORCE_COLOR set return False environ = self._environ tty_compatible = environ.get("TTY_COMPATIBLE", "") # 0 indicates device is not tty compatible if tty_compatible == "0": return False # 1 indicates device is tty compatible if tty_compatible == "1": return True # https://force-color.org/ force_color = environ.get("FORCE_COLOR") if force_color is not None: return force_color != "" # Any other value defaults to auto detect isatty: Optional[Callable[[], bool]] = getattr(self.file, "isatty", None) try: return False if isatty is None else isatty() except ValueError: # in some situation (at the end of a pytest run for example) isatty() can raise # ValueError: I/O operation on closed file # return False because we aren't in a terminal anymore return False @property def is_dumb_terminal(self) -> bool: """Detect dumb terminal. Returns: bool: True if writing to a dumb terminal, otherwise False. """ _term = self._environ.get("TERM", "") is_dumb = _term.lower() in ("dumb", "unknown") return self.is_terminal and is_dumb @property def options(self) -> ConsoleOptions: """Get default console options.""" return ConsoleOptions( max_height=self.size.height, size=self.size, legacy_windows=self.legacy_windows, min_width=1, max_width=self.width, encoding=self.encoding, is_terminal=self.is_terminal, ) @property def size(self) -> ConsoleDimensions: """Get the size of the console. Returns: ConsoleDimensions: A named tuple containing the dimensions. 
""" if self._width is not None and self._height is not None: return ConsoleDimensions(self._width - self.legacy_windows, self._height) if self.is_dumb_terminal: return ConsoleDimensions(80, 25) width: Optional[int] = None height: Optional[int] = None streams = _STD_STREAMS_OUTPUT if WINDOWS else _STD_STREAMS for file_descriptor in streams: try: width, height = os.get_terminal_size(file_descriptor) except (AttributeError, ValueError, OSError): # Probably not a terminal pass else: break columns = self._environ.get("COLUMNS") if columns is not None and columns.isdigit(): width = int(columns) lines = self._environ.get("LINES") if lines is not None and lines.isdigit(): height = int(lines) # get_terminal_size can report 0, 0 if run from pseudo-terminal width = width or 80 height = height or 25 return ConsoleDimensions( width - self.legacy_windows if self._width is None else self._width, height if self._height is None else self._height, ) @size.setter def size(self, new_size: Tuple[int, int]) -> None: """Set a new size for the terminal. Args: new_size (Tuple[int, int]): New width and height. """ width, height = new_size self._width = width self._height = height @property def width(self) -> int: """Get the width of the console. Returns: int: The width (in characters) of the console. """ return self.size.width @width.setter def width(self, width: int) -> None: """Set width. Args: width (int): New width. """ self._width = width @property def height(self) -> int: """Get the height of the console. Returns: int: The height (in lines) of the console. """ return self.size.height @height.setter def height(self, height: int) -> None: """Set height. Args: height (int): new height. """ self._height = height def bell(self) -> None: """Play a 'bell' sound (if supported by the terminal).""" self.control(Control.bell()) def capture(self) -> Capture: """A context manager to *capture* the result of print() or log() in a string, rather than writing it to the console. 
Example: >>> from rich.console import Console >>> console = Console() >>> with console.capture() as capture: ... console.print("[bold magenta]Hello World[/]") >>> print(capture.get()) Returns: Capture: Context manager with disables writing to the terminal. """ capture = Capture(self) return capture def pager( self, pager: Optional[Pager] = None, styles: bool = False, links: bool = False ) -> PagerContext: """A context manager to display anything printed within a "pager". The pager application is defined by the system and will typically support at least pressing a key to scroll. Args: pager (Pager, optional): A pager object, or None to use :class:`~rich.pager.SystemPager`. Defaults to None. styles (bool, optional): Show styles in pager. Defaults to False. links (bool, optional): Show links in pager. Defaults to False. Example: >>> from rich.console import Console >>> from rich.__main__ import make_test_card >>> console = Console() >>> with console.pager(): console.print(make_test_card()) Returns: PagerContext: A context manager. """ return PagerContext(self, pager=pager, styles=styles, links=links) def line(self, count: int = 1) -> None: """Write new line(s). Args: count (int, optional): Number of new lines. Defaults to 1. """ assert count >= 0, "count must be >= 0" self.print(NewLine(count)) def clear(self, home: bool = True) -> None: """Clear the screen. Args: home (bool, optional): Also move the cursor to 'home' position. Defaults to True. """ if home: self.control(Control.clear(), Control.home()) else: self.control(Control.clear()) def status( self, status: RenderableType, *, spinner: str = "dots", spinner_style: StyleType = "status.spinner", speed: float = 1.0, refresh_per_second: float = 12.5, ) -> "Status": """Display a status and spinner. Args: status (RenderableType): A status renderable (str or Text typically). spinner (str, optional): Name of spinner animation (see python -m rich.spinner). Defaults to "dots". 
spinner_style (StyleType, optional): Style of spinner. Defaults to "status.spinner". speed (float, optional): Speed factor for spinner animation. Defaults to 1.0. refresh_per_second (float, optional): Number of refreshes per second. Defaults to 12.5. Returns: Status: A Status object that may be used as a context manager. """ from .status import Status status_renderable = Status( status, console=self, spinner=spinner, spinner_style=spinner_style, speed=speed, refresh_per_second=refresh_per_second, ) return status_renderable def show_cursor(self, show: bool = True) -> bool: """Show or hide the cursor. Args: show (bool, optional): Set visibility of the cursor. """ if self.is_terminal: self.control(Control.show_cursor(show)) return True return False def set_alt_screen(self, enable: bool = True) -> bool: """Enables alternative screen mode. Note, if you enable this mode, you should ensure that is disabled before the application exits. See :meth:`~rich.Console.screen` for a context manager that handles this for you. Args: enable (bool, optional): Enable (True) or disable (False) alternate screen. Defaults to True. Returns: bool: True if the control codes were written. """ changed = False if self.is_terminal and not self.legacy_windows: self.control(Control.alt_screen(enable)) changed = True self._is_alt_screen = enable return changed @property def is_alt_screen(self) -> bool: """Check if the alt screen was enabled. Returns: bool: True if the alt screen was enabled, otherwise False. """ return self._is_alt_screen def set_window_title(self, title: str) -> bool: """Set the title of the console terminal window. Warning: There is no means within Rich of "resetting" the window title to its previous value, meaning the title you set will persist even after your application exits. ``fish`` shell resets the window title before and after each command by default, negating this issue. Windows Terminal and command prompt will also reset the title for you. 
Most other shells and terminals, however, do not do this. Some terminals may require configuration changes before you can set the title. Some terminals may not support setting the title at all. Other software (including the terminal itself, the shell, custom prompts, plugins, etc.) may also set the terminal window title. This could result in whatever value you write using this method being overwritten. Args: title (str): The new title of the terminal window. Returns: bool: True if the control code to change the terminal title was written, otherwise False. Note that a return value of True does not guarantee that the window title has actually changed, since the feature may be unsupported/disabled in some terminals. """ if self.is_terminal: self.control(Control.title(title)) return True return False def screen( self, hide_cursor: bool = True, style: Optional[StyleType] = None ) -> "ScreenContext": """Context manager to enable and disable 'alternative screen' mode. Args: hide_cursor (bool, optional): Also hide the cursor. Defaults to False. style (Style, optional): Optional style for screen. Defaults to None. Returns: ~ScreenContext: Context which enables alternate screen on enter, and disables it on exit. """ return ScreenContext(self, hide_cursor=hide_cursor, style=style or "") def measure( self, renderable: RenderableType, *, options: Optional[ConsoleOptions] = None ) -> Measurement: """Measure a renderable. Returns a :class:`~rich.measure.Measurement` object which contains information regarding the number of characters required to print the renderable. Args: renderable (RenderableType): Any renderable or string. options (Optional[ConsoleOptions], optional): Options to use when measuring, or None to use default options. Defaults to None. Returns: Measurement: A measurement of the renderable. 
""" measurement = Measurement.get(self, options or self.options, renderable) return measurement def render( self, renderable: RenderableType, options: Optional[ConsoleOptions] = None ) -> Iterable[Segment]: """Render an object in to an iterable of `Segment` instances. This method contains the logic for rendering objects with the console protocol. You are unlikely to need to use it directly, unless you are extending the library. Args: renderable (RenderableType): An object supporting the console protocol, or an object that may be converted to a string. options (ConsoleOptions, optional): An options object, or None to use self.options. Defaults to None. Returns: Iterable[Segment]: An iterable of segments that may be rendered. """ _options = options or self.options if _options.max_width < 1: # No space to render anything. This prevents potential recursion errors. return render_iterable: RenderResult renderable = rich_cast(renderable) if hasattr(renderable, "__rich_console__") and not isclass(renderable): render_iterable = renderable.__rich_console__(self, _options) elif isinstance(renderable, str): text_renderable = self.render_str( renderable, highlight=_options.highlight, markup=_options.markup ) render_iterable = text_renderable.__rich_console__(self, _options) else: raise errors.NotRenderableError( f"Unable to render {renderable!r}; " "A str, Segment or object with __rich_console__ method is required" ) try: iter_render = iter(render_iterable) except TypeError: raise errors.NotRenderableError( f"object {render_iterable!r} is not renderable" ) _Segment = Segment _options = _options.reset_height() for render_output in iter_render: if isinstance(render_output, _Segment): yield render_output else: yield from self.render(render_output, _options) def render_lines( self, renderable: RenderableType, options: Optional[ConsoleOptions] = None, *, style: Optional[Style] = None, pad: bool = True, new_lines: bool = False, ) -> List[List[Segment]]: """Render objects in to a list 
of lines. The output of render_lines is useful when further formatting of rendered console text is required, such as the Panel class which draws a border around any renderable object. Args: renderable (RenderableType): Any object renderable in the console. options (Optional[ConsoleOptions], optional): Console options, or None to use self.options. Default to ``None``. style (Style, optional): Optional style to apply to renderables. Defaults to ``None``. pad (bool, optional): Pad lines shorter than render width. Defaults to ``True``. new_lines (bool, optional): Include "\n" characters at end of lines. Returns: List[List[Segment]]: A list of lines, where a line is a list of Segment objects. """ with self._lock: render_options = options or self.options _rendered = self.render(renderable, render_options) if style: _rendered = Segment.apply_style(_rendered, style) render_height = render_options.height if render_height is not None: render_height = max(0, render_height) lines = list( islice( Segment.split_and_crop_lines( _rendered, render_options.max_width, include_new_lines=new_lines, pad=pad, style=style, ), None, render_height, ) ) if render_options.height is not None: extra_lines = render_options.height - len(lines) if extra_lines > 0: pad_line = [ ( [ Segment(" " * render_options.max_width, style), Segment("\n"), ] if new_lines else [Segment(" " * render_options.max_width, style)] ) ] lines.extend(pad_line * extra_lines) return lines def render_str( self, text: str, *, style: Union[str, Style] = "", justify: Optional[JustifyMethod] = None, overflow: Optional[OverflowMethod] = None, emoji: Optional[bool] = None, markup: Optional[bool] = None, highlight: Optional[bool] = None, highlighter: Optional[HighlighterType] = None, ) -> "Text": """Convert a string to a Text instance. This is called automatically if you print or log a string. Args: text (str): Text to render. style (Union[str, Style], optional): Style to apply to rendered text. 
justify (str, optional): Justify method: "default", "left", "center", "full", or "right". Defaults to ``None``. overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to ``None``. emoji (Optional[bool], optional): Enable emoji, or ``None`` to use Console default. markup (Optional[bool], optional): Enable markup, or ``None`` to use Console default. highlight (Optional[bool], optional): Enable highlighting, or ``None`` to use Console default. highlighter (HighlighterType, optional): Optional highlighter to apply. Returns: ConsoleRenderable: Renderable object. """ emoji_enabled = emoji or (emoji is None and self._emoji) markup_enabled = markup or (markup is None and self._markup) highlight_enabled = highlight or (highlight is None and self._highlight) if markup_enabled: rich_text = render_markup( text, style=style, emoji=emoji_enabled, emoji_variant=self._emoji_variant, ) rich_text.justify = justify rich_text.overflow = overflow else: rich_text = Text( ( _emoji_replace(text, default_variant=self._emoji_variant) if emoji_enabled else text ), justify=justify, overflow=overflow, style=style, ) _highlighter = (highlighter or self.highlighter) if highlight_enabled else None if _highlighter is not None: highlight_text = _highlighter(str(rich_text)) highlight_text.copy_styles(rich_text) return highlight_text return rich_text def get_style( self, name: Union[str, Style], *, default: Optional[Union[Style, str]] = None ) -> Style: """Get a Style instance by its theme name or parse a definition. Args: name (str): The name of a style or a style definition. Returns: Style: A Style object. Raises: MissingStyle: If no style could be parsed from name. 
""" if isinstance(name, Style): return name try: style = self._theme_stack.get(name) if style is None: style = Style.parse(name) return style.copy() if style.link else style except errors.StyleSyntaxError as error: if default is not None: return self.get_style(default) raise errors.MissingStyle( f"Failed to get style {name!r}; {error}" ) from None def _collect_renderables( self, objects: Iterable[Any], sep: str, end: str, *, justify: Optional[JustifyMethod] = None, emoji: Optional[bool] = None, markup: Optional[bool] = None, highlight: Optional[bool] = None, ) -> List[ConsoleRenderable]: """Combine a number of renderables and text into one renderable. Args: objects (Iterable[Any]): Anything that Rich can render. sep (str): String to write between print data. end (str): String to write at end of print data. justify (str, optional): One of "left", "right", "center", or "full". Defaults to ``None``. emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Returns: List[ConsoleRenderable]: A list of things to render. 
""" renderables: List[ConsoleRenderable] = [] _append = renderables.append text: List[Text] = [] append_text = text.append append = _append if justify in ("left", "center", "right"): def align_append(renderable: RenderableType) -> None: _append(Align(renderable, cast(AlignMethod, justify))) append = align_append _highlighter: HighlighterType = _null_highlighter if highlight or (highlight is None and self._highlight): _highlighter = self.highlighter def check_text() -> None: if text: sep_text = Text(sep, justify=justify, end=end) append(sep_text.join(text)) text.clear() for renderable in objects: renderable = rich_cast(renderable) if isinstance(renderable, str): append_text( self.render_str( renderable, emoji=emoji, markup=markup, highlight=highlight, highlighter=_highlighter, ) ) elif isinstance(renderable, Text): append_text(renderable) elif isinstance(renderable, ConsoleRenderable): check_text() append(renderable) elif is_expandable(renderable): check_text() append(Pretty(renderable, highlighter=_highlighter)) else: append_text(_highlighter(str(renderable))) check_text() if self.style is not None: style = self.get_style(self.style) renderables = [Styled(renderable, style) for renderable in renderables] return renderables def rule( self, title: TextType = "", *, characters: str = "─", style: Union[str, Style] = "rule.line", align: AlignMethod = "center", ) -> None: """Draw a line with optional centered title. Args: title (str, optional): Text to render over the rule. Defaults to "". characters (str, optional): Character(s) to form the line. Defaults to "─". style (str, optional): Style of line. Defaults to "rule.line". align (str, optional): How to align the title, one of "left", "center", or "right". Defaults to "center". """ from .rule import Rule rule = Rule(title=title, characters=characters, style=style, align=align) self.print(rule) def control(self, *control: Control) -> None: """Insert non-printing control codes. 
Args: control_codes (str): Control codes, such as those that may move the cursor. """ if not self.is_dumb_terminal: with self: self._buffer.extend(_control.segment for _control in control) def out( self, *objects: Any, sep: str = " ", end: str = "\n", style: Optional[Union[str, Style]] = None, highlight: Optional[bool] = None, ) -> None: """Output to the terminal. This is a low-level way of writing to the terminal which unlike :meth:`~rich.console.Console.print` won't pretty print, wrap text, or apply markup, but will optionally apply highlighting and a basic style. Args: sep (str, optional): String to write between print data. Defaults to " ". end (str, optional): String to write at end of print data. Defaults to "\\\\n". style (Union[str, Style], optional): A style to apply to output. Defaults to None. highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to ``None``. """ raw_output: str = sep.join(str(_object) for _object in objects) self.print( raw_output, style=style, highlight=highlight, emoji=False, markup=False, no_wrap=True, overflow="ignore", crop=False, end=end, ) def print( self, *objects: Any, sep: str = " ", end: str = "\n", style: Optional[Union[str, Style]] = None, justify: Optional[JustifyMethod] = None, overflow: Optional[OverflowMethod] = None, no_wrap: Optional[bool] = None, emoji: Optional[bool] = None, markup: Optional[bool] = None, highlight: Optional[bool] = None, width: Optional[int] = None, height: Optional[int] = None, crop: bool = True, soft_wrap: Optional[bool] = None, new_line_start: bool = False, ) -> None: """Print to the console. Args: objects (positional args): Objects to log to the terminal. sep (str, optional): String to write between print data. Defaults to " ". end (str, optional): String to write at end of print data. Defaults to "\\\\n". style (Union[str, Style], optional): A style to apply to output. Defaults to None. 
justify (str, optional): Justify method: "default", "left", "right", "center", or "full". Defaults to ``None``. overflow (str, optional): Overflow method: "ignore", "crop", "fold", or "ellipsis". Defaults to None. no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to None. emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. Defaults to ``None``. markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. Defaults to ``None``. highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to ``None``. width (Optional[int], optional): Width of output, or ``None`` to auto-detect. Defaults to ``None``. crop (Optional[bool], optional): Crop output to width of terminal. Defaults to True. soft_wrap (bool, optional): Enable soft wrap mode which disables word wrapping and cropping of text or ``None`` for Console default. Defaults to ``None``. new_line_start (bool, False): Insert a new line at the start if the output contains more than one line. Defaults to ``False``. 
""" if not objects: objects = (NewLine(),) if soft_wrap is None: soft_wrap = self.soft_wrap if soft_wrap: if no_wrap is None: no_wrap = True if overflow is None: overflow = "ignore" crop = False render_hooks = self._render_hooks[:] with self: renderables = self._collect_renderables( objects, sep, end, justify=justify, emoji=emoji, markup=markup, highlight=highlight, ) for hook in render_hooks: renderables = hook.process_renderables(renderables) render_options = self.options.update( justify=justify, overflow=overflow, width=min(width, self.width) if width is not None else NO_CHANGE, height=height, no_wrap=no_wrap, markup=markup, highlight=highlight, ) new_segments: List[Segment] = [] extend = new_segments.extend render = self.render if style is None: for renderable in renderables: extend(render(renderable, render_options)) else: for renderable in renderables: extend( Segment.apply_style( render(renderable, render_options), self.get_style(style) ) ) if new_line_start: if ( len("".join(segment.text for segment in new_segments).splitlines()) > 1 ): new_segments.insert(0, Segment.line()) if crop: buffer_extend = self._buffer.extend for line in Segment.split_and_crop_lines( new_segments, self.width, pad=False ): buffer_extend(line) else: self._buffer.extend(new_segments) def print_json( self, json: Optional[str] = None, *, data: Any = None, indent: Union[None, int, str] = 2, highlight: bool = True, skip_keys: bool = False, ensure_ascii: bool = False, check_circular: bool = True, allow_nan: bool = True, default: Optional[Callable[[Any], Any]] = None, sort_keys: bool = False, ) -> None: """Pretty prints JSON. Output will be valid JSON. Args: json (Optional[str]): A string containing JSON. data (Any): If json is not supplied, then encode this data. indent (Union[None, int, str], optional): Number of spaces to indent. Defaults to 2. highlight (bool, optional): Enable highlighting of output: Defaults to True. skip_keys (bool, optional): Skip keys not of a basic type. 
Defaults to False. ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False. check_circular (bool, optional): Check for circular references. Defaults to True. allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True. default (Callable, optional): A callable that converts values that can not be encoded in to something that can be JSON encoded. Defaults to None. sort_keys (bool, optional): Sort dictionary keys. Defaults to False. """ from pipenv.patched.pip._vendor.rich.json import JSON if json is None: json_renderable = JSON.from_data( data, indent=indent, highlight=highlight, skip_keys=skip_keys, ensure_ascii=ensure_ascii, check_circular=check_circular, allow_nan=allow_nan, default=default, sort_keys=sort_keys, ) else: if not isinstance(json, str): raise TypeError( f"json must be str. Did you mean print_json(data={json!r}) ?" ) json_renderable = JSON( json, indent=indent, highlight=highlight, skip_keys=skip_keys, ensure_ascii=ensure_ascii, check_circular=check_circular, allow_nan=allow_nan, default=default, sort_keys=sort_keys, ) self.print(json_renderable, soft_wrap=True) def update_screen( self, renderable: RenderableType, *, region: Optional[Region] = None, options: Optional[ConsoleOptions] = None, ) -> None: """Update the screen at a given offset. Args: renderable (RenderableType): A Rich renderable. region (Region, optional): Region of screen to update, or None for entire screen. Defaults to None. x (int, optional): x offset. Defaults to 0. y (int, optional): y offset. Defaults to 0. Raises: errors.NoAltScreen: If the Console isn't in alt screen mode. 
""" if not self.is_alt_screen: raise errors.NoAltScreen("Alt screen must be enabled to call update_screen") render_options = options or self.options if region is None: x = y = 0 render_options = render_options.update_dimensions( render_options.max_width, render_options.height or self.height ) else: x, y, width, height = region render_options = render_options.update_dimensions(width, height) lines = self.render_lines(renderable, options=render_options) self.update_screen_lines(lines, x, y) def update_screen_lines( self, lines: List[List[Segment]], x: int = 0, y: int = 0 ) -> None: """Update lines of the screen at a given offset. Args: lines (List[List[Segment]]): Rendered lines (as produced by :meth:`~rich.Console.render_lines`). x (int, optional): x offset (column no). Defaults to 0. y (int, optional): y offset (column no). Defaults to 0. Raises: errors.NoAltScreen: If the Console isn't in alt screen mode. """ if not self.is_alt_screen: raise errors.NoAltScreen("Alt screen must be enabled to call update_screen") screen_update = ScreenUpdate(lines, x, y) segments = self.render(screen_update) self._buffer.extend(segments) self._check_buffer() def print_exception( self, *, width: Optional[int] = 100, extra_lines: int = 3, theme: Optional[str] = None, word_wrap: bool = False, show_locals: bool = False, suppress: Iterable[Union[str, ModuleType]] = (), max_frames: int = 100, ) -> None: """Prints a rich render of the last exception and traceback. Args: width (Optional[int], optional): Number of characters used to render code. Defaults to 100. extra_lines (int, optional): Additional lines of code to render. Defaults to 3. theme (str, optional): Override pygments theme used in traceback word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False. show_locals (bool, optional): Enable display of local variables. Defaults to False. suppress (Iterable[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback. 
max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100. """ from .traceback import Traceback traceback = Traceback( width=width, extra_lines=extra_lines, theme=theme, word_wrap=word_wrap, show_locals=show_locals, suppress=suppress, max_frames=max_frames, ) self.print(traceback) @staticmethod def _caller_frame_info( offset: int, currentframe: Callable[[], Optional[FrameType]] = inspect.currentframe, ) -> Tuple[str, int, Dict[str, Any]]: """Get caller frame information. Args: offset (int): the caller offset within the current frame stack. currentframe (Callable[[], Optional[FrameType]], optional): the callable to use to retrieve the current frame. Defaults to ``inspect.currentframe``. Returns: Tuple[str, int, Dict[str, Any]]: A tuple containing the filename, the line number and the dictionary of local variables associated with the caller frame. Raises: RuntimeError: If the stack offset is invalid. """ # Ignore the frame of this local helper offset += 1 frame = currentframe() if frame is not None: # Use the faster currentframe where implemented while offset and frame is not None: frame = frame.f_back offset -= 1 assert frame is not None return frame.f_code.co_filename, frame.f_lineno, frame.f_locals else: # Fallback to the slower stack frame_info = inspect.stack()[offset] return frame_info.filename, frame_info.lineno, frame_info.frame.f_locals def log( self, *objects: Any, sep: str = " ", end: str = "\n", style: Optional[Union[str, Style]] = None, justify: Optional[JustifyMethod] = None, emoji: Optional[bool] = None, markup: Optional[bool] = None, highlight: Optional[bool] = None, log_locals: bool = False, _stack_offset: int = 1, ) -> None: """Log rich content to the terminal. Args: objects (positional args): Objects to log to the terminal. sep (str, optional): String to write between print data. Defaults to " ". end (str, optional): String to write at end of print data. Defaults to "\\\\n". 
style (Union[str, Style], optional): A style to apply to output. Defaults to None. justify (str, optional): One of "left", "right", "center", or "full". Defaults to ``None``. emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. Defaults to None. markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. Defaults to None. highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to None. log_locals (bool, optional): Boolean to enable logging of locals where ``log()`` was called. Defaults to False. _stack_offset (int, optional): Offset of caller from end of call stack. Defaults to 1. """ if not objects: objects = (NewLine(),) render_hooks = self._render_hooks[:] with self: renderables = self._collect_renderables( objects, sep, end, justify=justify, emoji=emoji, markup=markup, highlight=highlight, ) if style is not None: renderables = [Styled(renderable, style) for renderable in renderables] filename, line_no, locals = self._caller_frame_info(_stack_offset) link_path = None if filename.startswith("<") else os.path.abspath(filename) path = filename.rpartition(os.sep)[-1] if log_locals: locals_map = { key: value for key, value in locals.items() if not key.startswith("__") } renderables.append(render_scope(locals_map, title="[i]locals")) renderables = [ self._log_render( self, renderables, log_time=self.get_datetime(), path=path, line_no=line_no, link_path=link_path, ) ] for hook in render_hooks: renderables = hook.process_renderables(renderables) new_segments: List[Segment] = [] extend = new_segments.extend render = self.render render_options = self.options for renderable in renderables: extend(render(renderable, render_options)) buffer_extend = self._buffer.extend for line in Segment.split_and_crop_lines( new_segments, self.width, pad=False ): buffer_extend(line) def on_broken_pipe(self) -> None: """This function is called when a `BrokenPipeError` is 
raised. This can occur when piping Textual output in Linux and macOS. The default implementation is to exit the app, but you could implement this method in a subclass to change the behavior. See https://docs.python.org/3/library/signal.html#note-on-sigpipe for details. """ self.quiet = True devnull = os.open(os.devnull, os.O_WRONLY) os.dup2(devnull, sys.stdout.fileno()) raise SystemExit(1) def _check_buffer(self) -> None: """Check if the buffer may be rendered. Render it if it can (e.g. Console.quiet is False) Rendering is supported on Windows, Unix and Jupyter environments. For legacy Windows consoles, the win32 API is called directly. This method will also record what it renders if recording is enabled via Console.record. """ if self.quiet: del self._buffer[:] return try: self._write_buffer() except BrokenPipeError: self.on_broken_pipe() def _write_buffer(self) -> None: """Write the buffer to the output file.""" with self._lock: if self.record and not self._buffer_index: with self._record_buffer_lock: self._record_buffer.extend(self._buffer[:]) if self._buffer_index == 0: if self.is_jupyter: # pragma: no cover from .jupyter import display display(self._buffer, self._render_buffer(self._buffer[:])) del self._buffer[:] else: if WINDOWS: use_legacy_windows_render = False if self.legacy_windows: fileno = get_fileno(self.file) if fileno is not None: use_legacy_windows_render = ( fileno in _STD_STREAMS_OUTPUT ) if use_legacy_windows_render: from pipenv.patched.pip._vendor.rich._win32_console import LegacyWindowsTerm from pipenv.patched.pip._vendor.rich._windows_renderer import legacy_windows_render buffer = self._buffer[:] if self.no_color and self._color_system: buffer = list(Segment.remove_color(buffer)) legacy_windows_render(buffer, LegacyWindowsTerm(self.file)) else: # Either a non-std stream on legacy Windows, or modern Windows. 
text = self._render_buffer(self._buffer[:]) # https://bugs.python.org/issue37871 # https://github.com/python/cpython/issues/82052 # We need to avoid writing more than 32Kb in a single write, due to the above bug write = self.file.write # Worse case scenario, every character is 4 bytes of utf-8 MAX_WRITE = 32 * 1024 // 4 try: if len(text) <= MAX_WRITE: write(text) else: batch: List[str] = [] batch_append = batch.append size = 0 for line in text.splitlines(True): if size + len(line) > MAX_WRITE and batch: write("".join(batch)) batch.clear() size = 0 batch_append(line) size += len(line) if batch: write("".join(batch)) batch.clear() except UnicodeEncodeError as error: error.reason = f"{error.reason}\n*** You may need to add PYTHONIOENCODING=utf-8 to your environment ***" raise else: text = self._render_buffer(self._buffer[:]) try: self.file.write(text) except UnicodeEncodeError as error: error.reason = f"{error.reason}\n*** You may need to add PYTHONIOENCODING=utf-8 to your environment ***" raise self.file.flush() del self._buffer[:] def _render_buffer(self, buffer: Iterable[Segment]) -> str: """Render buffered output, and clear buffer.""" output: List[str] = [] append = output.append color_system = self._color_system legacy_windows = self.legacy_windows not_terminal = not self.is_terminal if self.no_color and color_system: buffer = Segment.remove_color(buffer) for text, style, control in buffer: if style: append( style.render( text, color_system=color_system, legacy_windows=legacy_windows, ) ) elif not (not_terminal and control): append(text) rendered = "".join(output) return rendered def input( self, prompt: TextType = "", *, markup: bool = True, emoji: bool = True, password: bool = False, stream: Optional[TextIO] = None, ) -> str: """Displays a prompt and waits for input from the user. The prompt may contain color / style. 
It works in the same way as Python's builtin :func:`input` function and provides elaborate line editing and history features if Python's builtin :mod:`readline` module is previously loaded. Args: prompt (Union[str, Text]): Text to render in the prompt. markup (bool, optional): Enable console markup (requires a str prompt). Defaults to True. emoji (bool, optional): Enable emoji (requires a str prompt). Defaults to True. password: (bool, optional): Hide typed text. Defaults to False. stream: (TextIO, optional): Optional file to read input from (rather than stdin). Defaults to None. Returns: str: Text read from stdin. """ if prompt: self.print(prompt, markup=markup, emoji=emoji, end="") if password: result = getpass("", stream=stream) else: if stream: result = stream.readline() else: result = input() return result def export_text(self, *, clear: bool = True, styles: bool = False) -> str: """Generate text from console contents (requires record=True argument in constructor). Args: clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``. styles (bool, optional): If ``True``, ansi escape codes will be included. ``False`` for plain text. Defaults to ``False``. Returns: str: String containing console contents. """ assert ( self.record ), "To export console contents set record=True in the constructor or instance" with self._record_buffer_lock: if styles: text = "".join( (style.render(text) if style else text) for text, style, _ in self._record_buffer ) else: text = "".join( segment.text for segment in self._record_buffer if not segment.control ) if clear: del self._record_buffer[:] return text def save_text(self, path: str, *, clear: bool = True, styles: bool = False) -> None: """Generate text from console and save to a given location (requires record=True argument in constructor). Args: path (str): Path to write text files. clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``. 
styles (bool, optional): If ``True``, ansi style codes will be included. ``False`` for plain text. Defaults to ``False``. """ text = self.export_text(clear=clear, styles=styles) with open(path, "w", encoding="utf-8") as write_file: write_file.write(text) def export_html( self, *, theme: Optional[TerminalTheme] = None, clear: bool = True, code_format: Optional[str] = None, inline_styles: bool = False, ) -> str: """Generate HTML from console contents (requires record=True argument in constructor). Args: theme (TerminalTheme, optional): TerminalTheme object containing console colors. clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``. code_format (str, optional): Format string to render HTML. In addition to '{foreground}', '{background}', and '{code}', should contain '{stylesheet}' if inline_styles is ``False``. inline_styles (bool, optional): If ``True`` styles will be inlined in to spans, which makes files larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag. Defaults to False. Returns: str: String containing console contents as HTML. 
""" assert ( self.record ), "To export console contents set record=True in the constructor or instance" fragments: List[str] = [] append = fragments.append _theme = theme or DEFAULT_TERMINAL_THEME stylesheet = "" render_code_format = CONSOLE_HTML_FORMAT if code_format is None else code_format with self._record_buffer_lock: if inline_styles: for text, style, _ in Segment.filter_control( Segment.simplify(self._record_buffer) ): text = escape(text) if style: rule = style.get_html_style(_theme) if style.link: text = f'<a href="{style.link}">{text}</a>' text = f'<span style="{rule}">{text}</span>' if rule else text append(text) else: styles: Dict[str, int] = {} for text, style, _ in Segment.filter_control( Segment.simplify(self._record_buffer) ): text = escape(text) if style: rule = style.get_html_style(_theme) style_number = styles.setdefault(rule, len(styles) + 1) if style.link: text = f'<a class="r{style_number}" href="{style.link}">{text}</a>' else: text = f'<span class="r{style_number}">{text}</span>' append(text) stylesheet_rules: List[str] = [] stylesheet_append = stylesheet_rules.append for style_rule, style_number in styles.items(): if style_rule: stylesheet_append(f".r{style_number} {{{style_rule}}}") stylesheet = "\n".join(stylesheet_rules) rendered_code = render_code_format.format( code="".join(fragments), stylesheet=stylesheet, foreground=_theme.foreground_color.hex, background=_theme.background_color.hex, ) if clear: del self._record_buffer[:] return rendered_code def save_html( self, path: str, *, theme: Optional[TerminalTheme] = None, clear: bool = True, code_format: str = CONSOLE_HTML_FORMAT, inline_styles: bool = False, ) -> None: """Generate HTML from console contents and write to a file (requires record=True argument in constructor). Args: path (str): Path to write html file. theme (TerminalTheme, optional): TerminalTheme object containing console colors. clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``. 
code_format (str, optional): Format string to render HTML. In addition to '{foreground}', '{background}', and '{code}', should contain '{stylesheet}' if inline_styles is ``False``. inline_styles (bool, optional): If ``True`` styles will be inlined in to spans, which makes files larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag. Defaults to False. """ html = self.export_html( theme=theme, clear=clear, code_format=code_format, inline_styles=inline_styles, ) with open(path, "w", encoding="utf-8") as write_file: write_file.write(html) def export_svg( self, *, title: str = "Rich", theme: Optional[TerminalTheme] = None, clear: bool = True, code_format: str = CONSOLE_SVG_FORMAT, font_aspect_ratio: float = 0.61, unique_id: Optional[str] = None, ) -> str: """ Generate an SVG from the console contents (requires record=True in Console constructor). Args: title (str, optional): The title of the tab in the output image theme (TerminalTheme, optional): The ``TerminalTheme`` object to use to style the terminal clear (bool, optional): Clear record buffer after exporting. Defaults to ``True`` code_format (str, optional): Format string used to generate the SVG. Rich will inject a number of variables into the string in order to form the final SVG output. The default template used and the variables injected by Rich can be found by inspecting the ``console.CONSOLE_SVG_FORMAT`` variable. font_aspect_ratio (float, optional): The width to height ratio of the font used in the ``code_format`` string. Defaults to 0.61, which is the width to height ratio of Fira Code (the default font). If you aren't specifying a different font inside ``code_format``, you probably don't need this. unique_id (str, optional): unique id that is used as the prefix for various elements (CSS styles, node ids). If not set, this defaults to a computed value based on the recorded content. 
""" from pipenv.patched.pip._vendor.rich.cells import cell_len style_cache: Dict[Style, str] = {} def get_svg_style(style: Style) -> str: """Convert a Style to CSS rules for SVG.""" if style in style_cache: return style_cache[style] css_rules = [] color = ( _theme.foreground_color if (style.color is None or style.color.is_default) else style.color.get_truecolor(_theme) ) bgcolor = ( _theme.background_color if (style.bgcolor is None or style.bgcolor.is_default) else style.bgcolor.get_truecolor(_theme) ) if style.reverse: color, bgcolor = bgcolor, color if style.dim: color = blend_rgb(color, bgcolor, 0.4) css_rules.append(f"fill: {color.hex}") if style.bold: css_rules.append("font-weight: bold") if style.italic: css_rules.append("font-style: italic;") if style.underline: css_rules.append("text-decoration: underline;") if style.strike: css_rules.append("text-decoration: line-through;") css = ";".join(css_rules) style_cache[style] = css return css _theme = theme or SVG_EXPORT_THEME width = self.width char_height = 20 char_width = char_height * font_aspect_ratio line_height = char_height * 1.22 margin_top = 1 margin_right = 1 margin_bottom = 1 margin_left = 1 padding_top = 40 padding_right = 8 padding_bottom = 8 padding_left = 8 padding_width = padding_left + padding_right padding_height = padding_top + padding_bottom margin_width = margin_left + margin_right margin_height = margin_top + margin_bottom text_backgrounds: List[str] = [] text_group: List[str] = [] classes: Dict[str, int] = {} style_no = 1 def escape_text(text: str) -> str: """HTML escape text and replace spaces with nbsp.""" return escape(text).replace(" ", "&#160;") def make_tag( name: str, content: Optional[str] = None, **attribs: object ) -> str: """Make a tag from name, content, and attributes.""" def stringify(value: object) -> str: if isinstance(value, (float)): return format(value, "g") return str(value) tag_attribs = " ".join( f'{k.lstrip("_").replace("_", "-")}="{stringify(v)}"' for k, v in 
attribs.items() ) return ( f"<{name} {tag_attribs}>{content}</{name}>" if content else f"<{name} {tag_attribs}/>" ) with self._record_buffer_lock: segments = list(Segment.filter_control(self._record_buffer)) if clear: self._record_buffer.clear() if unique_id is None: unique_id = "terminal-" + str( zlib.adler32( ("".join(repr(segment) for segment in segments)).encode( "utf-8", "ignore", ) + title.encode("utf-8", "ignore") ) ) y = 0 for y, line in enumerate(Segment.split_and_crop_lines(segments, length=width)): x = 0 for text, style, _control in line: style = style or Style() rules = get_svg_style(style) if rules not in classes: classes[rules] = style_no style_no += 1 class_name = f"r{classes[rules]}" if style.reverse: has_background = True background = ( _theme.foreground_color.hex if style.color is None else style.color.get_truecolor(_theme).hex ) else: bgcolor = style.bgcolor has_background = bgcolor is not None and not bgcolor.is_default background = ( _theme.background_color.hex if style.bgcolor is None else style.bgcolor.get_truecolor(_theme).hex ) text_length = cell_len(text) if has_background: text_backgrounds.append( make_tag( "rect", fill=background, x=x * char_width, y=y * line_height + 1.5, width=char_width * text_length, height=line_height + 0.25, shape_rendering="crispEdges", ) ) if text != " " * len(text): text_group.append( make_tag( "text", escape_text(text), _class=f"{unique_id}-{class_name}", x=x * char_width, y=y * line_height + char_height, textLength=char_width * len(text), clip_path=f"url(#{unique_id}-line-{y})", ) ) x += cell_len(text) line_offsets = [line_no * line_height + 1.5 for line_no in range(y)] lines = "\n".join( f"""<clipPath id="{unique_id}-line-{line_no}"> {make_tag("rect", x=0, y=offset, width=char_width * width, height=line_height + 0.25)} </clipPath>""" for line_no, offset in enumerate(line_offsets) ) styles = "\n".join( f".{unique_id}-r{rule_no} {{ {css} }}" for css, rule_no in classes.items() ) backgrounds = 
"".join(text_backgrounds) matrix = "".join(text_group) terminal_width = ceil(width * char_width + padding_width) terminal_height = (y + 1) * line_height + padding_height chrome = make_tag( "rect", fill=_theme.background_color.hex, stroke="rgba(255,255,255,0.35)", stroke_width="1", x=margin_left, y=margin_top, width=terminal_width, height=terminal_height, rx=8, ) title_color = _theme.foreground_color.hex if title: chrome += make_tag( "text", escape_text(title), _class=f"{unique_id}-title", fill=title_color, text_anchor="middle", x=terminal_width // 2, y=margin_top + char_height + 6, ) chrome += f""" <g transform="translate(26,22)"> <circle cx="0" cy="0" r="7" fill="#ff5f57"/> <circle cx="22" cy="0" r="7" fill="#febc2e"/> <circle cx="44" cy="0" r="7" fill="#28c840"/> </g> """ svg = code_format.format( unique_id=unique_id, char_width=char_width, char_height=char_height, line_height=line_height, terminal_width=char_width * width - 1, terminal_height=(y + 1) * line_height - 1, width=terminal_width + margin_width, height=terminal_height + margin_height, terminal_x=margin_left + padding_left, terminal_y=margin_top + padding_top, styles=styles, chrome=chrome, backgrounds=backgrounds, matrix=matrix, lines=lines, ) return svg def save_svg( self, path: str, *, title: str = "Rich", theme: Optional[TerminalTheme] = None, clear: bool = True, code_format: str = CONSOLE_SVG_FORMAT, font_aspect_ratio: float = 0.61, unique_id: Optional[str] = None, ) -> None: """Generate an SVG file from the console contents (requires record=True in Console constructor). Args: path (str): The path to write the SVG to. title (str, optional): The title of the tab in the output image theme (TerminalTheme, optional): The ``TerminalTheme`` object to use to style the terminal clear (bool, optional): Clear record buffer after exporting. Defaults to ``True`` code_format (str, optional): Format string used to generate the SVG. 
Rich will inject a number of variables into the string in order to form the final SVG output. The default template used and the variables injected by Rich can be found by inspecting the ``console.CONSOLE_SVG_FORMAT`` variable. font_aspect_ratio (float, optional): The width to height ratio of the font used in the ``code_format`` string. Defaults to 0.61, which is the width to height ratio of Fira Code (the default font). If you aren't specifying a different font inside ``code_format``, you probably don't need this. unique_id (str, optional): unique id that is used as the prefix for various elements (CSS styles, node ids). If not set, this defaults to a computed value based on the recorded content. """ svg = self.export_svg( title=title, theme=theme, clear=clear, code_format=code_format, font_aspect_ratio=font_aspect_ratio, unique_id=unique_id, ) with open(path, "w", encoding="utf-8") as write_file: write_file.write(svg) def _svg_hash(svg_main_code: str) -> str: """Returns a unique hash for the given SVG main code. Args: svg_main_code (str): The content we're going to inject in the SVG envelope. Returns: str: a hash of the given content """ return str(zlib.adler32(svg_main_code.encode())) if __name__ == "__main__": # pragma: no cover console = Console(record=True) console.log( "JSONRPC [i]request[/i]", 5, 1.3, True, False, None, { "jsonrpc": "2.0", "method": "subtract", "params": {"minuend": 42, "subtrahend": 23}, "id": 3, }, ) console.log("Hello, World!", "{'a': 1}", repr(console)) console.print( { "name": None, "empty": [], "quiz": { "sport": { "answered": True, "q1": { "question": "Which one is correct team name in NBA?", "options": [ "New York Bulls", "Los Angeles Kings", "Golden State Warriors", "Huston Rocket", ], "answer": "Huston Rocket", }, }, "maths": { "answered": False, "q1": { "question": "5 + 7 = ?", "options": [10, 11, 12, 13], "answer": 12, }, "q2": { "question": "12 - 8 = ?", "options": [1, 2, 3, 4], "answer": 4, }, }, }, } )
Console
python
miyuchina__mistletoe
mistletoe/ast_renderer.py
{ "start": 117, "end": 1475 }
class ____(BaseRenderer): def render(self, token): """ Returns the string representation of the AST. Overrides super().render. Delegates the logic to get_ast. """ return json.dumps(get_ast(token), indent=2) + '\n' def __getattr__(self, name): return lambda token: '' def get_ast(token): """ Recursively unrolls token attributes into dictionaries (token.children into lists). Returns: a dictionary of token's attributes. """ node = {} # Python 3.6 uses [ordered dicts] [1]. # Put in 'type' entry first to make the final tree format somewhat # similar to [MDAST] [2]. # # [1]: https://docs.python.org/3/whatsnew/3.6.html # [2]: https://github.com/syntax-tree/mdast node['type'] = token.__class__.__name__ for attrname in ['content', 'footnotes']: if attrname in vars(token): node[attrname] = getattr(token, attrname) for attrname in token.repr_attributes: node[attrname] = getattr(token, attrname) if 'header' in vars(token): node['header'] = get_ast(getattr(token, 'header')) if token.children is not None: node['children'] = [get_ast(child) for child in token.children] return node ASTRenderer = AstRenderer """ Deprecated name of the `AstRenderer` class. """
AstRenderer
python
pydantic__pydantic
pydantic/v1/types.py
{ "start": 24365, "end": 25034 }
class ____(type): def __getitem__(self, t: Type[Any]) -> Type[JsonWrapper]: if t is Any: return Json # allow Json[Any] to replicate plain Json return _registered(type('JsonWrapperValue', (JsonWrapper,), {'inner_type': t})) if TYPE_CHECKING: Json = Annotated[T, ...] # Json[list[str]] will be recognized by type checkers as list[str] else: class Json(metaclass=JsonMeta): @classmethod def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: field_schema.update(type='string', format='json-string') # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SECRET TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
JsonMeta
python
django__django
tests/view_tests/tests/test_debug.py
{ "start": 64146, "end": 78766 }
class ____( ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase ): """ Sensitive information can be filtered out of error reports (#14614). """ rf = RequestFactory() sensitive_settings = [ "SECRET_KEY", "SECRET_KEY_FALLBACKS", "PASSWORD", "API_KEY", "SOME_TOKEN", "MY_AUTH", ] def test_non_sensitive_request(self): """ Everything (request info and frame variables) can bee seen in the default error reports for non-sensitive requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(non_sensitive_view) self.verify_unsafe_email(non_sensitive_view) with self.settings(DEBUG=False): self.verify_unsafe_response(non_sensitive_view) self.verify_unsafe_email(non_sensitive_view) def test_sensitive_request(self): """ Sensitive POST parameters and frame variables cannot be seen in the default error reports for sensitive requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_view) self.verify_unsafe_email(sensitive_view) with self.settings(DEBUG=False): self.verify_safe_response(sensitive_view) self.verify_safe_email(sensitive_view) def test_async_sensitive_request(self): with self.settings(DEBUG=True): self.verify_unsafe_response(async_sensitive_view) self.verify_unsafe_email(async_sensitive_view) with self.settings(DEBUG=False): self.verify_safe_response(async_sensitive_view) self.verify_safe_email(async_sensitive_view) def test_async_sensitive_nested_request(self): with self.settings(DEBUG=True): self.verify_unsafe_response(async_sensitive_view_nested) self.verify_unsafe_email(async_sensitive_view_nested) with self.settings(DEBUG=False): self.verify_safe_response(async_sensitive_view_nested) self.verify_safe_email(async_sensitive_view_nested) def test_paranoid_request(self): """ No POST parameters and frame variables can be seen in the default error reports for "paranoid" requests. 
""" with self.settings(DEBUG=True): self.verify_unsafe_response(paranoid_view) self.verify_unsafe_email(paranoid_view) with self.settings(DEBUG=False): self.verify_paranoid_response(paranoid_view) self.verify_paranoid_email(paranoid_view) def test_multivalue_dict_key_error(self): """ #21098 -- Sensitive POST parameters cannot be seen in the error reports for if request.POST['nonexistent_key'] throws an error. """ with self.settings(DEBUG=True): self.verify_unsafe_response(multivalue_dict_key_error) self.verify_unsafe_email(multivalue_dict_key_error) with self.settings(DEBUG=False): self.verify_safe_response(multivalue_dict_key_error) self.verify_safe_email(multivalue_dict_key_error) def test_custom_exception_reporter_filter(self): """ It's possible to assign an exception reporter filter to the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER. """ with self.settings(DEBUG=True): self.verify_unsafe_response(custom_exception_reporter_filter_view) self.verify_unsafe_email(custom_exception_reporter_filter_view) with self.settings(DEBUG=False): self.verify_unsafe_response(custom_exception_reporter_filter_view) self.verify_unsafe_email(custom_exception_reporter_filter_view) def test_sensitive_method(self): """ The sensitive_variables decorator works with object methods. """ with self.settings(DEBUG=True): self.verify_unsafe_response( sensitive_method_view, check_for_POST_params=False ) self.verify_unsafe_email(sensitive_method_view, check_for_POST_params=False) with self.settings(DEBUG=False): self.verify_safe_response( sensitive_method_view, check_for_POST_params=False ) self.verify_safe_email(sensitive_method_view, check_for_POST_params=False) def test_async_sensitive_method(self): """ The sensitive_variables decorator works with async object methods. 
""" with self.settings(DEBUG=True): self.verify_unsafe_response( async_sensitive_method_view, check_for_POST_params=False ) self.verify_unsafe_email( async_sensitive_method_view, check_for_POST_params=False ) with self.settings(DEBUG=False): self.verify_safe_response( async_sensitive_method_view, check_for_POST_params=False ) self.verify_safe_email( async_sensitive_method_view, check_for_POST_params=False ) def test_async_sensitive_method_nested(self): """ The sensitive_variables decorator works with async object methods. """ with self.settings(DEBUG=True): self.verify_unsafe_response( async_sensitive_method_view_nested, check_for_POST_params=False ) self.verify_unsafe_email( async_sensitive_method_view_nested, check_for_POST_params=False ) with self.settings(DEBUG=False): self.verify_safe_response( async_sensitive_method_view_nested, check_for_POST_params=False ) self.verify_safe_email( async_sensitive_method_view_nested, check_for_POST_params=False ) def test_sensitive_function_arguments(self): """ Sensitive variables don't leak in the sensitive_variables decorator's frame, when those variables are passed as arguments to the decorated function. """ with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_args_function_caller) self.verify_unsafe_email(sensitive_args_function_caller) with self.settings(DEBUG=False): self.verify_safe_response( sensitive_args_function_caller, check_for_POST_params=False ) self.verify_safe_email( sensitive_args_function_caller, check_for_POST_params=False ) def test_sensitive_function_keyword_arguments(self): """ Sensitive variables don't leak in the sensitive_variables decorator's frame, when those variables are passed as keyword arguments to the decorated function. 
""" with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_kwargs_function_caller) self.verify_unsafe_email(sensitive_kwargs_function_caller) with self.settings(DEBUG=False): self.verify_safe_response( sensitive_kwargs_function_caller, check_for_POST_params=False ) self.verify_safe_email( sensitive_kwargs_function_caller, check_for_POST_params=False ) def test_callable_settings(self): """ Callable settings should not be evaluated in the debug page (#21345). """ def callable_setting(): return "This should not be displayed" with self.settings(DEBUG=True, FOOBAR=callable_setting): response = self.client.get("/raises500/") self.assertNotContains( response, "This should not be displayed", status_code=500 ) def test_callable_settings_forbidding_to_set_attributes(self): """ Callable settings which forbid to set attributes should not break the debug page (#23070). """ class CallableSettingWithSlots: __slots__ = [] def __call__(self): return "This should not be displayed" with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()): response = self.client.get("/raises500/") self.assertNotContains( response, "This should not be displayed", status_code=500 ) def test_dict_setting_with_non_str_key(self): """ A dict setting containing a non-string key should not break the debug page (#12744). """ with self.settings(DEBUG=True, FOOBAR={42: None}): response = self.client.get("/raises500/") self.assertContains(response, "FOOBAR", status_code=500) def test_sensitive_settings(self): """ The debug page should not show some sensitive settings (password, secret key, ...). 
""" for setting in self.sensitive_settings: with self.subTest(setting=setting): with self.settings(DEBUG=True, **{setting: "should not be displayed"}): response = self.client.get("/raises500/") self.assertNotContains( response, "should not be displayed", status_code=500 ) def test_settings_with_sensitive_keys(self): """ The debug page should filter out some sensitive information found in dict settings. """ for setting in self.sensitive_settings: FOOBAR = { setting: "should not be displayed", "recursive": {setting: "should not be displayed"}, } with self.subTest(setting=setting): with self.settings(DEBUG=True, FOOBAR=FOOBAR): response = self.client.get("/raises500/") self.assertNotContains( response, "should not be displayed", status_code=500 ) def test_cleanse_setting_basic(self): reporter_filter = SafeExceptionReporterFilter() self.assertEqual(reporter_filter.cleanse_setting("TEST", "TEST"), "TEST") self.assertEqual( reporter_filter.cleanse_setting("PASSWORD", "super_secret"), reporter_filter.cleansed_substitute, ) def test_cleanse_setting_ignore_case(self): reporter_filter = SafeExceptionReporterFilter() self.assertEqual( reporter_filter.cleanse_setting("password", "super_secret"), reporter_filter.cleansed_substitute, ) def test_cleanse_setting_recurses_in_dictionary(self): reporter_filter = SafeExceptionReporterFilter() initial = {"login": "cooper", "password": "secret"} self.assertEqual( reporter_filter.cleanse_setting("SETTING_NAME", initial), {"login": "cooper", "password": reporter_filter.cleansed_substitute}, ) def test_cleanse_setting_recurses_in_dictionary_with_non_string_key(self): reporter_filter = SafeExceptionReporterFilter() initial = {("localhost", 8000): {"login": "cooper", "password": "secret"}} self.assertEqual( reporter_filter.cleanse_setting("SETTING_NAME", initial), { ("localhost", 8000): { "login": "cooper", "password": reporter_filter.cleansed_substitute, }, }, ) def test_cleanse_setting_recurses_in_list_tuples(self): reporter_filter = 
SafeExceptionReporterFilter() initial = [ { "login": "cooper", "password": "secret", "apps": ( {"name": "app1", "api_key": "a06b-c462cffae87a"}, {"name": "app2", "api_key": "a9f4-f152e97ad808"}, ), "tokens": ["98b37c57-ec62-4e39", "8690ef7d-8004-4916"], }, {"SECRET_KEY": "c4d77c62-6196-4f17-a06b-c462cffae87a"}, ] cleansed = [ { "login": "cooper", "password": reporter_filter.cleansed_substitute, "apps": ( {"name": "app1", "api_key": reporter_filter.cleansed_substitute}, {"name": "app2", "api_key": reporter_filter.cleansed_substitute}, ), "tokens": reporter_filter.cleansed_substitute, }, {"SECRET_KEY": reporter_filter.cleansed_substitute}, ] self.assertEqual( reporter_filter.cleanse_setting("SETTING_NAME", initial), cleansed, ) self.assertEqual( reporter_filter.cleanse_setting("SETTING_NAME", tuple(initial)), tuple(cleansed), ) def test_request_meta_filtering(self): headers = { "API_URL": "super secret", "A_SIGNATURE_VALUE": "super secret", "MY_KEY": "super secret", "PASSWORD": "super secret", "SECRET_VALUE": "super secret", "SOME_TOKEN": "super secret", "THE_AUTH": "super secret", } request = self.rf.get("/", headers=headers) reporter_filter = SafeExceptionReporterFilter() cleansed_headers = reporter_filter.get_safe_request_meta(request) for header in headers: with self.subTest(header=header): self.assertEqual( cleansed_headers[f"HTTP_{header}"], reporter_filter.cleansed_substitute, ) self.assertEqual( cleansed_headers["HTTP_COOKIE"], reporter_filter.cleansed_substitute, ) def test_exception_report_uses_meta_filtering(self): response = self.client.get( "/raises500/", headers={"secret-header": "super_secret"} ) self.assertNotIn(b"super_secret", response.content) response = self.client.get( "/raises500/", headers={"secret-header": "super_secret", "accept": "application/json"}, ) self.assertNotIn(b"super_secret", response.content) @override_settings(SESSION_COOKIE_NAME="djangosession") def test_cleanse_session_cookie_value(self): 
self.client.cookies.load({"djangosession": "should not be displayed"}) response = self.client.get("/raises500/") self.assertNotContains(response, "should not be displayed", status_code=500)
ExceptionReporterFilterTests
python
python-jsonschema__jsonschema
jsonschema/tests/test_format.py
{ "start": 402, "end": 3188 }
class ____(TestCase): def test_it_can_validate_no_formats(self): checker = FormatChecker(formats=()) self.assertFalse(checker.checkers) def test_it_raises_a_key_error_for_unknown_formats(self): with self.assertRaises(KeyError): FormatChecker(formats=["o noes"]) def test_it_can_register_cls_checkers(self): original = dict(FormatChecker.checkers) self.addCleanup(FormatChecker.checkers.pop, "boom") with self.assertWarns(DeprecationWarning): FormatChecker.cls_checks("boom")(boom) self.assertEqual( FormatChecker.checkers, dict(original, boom=(boom, ())), ) def test_it_can_register_checkers(self): checker = FormatChecker() checker.checks("boom")(boom) self.assertEqual( checker.checkers, dict(FormatChecker.checkers, boom=(boom, ())), ) def test_it_catches_registered_errors(self): checker = FormatChecker() checker.checks("boom", raises=type(BOOM))(boom) with self.assertRaises(FormatError) as cm: checker.check(instance=12, format="boom") self.assertIs(cm.exception.cause, BOOM) self.assertIs(cm.exception.__cause__, BOOM) self.assertEqual(str(cm.exception), "12 is not a 'boom'") # Unregistered errors should not be caught with self.assertRaises(type(BANG)): checker.check(instance="bang", format="boom") def test_format_error_causes_become_validation_error_causes(self): checker = FormatChecker() checker.checks("boom", raises=ValueError)(boom) validator = Draft4Validator({"format": "boom"}, format_checker=checker) with self.assertRaises(ValidationError) as cm: validator.validate("BOOM") self.assertIs(cm.exception.cause, BOOM) self.assertIs(cm.exception.__cause__, BOOM) def test_format_checkers_come_with_defaults(self): # This is bad :/ but relied upon. # The docs for quite awhile recommended people do things like # validate(..., format_checker=FormatChecker()) # We should change that, but we can't without deprecation... 
checker = FormatChecker() with self.assertRaises(FormatError): checker.check(instance="not-an-ipv4", format="ipv4") def test_repr(self): checker = FormatChecker(formats=()) checker.checks("foo")(lambda thing: True) # pragma: no cover checker.checks("bar")(lambda thing: True) # pragma: no cover checker.checks("baz")(lambda thing: True) # pragma: no cover self.assertEqual( repr(checker), "<FormatChecker checkers=['bar', 'baz', 'foo']>", )
TestFormatChecker
python
pytorch__pytorch
test/torch_np/numpy_tests/lib/test_function_base.py
{ "start": 16790, "end": 20502 }
class ____(TestCase): def test_basic(self): a = [1, 2, 3] assert_equal(insert(a, 0, 1), [1, 1, 2, 3]) assert_equal(insert(a, 3, 1), [1, 2, 3, 1]) assert_equal(insert(a, [1, 1, 1], [1, 2, 3]), [1, 1, 2, 3, 2, 3]) assert_equal(insert(a, 1, [1, 2, 3]), [1, 1, 2, 3, 2, 3]) assert_equal(insert(a, [1, -1, 3], 9), [1, 9, 2, 9, 3, 9]) assert_equal(insert(a, slice(-1, None, -1), 9), [9, 1, 9, 2, 9, 3]) assert_equal(insert(a, [-1, 1, 3], [7, 8, 9]), [1, 8, 2, 7, 3, 9]) b = np.array([0, 1], dtype=np.float64) assert_equal(insert(b, 0, b[0]), [0.0, 0.0, 1.0]) assert_equal(insert(b, [], []), b) # Bools will be treated differently in the future: # assert_equal(insert(a, np.array([True]*4), 9), [9, 1, 9, 2, 9, 3, 9]) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings("always", "", FutureWarning) assert_equal(insert(a, np.array([True] * 4), 9), [1, 9, 9, 9, 9, 2, 3]) assert_(w[0].category is FutureWarning) def test_multidim(self): a = [[1, 1, 1]] r = [[2, 2, 2], [1, 1, 1]] assert_equal(insert(a, 0, [1]), [1, 1, 1, 1]) assert_equal(insert(a, 0, [2, 2, 2], axis=0), r) assert_equal(insert(a, 0, 2, axis=0), r) assert_equal(insert(a, 2, 2, axis=1), [[1, 1, 2, 1]]) a = np.array([[1, 1], [2, 2], [3, 3]]) b = np.arange(1, 4).repeat(3).reshape(3, 3) c = np.concatenate( (a[:, 0:1], np.arange(1, 4).repeat(3).reshape(3, 3).T, a[:, 1:2]), axis=1 ) assert_equal(insert(a, [1], [[1], [2], [3]], axis=1), b) assert_equal(insert(a, [1], [1, 2, 3], axis=1), c) # scalars behave differently, in this case exactly opposite: assert_equal(insert(a, 1, [1, 2, 3], axis=1), b) assert_equal(insert(a, 1, [[1], [2], [3]], axis=1), c) a = np.arange(4).reshape(2, 2) assert_equal(insert(a[:, :1], 1, a[:, 1], axis=1), a) assert_equal(insert(a[:1, :], 1, a[1, :], axis=0), a) # negative axis value a = np.arange(24).reshape((2, 3, 4)) assert_equal( insert(a, 1, a[:, :, 3], axis=-1), insert(a, 1, a[:, :, 3], axis=2) ) assert_equal( insert(a, 1, a[:, 2, :], axis=-2), insert(a, 1, a[:, 2, :], axis=1) ) 
# invalid axis value assert_raises(np.AxisError, insert, a, 1, a[:, 2, :], axis=3) assert_raises(np.AxisError, insert, a, 1, a[:, 2, :], axis=-4) # negative axis value a = np.arange(24).reshape((2, 3, 4)) assert_equal( insert(a, 1, a[:, :, 3], axis=-1), insert(a, 1, a[:, :, 3], axis=2) ) assert_equal( insert(a, 1, a[:, 2, :], axis=-2), insert(a, 1, a[:, 2, :], axis=1) ) def test_0d(self): a = np.array(1) with pytest.raises(np.AxisError): insert(a, [], 2, axis=0) with pytest.raises(TypeError): insert(a, [], 2, axis="nonsense") def test_index_array_copied(self): x = np.array([1, 1, 1]) np.insert([0, 1, 2], x, [3, 4, 5]) assert_equal(x, np.array([1, 1, 1])) def test_index_floats(self): with pytest.raises(IndexError): np.insert([0, 1, 2], np.array([1.0, 2.0]), [10, 20]) with pytest.raises(IndexError): np.insert([0, 1, 2], np.array([], dtype=float), []) @skip(reason="NP_VER: fails on CI") @parametrize("idx", [4, -4]) def test_index_out_of_bounds(self, idx): with pytest.raises(IndexError, match="out of bounds"): np.insert([0, 1, 2], [idx], [3, 4])
TestInsert
python
PyCQA__pylint
tests/config/test_argparse_config.py
{ "start": 2026, "end": 3010 }
class ____: @staticmethod def test_new_names() -> None: """Check that we correctly emit DeprecationWarnings for deprecated options.""" with pytest.raises(SystemExit) as ex: with pytest.warns(DeprecationWarning) as records: Run([EMPTY_MODULE, "--ignore-mixin-members=yes"]) assert len(records) == 1 assert "--ignore-mixin-members has been deprecated" in records[0] assert ex.value.code == 0 @staticmethod def test_old_names() -> None: """Check that we correctly double assign old name options.""" run = Run([EMPTY_MODULE, "--ignore=test,test_two"], exit=False) assert run.linter.config.ignore == ["test", "test_two"] assert run.linter.config.ignore == run.linter.config.black_list assert run.linter.config.ignore_patterns == (re.compile("^\\.#"),) assert run.linter.config.ignore_patterns == run.linter.config.black_list_re
TestDeprecationOptions
python
run-llama__llama_index
llama-index-integrations/readers/llama-index-readers-web/llama_index/readers/web/spider_web/base.py
{ "start": 152, "end": 2347 }
class ____(BasePydanticReader): """ Scrapes a URL for data and returns llm-ready data with `Spider.cloud`. Must have the Python package `spider-client` installed and a Spider API key. See https://spider.cloud for more. Args: api_key (str): The Spider API key, get one at https://spider.cloud mode (Mode): "Scrape" the url (default) or "crawl" the url following all subpages. params (dict): Additional parameters to pass to the Spider API. """ class Config: use_enum_values = True extra = "allow" def __init__( self, *, api_key: Optional[str] = None, mode: Literal["scrape", "crawl"] = "scrape", params: Optional[dict] = None, ) -> None: super().__init__(api_key=api_key, mode=mode, params=params) if params is None: params = {"return_format": "markdown", "metadata": True} try: from spider import Spider except ImportError: raise ImportError( "`spider-client` package not found, please run `pip install spider-client`" ) self.spider = Spider(api_key=api_key) self.mode = mode self.params = params def load_data(self, url: str) -> List[Document]: if self.mode != "scrape" and self.mode != "crawl": raise ValueError( "Unknown mode in `mode` parameter, `scrape` or `crawl` is the allowed modes" ) action = ( self.spider.scrape_url if self.mode == "scrape" else self.spider.crawl_url ) spider_docs = action(url=url, params=self.params) if not spider_docs: return [Document(page_content="", metadata={})] documents = [] if isinstance(spider_docs, list): for doc in spider_docs: text = doc.get("content", "") if text is not None: documents.append( Document( text=text, metadata=doc.get("metadata", {}), ) ) return documents
SpiderWebReader
python
readthedocs__readthedocs.org
readthedocs/projects/migrations/0121_remove_requirements_file.py
{ "start": 121, "end": 532 }
class ____(migrations.Migration): safe = Safe.after_deploy() dependencies = [ ("projects", "0120_docdiff_helptext"), ] operations = [ migrations.RemoveField( model_name="historicalproject", name="requirements_file", ), migrations.RemoveField( model_name="project", name="requirements_file", ), ]
Migration
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/ext/hybrid.py
{ "start": 376, "end": 42544 }
class ____ and at the instance level. The :mod:`~sqlalchemy.ext.hybrid` extension provides a special form of method decorator and has minimal dependencies on the rest of SQLAlchemy. Its basic theory of operation can work with any descriptor-based expression system. Consider a mapping ``Interval``, representing integer ``start`` and ``end`` values. We can define higher level functions on mapped classes that produce SQL expressions at the class level, and Python expression evaluation at the instance level. Below, each function decorated with :class:`.hybrid_method` or :class:`.hybrid_property` may receive ``self`` as an instance of the class, or may receive the class directly, depending on context:: from __future__ import annotations from sqlalchemy.ext.hybrid import hybrid_method from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.orm import DeclarativeBase from sqlalchemy.orm import Mapped from sqlalchemy.orm import mapped_column class Base(DeclarativeBase): pass class Interval(Base): __tablename__ = "interval" id: Mapped[int] = mapped_column(primary_key=True) start: Mapped[int] end: Mapped[int] def __init__(self, start: int, end: int): self.start = start self.end = end @hybrid_property def length(self) -> int: return self.end - self.start @hybrid_method def contains(self, point: int) -> bool: return (self.start <= point) & (point <= self.end) @hybrid_method def intersects(self, other: Interval) -> bool: return self.contains(other.start) | self.contains(other.end) Above, the ``length`` property returns the difference between the ``end`` and ``start`` attributes. 
With an instance of ``Interval``, this subtraction occurs in Python, using normal Python descriptor mechanics:: >>> i1 = Interval(5, 10) >>> i1.length 5 When dealing with the ``Interval`` class itself, the :class:`.hybrid_property` descriptor evaluates the function body given the ``Interval`` class as the argument, which when evaluated with SQLAlchemy expression mechanics returns a new SQL expression: .. sourcecode:: pycon+sql >>> from sqlalchemy import select >>> print(select(Interval.length)) {printsql}SELECT interval."end" - interval.start AS length FROM interval{stop} >>> print(select(Interval).filter(Interval.length > 10)) {printsql}SELECT interval.id, interval.start, interval."end" FROM interval WHERE interval."end" - interval.start > :param_1 Filtering methods such as :meth:`.Select.filter_by` are supported with hybrid attributes as well: .. sourcecode:: pycon+sql >>> print(select(Interval).filter_by(length=5)) {printsql}SELECT interval.id, interval.start, interval."end" FROM interval WHERE interval."end" - interval.start = :param_1 The ``Interval`` class example also illustrates two methods, ``contains()`` and ``intersects()``, decorated with :class:`.hybrid_method`. This decorator applies the same idea to methods that :class:`.hybrid_property` applies to attributes. The methods return boolean values, and take advantage of the Python ``|`` and ``&`` bitwise operators to produce equivalent instance-level and SQL expression-level boolean behavior: .. 
sourcecode:: pycon+sql >>> i1.contains(6) True >>> i1.contains(15) False >>> i1.intersects(Interval(7, 18)) True >>> i1.intersects(Interval(25, 29)) False >>> print(select(Interval).filter(Interval.contains(15))) {printsql}SELECT interval.id, interval.start, interval."end" FROM interval WHERE interval.start <= :start_1 AND interval."end" > :end_1{stop} >>> ia = aliased(Interval) >>> print(select(Interval, ia).filter(Interval.intersects(ia))) {printsql}SELECT interval.id, interval.start, interval."end", interval_1.id AS interval_1_id, interval_1.start AS interval_1_start, interval_1."end" AS interval_1_end FROM interval, interval AS interval_1 WHERE interval.start <= interval_1.start AND interval."end" > interval_1.start OR interval.start <= interval_1."end" AND interval."end" > interval_1."end"{stop} .. _hybrid_distinct_expression: Defining Expression Behavior Distinct from Attribute Behavior -------------------------------------------------------------- In the previous section, our usage of the ``&`` and ``|`` bitwise operators within the ``Interval.contains`` and ``Interval.intersects`` methods was fortunate, considering our functions operated on two boolean values to return a new one. In many cases, the construction of an in-Python function and a SQLAlchemy SQL expression have enough differences that two separate Python expressions should be defined. The :mod:`~sqlalchemy.ext.hybrid` decorator defines a **modifier** :meth:`.hybrid_property.expression` for this purpose. As an example we'll define the radius of the interval, which requires the usage of the absolute value function:: from sqlalchemy import ColumnElement from sqlalchemy import Float from sqlalchemy import func from sqlalchemy import type_coerce class Interval(Base): # ... 
@hybrid_property def radius(self) -> float: return abs(self.length) / 2 @radius.inplace.expression @classmethod def _radius_expression(cls) -> ColumnElement[float]: return type_coerce(func.abs(cls.length) / 2, Float) In the above example, the :class:`.hybrid_property` first assigned to the name ``Interval.radius`` is amended by a subsequent method called ``Interval._radius_expression``, using the decorator ``@radius.inplace.expression``, which chains together two modifiers :attr:`.hybrid_property.inplace` and :attr:`.hybrid_property.expression`. The use of :attr:`.hybrid_property.inplace` indicates that the :meth:`.hybrid_property.expression` modifier should mutate the existing hybrid object at ``Interval.radius`` in place, without creating a new object. Notes on this modifier and its rationale are discussed in the next section :ref:`hybrid_pep484_naming`. The use of ``@classmethod`` is optional, and is strictly to give typing tools a hint that ``cls`` in this case is expected to be the ``Interval`` class, and not an instance of ``Interval``. .. note:: :attr:`.hybrid_property.inplace` as well as the use of ``@classmethod`` for proper typing support are available as of SQLAlchemy 2.0.4, and will not work in earlier versions. With ``Interval.radius`` now including an expression element, the SQL function ``ABS()`` is returned when accessing ``Interval.radius`` at the class level: .. sourcecode:: pycon+sql >>> from sqlalchemy import select >>> print(select(Interval).filter(Interval.radius > 5)) {printsql}SELECT interval.id, interval.start, interval."end" FROM interval WHERE abs(interval."end" - interval.start) / :abs_1 > :param_1 .. 
_hybrid_pep484_naming: Using ``inplace`` to create pep-484 compliant hybrid properties --------------------------------------------------------------- In the previous section, a :class:`.hybrid_property` decorator is illustrated which includes two separate method-level functions being decorated, both to produce a single object attribute referenced as ``Interval.radius``. There are actually several different modifiers we can use for :class:`.hybrid_property` including :meth:`.hybrid_property.expression`, :meth:`.hybrid_property.setter` and :meth:`.hybrid_property.update_expression`. SQLAlchemy's :class:`.hybrid_property` decorator intends that adding on these methods may be done in the identical manner as Python's built-in ``@property`` decorator, where idiomatic use is to continue to redefine the attribute repeatedly, using the **same attribute name** each time, as in the example below that illustrates the use of :meth:`.hybrid_property.setter` and :meth:`.hybrid_property.expression` for the ``Interval.radius`` descriptor:: # correct use, however is not accepted by pep-484 tooling class Interval(Base): # ... @hybrid_property def radius(self): return abs(self.length) / 2 @radius.setter def radius(self, value): self.length = value * 2 @radius.expression def radius(cls): return type_coerce(func.abs(cls.length) / 2, Float) Above, there are three ``Interval.radius`` methods, but as each are decorated, first by the :class:`.hybrid_property` decorator and then by the ``@radius`` name itself, the end effect is that ``Interval.radius`` is a single attribute with three different functions contained within it. This style of use is taken from `Python's documented use of @property <https://docs.python.org/3/library/functions.html#property>`_. It is important to note that the way both ``@property`` as well as :class:`.hybrid_property` work, a **copy of the descriptor is made each time**. That is, each call to ``@radius.expression``, ``@radius.setter`` etc. 
make a new object entirely. This allows the attribute to be re-defined in subclasses without issue (see :ref:`hybrid_reuse_subclass` later in this section for how this is used). However, the above approach is not compatible with typing tools such as mypy and pyright. Python's own ``@property`` decorator does not have this limitation only because `these tools hardcode the behavior of @property <https://github.com/python/typing/discussions/1102>`_, meaning this syntax is not available to SQLAlchemy under :pep:`484` compliance. In order to produce a reasonable syntax while remaining typing compliant, the :attr:`.hybrid_property.inplace` decorator allows the same decorator to be re-used with different method names, while still producing a single decorator under one name:: # correct use which is also accepted by pep-484 tooling class Interval(Base): # ... @hybrid_property def radius(self) -> float: return abs(self.length) / 2 @radius.inplace.setter def _radius_setter(self, value: float) -> None: # for example only self.length = value * 2 @radius.inplace.expression @classmethod def _radius_expression(cls) -> ColumnElement[float]: return type_coerce(func.abs(cls.length) / 2, Float) Using :attr:`.hybrid_property.inplace` further qualifies the use of the decorator that a new copy should not be made, thereby maintaining the ``Interval.radius`` name while allowing additional methods ``Interval._radius_setter`` and ``Interval._radius_expression`` to be differently named. .. versionadded:: 2.0.4 Added :attr:`.hybrid_property.inplace` to allow less verbose construction of composite :class:`.hybrid_property` objects while not having to use repeated method names. Additionally allowed the use of ``@classmethod`` within :attr:`.hybrid_property.expression`, :attr:`.hybrid_property.update_expression`, and :attr:`.hybrid_property.comparator` to allow typing tools to identify ``cls`` as a class and not an instance in the method signature. 
Defining Setters ---------------- The :meth:`.hybrid_property.setter` modifier allows the construction of a custom setter method, that can modify values on the object:: class Interval(Base): # ... @hybrid_property def length(self) -> int: return self.end - self.start @length.inplace.setter def _length_setter(self, value: int) -> None: self.end = self.start + value The ``length(self, value)`` method is now called upon set:: >>> i1 = Interval(5, 10) >>> i1.length 5 >>> i1.length = 12 >>> i1.end 17 .. _hybrid_bulk_update: Supporting ORM Bulk INSERT and UPDATE ------------------------------------- Hybrids have support for use in ORM Bulk INSERT/UPDATE operations described at :ref:`orm_expression_update_delete`. There are two distinct hooks that may be used to supply a hybrid value within a DML operation: 1. The :meth:`.hybrid_property.update_expression` hook indicates a method that can provide one or more expressions to render in the SET clause of an UPDATE or INSERT statement, in response to when a hybrid attribute is referenced directly in the :meth:`.UpdateBase.values` method; i.e. the use shown in :ref:`orm_queryguide_update_delete_where` and :ref:`orm_queryguide_insert_values` 2. The :meth:`.hybrid_property.bulk_dml` hook indicates a method that can intercept individual parameter dictionaries sent to :meth:`_orm.Session.execute`, i.e. the use shown at :ref:`orm_queryguide_bulk_insert` as well as :ref:`orm_queryguide_bulk_update`. Using update_expression with update.values() and insert.values() ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The :meth:`.hybrid_property.update_expression` decorator indicates a method that is invoked when a hybrid is used in the :meth:`.ValuesBase.values` clause of an :func:`_sql.update` or :func:`_sql.insert` statement. It returns a list of tuple pairs ``[(x1, y1), (x2, y2), ...]`` which will expand into the SET clause of an UPDATE statement as ``SET x1=y1, x2=y2, ...``. 
The :func:`_sql.from_dml_column` construct is often useful as it can create a SQL expression that refers to another column that may also be present in the same INSERT or UPDATE statement, alternatively falling back to referring to the original column if such an expression is not present. In the example below, the ``total_price`` hybrid will derive the ``price`` column, by taking the given "total price" value and dividing it by a ``tax_rate`` value that is also present in the :meth:`.ValuesBase.values` call:: from sqlalchemy import from_dml_column class Product(Base): __tablename__ = "product" id: Mapped[int] = mapped_column(primary_key=True) price: Mapped[float] tax_rate: Mapped[float] @hybrid_property def total_price(self) -> float: return self.price * (1 + self.tax_rate) @total_price.inplace.update_expression @classmethod def _total_price_update_expression( cls, value: Any ) -> List[Tuple[Any, Any]]: return [(cls.price, value / (1 + from_dml_column(cls.tax_rate)))] When used in an UPDATE statement, :func:`_sql.from_dml_column` creates a reference to the ``tax_rate`` column that will use the value passed to the :meth:`.ValuesBase.values` method, rather than the existing value on the column in the database. This allows the hybrid to access other values being updated in the same statement: .. sourcecode:: pycon+sql >>> from sqlalchemy import update >>> print( ... update(Product).values( ... {Product.tax_rate: 0.08, Product.total_price: 125.00} ... ) ... ) {printsql}UPDATE product SET tax_rate=:tax_rate, price=(:total_price / (:tax_rate + :param_1)) When the column referenced by :func:`_sql.from_dml_column` (in this case ``product.tax_rate``) is omitted from :meth:`.ValuesBase.values`, the rendered expression falls back to using the original column: .. 
sourcecode:: pycon+sql >>> from sqlalchemy import update >>> print(update(Product).values({Product.total_price: 125.00})) {printsql}UPDATE product SET price=(:total_price / (tax_rate + :param_1)) Using bulk_dml to intercept bulk parameter dictionaries ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. versionadded:: 2.1 For bulk operations that pass a list of parameter dictionaries to methods like :meth:`.Session.execute`, the :meth:`.hybrid_property.bulk_dml` decorator provides a hook that can receive each dictionary and populate it with new values. The implementation for the :meth:`.hybrid_property.bulk_dml` hook can retrieve other column values from the parameter dictionary:: from typing import MutableMapping class Product(Base): __tablename__ = "product" id: Mapped[int] = mapped_column(primary_key=True) price: Mapped[float] tax_rate: Mapped[float] @hybrid_property def total_price(self) -> float: return self.price * (1 + self.tax_rate) @total_price.inplace.bulk_dml @classmethod def _total_price_bulk_dml( cls, mapping: MutableMapping[str, Any], value: float ) -> None: mapping["price"] = value / (1 + mapping["tax_rate"]) This allows for bulk INSERT/UPDATE with derived values:: # Bulk INSERT session.execute( insert(Product), [ {"tax_rate": 0.08, "total_price": 125.00}, {"tax_rate": 0.05, "total_price": 110.00}, ], ) Note that the method decorated by :meth:`.hybrid_property.bulk_dml` is invoked only with parameter dictionaries and does not have the ability to use SQL expressions in the given dictionaries, only literal Python values that will be passed to parameters in the INSERT or UPDATE statement. .. seealso:: :ref:`orm_expression_update_delete` - includes background on ORM-enabled UPDATE statements Working with Relationships -------------------------- There's no essential difference when creating hybrids that work with related objects as opposed to column-based data. The need for distinct expressions tends to be greater. 
The two variants we'll illustrate are the "join-dependent" hybrid, and the "correlated subquery" hybrid. Join-Dependent Relationship Hybrid ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Consider the following declarative mapping which relates a ``User`` to a ``SavingsAccount``:: from __future__ import annotations from decimal import Decimal from typing import cast from typing import List from typing import Optional from sqlalchemy import ForeignKey from sqlalchemy import Numeric from sqlalchemy import String from sqlalchemy import SQLColumnExpression from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.orm import DeclarativeBase from sqlalchemy.orm import Mapped from sqlalchemy.orm import mapped_column from sqlalchemy.orm import relationship class Base(DeclarativeBase): pass class SavingsAccount(Base): __tablename__ = "account" id: Mapped[int] = mapped_column(primary_key=True) user_id: Mapped[int] = mapped_column(ForeignKey("user.id")) balance: Mapped[Decimal] = mapped_column(Numeric(15, 5)) owner: Mapped[User] = relationship(back_populates="accounts") class User(Base): __tablename__ = "user" id: Mapped[int] = mapped_column(primary_key=True) name: Mapped[str] = mapped_column(String(100)) accounts: Mapped[List[SavingsAccount]] = relationship( back_populates="owner", lazy="selectin" ) @hybrid_property def balance(self) -> Optional[Decimal]: if self.accounts: return self.accounts[0].balance else: return None @balance.inplace.setter def _balance_setter(self, value: Optional[Decimal]) -> None: assert value is not None if not self.accounts: account = SavingsAccount(owner=self) else: account = self.accounts[0] account.balance = value @balance.inplace.expression @classmethod def _balance_expression(cls) -> SQLColumnExpression[Optional[Decimal]]: return cast( "SQLColumnExpression[Optional[Decimal]]", SavingsAccount.balance, ) The above hybrid property ``balance`` works with the first ``SavingsAccount`` entry in the list of accounts for this user. 
The in-Python getter/setter methods can treat ``accounts`` as a Python list available on ``self``. .. tip:: The ``User.balance`` getter in the above example accesses the ``self.accounts`` collection, which will normally be loaded via the :func:`.selectinload` loader strategy configured on the ``User.accounts`` :func:`_orm.relationship`. The default loader strategy when not otherwise stated on :func:`_orm.relationship` is :func:`.lazyload`, which emits SQL on demand. When using asyncio, on-demand loaders such as :func:`.lazyload` are not supported, so care should be taken to ensure the ``self.accounts`` collection is accessible to this hybrid accessor when using asyncio. At the expression level, it's expected that the ``User`` class will be used in an appropriate context such that an appropriate join to ``SavingsAccount`` will be present: .. sourcecode:: pycon+sql >>> from sqlalchemy import select >>> print( ... select(User, User.balance) ... .join(User.accounts) ... .filter(User.balance > 5000) ... ) {printsql}SELECT "user".id AS user_id, "user".name AS user_name, account.balance AS account_balance FROM "user" JOIN account ON "user".id = account.user_id WHERE account.balance > :balance_1 Note however, that while the instance level accessors need to worry about whether ``self.accounts`` is even present, this issue expresses itself differently at the SQL expression level, where we basically would use an outer join: .. sourcecode:: pycon+sql >>> from sqlalchemy import select >>> from sqlalchemy import or_ >>> print( ... select(User, User.balance) ... .outerjoin(User.accounts) ... .filter(or_(User.balance < 5000, User.balance == None)) ... 
) {printsql}SELECT "user".id AS user_id, "user".name AS user_name, account.balance AS account_balance FROM "user" LEFT OUTER JOIN account ON "user".id = account.user_id WHERE account.balance < :balance_1 OR account.balance IS NULL Correlated Subquery Relationship Hybrid ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ We can, of course, forego being dependent on the enclosing query's usage of joins in favor of the correlated subquery, which can portably be packed into a single column expression. A correlated subquery is more portable, but often performs more poorly at the SQL level. Using the same technique illustrated at :ref:`mapper_column_property_sql_expressions`, we can adjust our ``SavingsAccount`` example to aggregate the balances for *all* accounts, and use a correlated subquery for the column expression:: from __future__ import annotations from decimal import Decimal from typing import List from sqlalchemy import ForeignKey from sqlalchemy import func from sqlalchemy import Numeric from sqlalchemy import select from sqlalchemy import SQLColumnExpression from sqlalchemy import String from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.orm import DeclarativeBase from sqlalchemy.orm import Mapped from sqlalchemy.orm import mapped_column from sqlalchemy.orm import relationship class Base(DeclarativeBase): pass class SavingsAccount(Base): __tablename__ = "account" id: Mapped[int] = mapped_column(primary_key=True) user_id: Mapped[int] = mapped_column(ForeignKey("user.id")) balance: Mapped[Decimal] = mapped_column(Numeric(15, 5)) owner: Mapped[User] = relationship(back_populates="accounts") class User(Base): __tablename__ = "user" id: Mapped[int] = mapped_column(primary_key=True) name: Mapped[str] = mapped_column(String(100)) accounts: Mapped[List[SavingsAccount]] = relationship( back_populates="owner", lazy="selectin" ) @hybrid_property def balance(self) -> Decimal: return sum( (acc.balance for acc in self.accounts), start=Decimal("0") ) 
@balance.inplace.expression @classmethod def _balance_expression(cls) -> SQLColumnExpression[Decimal]: return ( select(func.sum(SavingsAccount.balance)) .where(SavingsAccount.user_id == cls.id) .label("total_balance") ) The above recipe will give us the ``balance`` column which renders a correlated SELECT: .. sourcecode:: pycon+sql >>> from sqlalchemy import select >>> print(select(User).filter(User.balance > 400)) {printsql}SELECT "user".id, "user".name FROM "user" WHERE ( SELECT sum(account.balance) AS sum_1 FROM account WHERE account.user_id = "user".id ) > :param_1 .. _hybrid_custom_comparators: Building Custom Comparators --------------------------- The hybrid property also includes a helper that allows construction of custom comparators. A comparator object allows one to customize the behavior of each SQLAlchemy expression operator individually. They are useful when creating custom types that have some highly idiosyncratic behavior on the SQL side. .. note:: The :meth:`.hybrid_property.comparator` decorator introduced in this section **replaces** the use of the :meth:`.hybrid_property.expression` decorator. They cannot be used together. 
The example class below allows case-insensitive comparisons on the attribute named ``word_insensitive``:: from __future__ import annotations from typing import Any from sqlalchemy import ColumnElement from sqlalchemy import func from sqlalchemy.ext.hybrid import Comparator from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.orm import DeclarativeBase from sqlalchemy.orm import Mapped from sqlalchemy.orm import mapped_column class Base(DeclarativeBase): pass class CaseInsensitiveComparator(Comparator[str]): def __eq__(self, other: Any) -> ColumnElement[bool]: # type: ignore[override] # noqa: E501 return func.lower(self.__clause_element__()) == func.lower(other) class SearchWord(Base): __tablename__ = "searchword" id: Mapped[int] = mapped_column(primary_key=True) word: Mapped[str] @hybrid_property def word_insensitive(self) -> str: return self.word.lower() @word_insensitive.inplace.comparator @classmethod def _word_insensitive_comparator(cls) -> CaseInsensitiveComparator: return CaseInsensitiveComparator(cls.word) Above, SQL expressions against ``word_insensitive`` will apply the ``LOWER()`` SQL function to both sides: .. sourcecode:: pycon+sql >>> from sqlalchemy import select >>> print(select(SearchWord).filter_by(word_insensitive="Trucks")) {printsql}SELECT searchword.id, searchword.word FROM searchword WHERE lower(searchword.word) = lower(:lower_1) The ``CaseInsensitiveComparator`` above implements part of the :class:`.ColumnOperators` interface. A "coercion" operation like lowercasing can be applied to all comparison operations (i.e. ``eq``, ``lt``, ``gt``, etc.) using :meth:`.Operators.operate`:: class CaseInsensitiveComparator(Comparator): def operate(self, op, other, **kwargs): return op( func.lower(self.__clause_element__()), func.lower(other), **kwargs, ) .. 
_hybrid_reuse_subclass: Reusing Hybrid Properties across Subclasses ------------------------------------------- A hybrid can be referred to from a superclass, to allow modifying methods like :meth:`.hybrid_property.getter`, :meth:`.hybrid_property.setter` to be used to redefine those methods on a subclass. This is similar to how the standard Python ``@property`` object works:: class FirstNameOnly(Base): # ... first_name: Mapped[str] @hybrid_property def name(self) -> str: return self.first_name @name.inplace.setter def _name_setter(self, value: str) -> None: self.first_name = value class FirstNameLastName(FirstNameOnly): # ... last_name: Mapped[str] # 'inplace' is not used here; calling getter creates a copy # of FirstNameOnly.name that is local to FirstNameLastName @FirstNameOnly.name.getter def name(self) -> str: return self.first_name + " " + self.last_name @name.inplace.setter def _name_setter(self, value: str) -> None: self.first_name, self.last_name = value.split(" ", 1) Above, the ``FirstNameLastName`` class refers to the hybrid from ``FirstNameOnly.name`` to repurpose its getter and setter for the subclass. When overriding :meth:`.hybrid_property.expression` and :meth:`.hybrid_property.comparator` alone as the first reference to the superclass, these names conflict with the same-named accessors on the class- level :class:`.QueryableAttribute` object returned at the class level. To override these methods when referring directly to the parent class descriptor, add the special qualifier :attr:`.hybrid_property.overrides`, which will de- reference the instrumented attribute back to the hybrid object:: class FirstNameLastName(FirstNameOnly): # ... last_name: Mapped[str] @FirstNameOnly.name.overrides.expression @classmethod def name(cls): return func.concat(cls.first_name, " ", cls.last_name) .. 
_hybrid_value_objects: Hybrid Value Objects -------------------- In the example shown previously at :ref:`hybrid_custom_comparators`, if we were to compare the ``word_insensitive`` attribute of a ``SearchWord`` instance to a plain Python string, the plain Python string would not be coerced to lower case - the ``CaseInsensitiveComparator`` we built, being returned by ``@word_insensitive.comparator``, only applies to the SQL side. A more comprehensive form of the custom comparator is to construct a **Hybrid Value Object**. This technique applies the target value or expression to a value object which is then returned by the accessor in all cases. The value object allows control of all operations upon the value as well as how compared values are treated, both on the SQL expression side as well as the Python value side. Replacing the previous ``CaseInsensitiveComparator`` class with a new ``CaseInsensitiveWord`` class:: from sqlalchemy import func from sqlalchemy.ext.hybrid import Comparator class CaseInsensitiveWord(Comparator): "Hybrid value representing a lower case representation of a word." def __init__(self, word): if isinstance(word, str): self.word = word.lower() else: self.word = func.lower(word) def operate(self, op, other, **kwargs): if not isinstance(other, CaseInsensitiveWord): other = CaseInsensitiveWord(other) return op(self.word, other.word, **kwargs) def __clause_element__(self): return self.word def __str__(self): return self.word key = "word" "Label to apply to Query tuple results" Above, the ``CaseInsensitiveWord`` object represents ``self.word``, which may be a SQL function, or may be a Python native string. 
The hybrid value object should implement ``__clause_element__()``, which allows the object to be coerced into a SQL-capable value when used in SQL expression constructs, as well as Python comparison methods such as ``__eq__()``, which is accomplished in the above example by subclassing :class:`.hybrid.Comparator` and overriding the ``operate()`` method. .. topic:: Building the Value object with dataclasses Hybrid value objects may also be implemented as Python dataclasses. If modification to values upon construction is needed, use the ``__post_init__()`` dataclasses method. Instance variables that work in a "hybrid" fashion may be instance of a plain Python value, or an instance of :class:`.SQLColumnExpression` genericized against that type. Also make sure to disable dataclass comparison features, as the :class:`.hybrid.Comparator` class provides these:: from sqlalchemy import func from sqlalchemy.ext.hybrid import Comparator from dataclasses import dataclass @dataclass(eq=False) class CaseInsensitiveWord(Comparator): word: str | SQLColumnExpression[str] def __post_init__(self): if isinstance(self.word, str): self.word = self.word.lower() else: self.word = func.lower(self.word) def operate(self, op, other, **kwargs): if not isinstance(other, CaseInsensitiveWord): other = CaseInsensitiveWord(other) return op(self.word, other.word, **kwargs) def __clause_element__(self): return self.word With ``__clause_element__()`` provided, our ``SearchWord`` class can now deliver the ``CaseInsensitiveWord`` object unconditionally from a single hybrid method, returning an object that behaves appropriately in both value-based and SQL contexts:: class SearchWord(Base): __tablename__ = "searchword" id: Mapped[int] = mapped_column(primary_key=True) word: Mapped[str] @hybrid_property def word_insensitive(self) -> CaseInsensitiveWord: return CaseInsensitiveWord(self.word) The class-level version of ``CaseInsensitiveWord`` will work in SQL constructs: .. 
sourcecode:: pycon+sql >>> print(select(SearchWord).filter(SearchWord.word_insensitive == "Trucks")) {printsql}SELECT searchword.id AS searchword_id, searchword.word AS searchword_word FROM searchword WHERE lower(searchword.word) = :lower_1 By also subclassing :class:`.hybrid.Comparator` and providing an implementation for ``operate()``, the ``word_insensitive`` attribute also has case-insensitive comparison behavior universally, including SQL expression and Python expression (note the Python value is converted to lower case on the Python side here): .. sourcecode:: pycon+sql >>> from sqlalchemy.orm import aliased >>> sw1 = aliased(SearchWord) >>> sw2 = aliased(SearchWord) >>> print( ... select(sw1.word_insensitive, sw2.word_insensitive).filter( ... sw1.word_insensitive > sw2.word_insensitive ... ) ... ) {printsql}SELECT lower(searchword_1.word) AS lower_1, lower(searchword_2.word) AS lower_2 FROM searchword AS searchword_1, searchword AS searchword_2 WHERE lower(searchword_1.word) > lower(searchword_2.word) Python only expression:: >>> ws1 = SearchWord(word="SomeWord") >>> ws1.word_insensitive == "sOmEwOrD" True >>> ws1.word_insensitive == "XOmEwOrX" False >>> print(ws1.word_insensitive) someword The Hybrid Value pattern is very useful for any kind of value that may have multiple representations, such as timestamps, time deltas, units of measurement, currencies and encrypted passwords. .. seealso:: `Hybrids and Value Agnostic Types <https://techspot.zzzeek.org/2011/10/21/hybrids-and-value-agnostic-types/>`_ - on the techspot.zzzeek.org blog `Value Agnostic Types, Part II <https://techspot.zzzeek.org/2011/10/29/value-agnostic-types-part-ii/>`_ - on the techspot.zzzeek.org blog .. 
_composite_hybrid_value_objects: Composite Hybrid Value Objects ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The functionality of :ref:`hybrid_value_objects` may also be expanded to support "composite" forms; in this pattern, SQLAlchemy hybrids begin to approximate most (though not all) the same functionality that is available from the ORM natively via the :ref:`mapper_composite` feature. We can imitate the example of ``Point`` and ``Vertex`` from that section using hybrids, where ``Point`` is modified to become a Hybrid Value Object:: from dataclasses import dataclass from sqlalchemy import tuple_ from sqlalchemy.ext.hybrid import Comparator from sqlalchemy import SQLColumnExpression @dataclass(eq=False) class Point(Comparator): x: int | SQLColumnExpression[int] y: int | SQLColumnExpression[int] def operate(self, op, other, **kwargs): return op(self.x, other.x) & op(self.y, other.y) def __clause_element__(self): return tuple_(self.x, self.y) Above, the ``operate()`` method is where the most "hybrid" behavior takes place, making use of ``op()`` (the Python operator function in use) along with the bitwise ``&`` operator, which provides us with the SQL AND operator in a SQL context, and boolean "and" in a Python boolean context. 
Following from there, the owning ``Vertex`` class now uses hybrids to represent ``start`` and ``end``:: from sqlalchemy.orm import DeclarativeBase, Mapped from sqlalchemy.orm import mapped_column from sqlalchemy.ext.hybrid import hybrid_property class Base(DeclarativeBase): pass class Vertex(Base): __tablename__ = "vertices" id: Mapped[int] = mapped_column(primary_key=True) x1: Mapped[int] y1: Mapped[int] x2: Mapped[int] y2: Mapped[int] @hybrid_property def start(self) -> Point: return Point(self.x1, self.y1) @start.inplace.setter def _set_start(self, value: Point) -> None: self.x1 = value.x self.y1 = value.y @hybrid_property def end(self) -> Point: return Point(self.x2, self.y2) @end.inplace.setter def _set_end(self, value: Point) -> None: self.x2 = value.x self.y2 = value.y def __repr__(self) -> str: return f"Vertex(start={self.start}, end={self.end})" Using the above mapping, we can use expressions at the Python or SQL level using ``Vertex.start`` and ``Vertex.end``:: >>> v1 = Vertex(start=Point(3, 4), end=Point(15, 10)) >>> v1.end == Point(15, 10) True >>> stmt = ( ... select(Vertex) ... .where(Vertex.start == Point(3, 4)) ... .where(Vertex.end < Point(7, 8)) ... ) >>> print(stmt) SELECT vertices.id, vertices.x1, vertices.y1, vertices.x2, vertices.y2 FROM vertices WHERE vertices.x1 = :x1_1 AND vertices.y1 = :y1_1 AND vertices.x2 < :x2_1 AND vertices.y2 < :y2_1 DML Support for Composite Value Objects ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Composite value objects like ``Point`` can also be used with the ORM's DML features. 
The :meth:`.hybrid_property.update_expression` decorator allows the hybrid to expand a composite value into multiple column assignments in UPDATE and INSERT statements:: class Location(Base): __tablename__ = "location" id: Mapped[int] = mapped_column(primary_key=True) x: Mapped[int] y: Mapped[int] @hybrid_property def coordinates(self) -> Point: return Point(self.x, self.y) @coordinates.inplace.update_expression @classmethod def _coordinates_update_expression( cls, value: Any ) -> List[Tuple[Any, Any]]: assert isinstance(value, Point) return [(cls.x, value.x), (cls.y, value.y)] This allows UPDATE statements to work with the composite value: .. sourcecode:: pycon+sql >>> from sqlalchemy import update >>> print( ... update(Location) ... .where(Location.id == 5) ... .values({Location.coordinates: Point(25, 17)}) ... ) {printsql}UPDATE location SET x=:x, y=:y WHERE location.id = :id_1 For bulk operations that use parameter dictionaries, the :meth:`.hybrid_property.bulk_dml` decorator provides a hook to convert composite values into individual column values:: from typing import MutableMapping class Location(Base): # ... 
(same as above) @coordinates.inplace.bulk_dml @classmethod def _coordinates_bulk_dml( cls, mapping: MutableMapping[str, Any], value: Point ) -> None: mapping["x"] = value.x mapping["y"] = value.y This enables bulk operations with composite values:: # Bulk INSERT session.execute( insert(Location), [ {"id": 1, "coordinates": Point(10, 20)}, {"id": 2, "coordinates": Point(30, 40)}, ], ) # Bulk UPDATE session.execute( update(Location), [ {"id": 1, "coordinates": Point(15, 25)}, {"id": 2, "coordinates": Point(35, 45)}, ], ) """ # noqa from __future__ import annotations from typing import Any from typing import Callable from typing import cast from typing import Concatenate from typing import Generic from typing import List from typing import Literal from typing import MutableMapping from typing import Optional from typing import overload from typing import ParamSpec from typing import Protocol from typing import Sequence from typing import Tuple from typing import Type from typing import TYPE_CHECKING from typing import TypeVar from typing import Union from .. import exc from .. 
import util from ..orm import attributes from ..orm import InspectionAttrExtensionType from ..orm import interfaces from ..orm import ORMDescriptor from ..orm.attributes import QueryableAttribute from ..sql import roles from ..sql._typing import is_has_clause_element from ..sql.elements import ColumnElement from ..sql.elements import SQLCoreOperations from ..util.typing import Self if TYPE_CHECKING: from ..orm.interfaces import MapperProperty from ..orm.util import AliasedInsp from ..sql import SQLColumnExpression from ..sql._typing import _ColumnExpressionArgument from ..sql._typing import _DMLColumnArgument from ..sql._typing import _HasClauseElement from ..sql._typing import _InfoType from ..sql.operators import OperatorType _P = ParamSpec("_P") _R = TypeVar("_R") _T = TypeVar("_T", bound=Any) _TE = TypeVar("_TE", bound=Any) _T_co = TypeVar("_T_co", bound=Any, covariant=True) _T_con = TypeVar("_T_con", bound=Any, contravariant=True)
level
python
ansible__ansible
test/lib/ansible_test/_internal/docker_util.py
{ "start": 34983, "end": 38329 }
class ____: """The results of `docker network inspect` for a single network.""" def __init__(self, args: CommonConfig, inspection: dict[str, t.Any]) -> None: self.args = args self.inspection = inspection def docker_network_inspect(args: CommonConfig, network: str, always: bool = False) -> t.Optional[DockerNetworkInspect]: """ Return the results of `docker network inspect` for the specified network or None if the network does not exist. """ try: stdout = docker_command(args, ['network', 'inspect', network], capture=True, always=always)[0] except SubprocessError: stdout = '[]' if args.explain and not always: items = [] else: items = json.loads(stdout) if len(items) == 1: return DockerNetworkInspect(args, items[0]) return None def docker_logs(args: CommonConfig, container_id: str) -> None: """Display logs for the specified container. If an error occurs, it is displayed rather than raising an exception.""" try: docker_command(args, ['logs', container_id], capture=False) except SubprocessError as ex: display.error(str(ex)) def docker_exec( args: CommonConfig, container_id: str, cmd: list[str], capture: bool, options: t.Optional[list[str]] = None, stdin: t.Optional[t.IO[bytes]] = None, stdout: t.Optional[t.IO[bytes]] = None, interactive: bool = False, output_stream: t.Optional[OutputStream] = None, data: t.Optional[str] = None, ) -> tuple[t.Optional[str], t.Optional[str]]: """Execute the given command in the specified container.""" if not options: options = [] if data or stdin or stdout: options.append('-i') return docker_command( args, ['exec'] + options + [container_id] + cmd, capture=capture, stdin=stdin, stdout=stdout, interactive=interactive, output_stream=output_stream, data=data, ) def docker_command( args: CommonConfig, cmd: list[str], capture: bool, stdin: t.Optional[t.IO[bytes]] = None, stdout: t.Optional[t.IO[bytes]] = None, interactive: bool = False, output_stream: t.Optional[OutputStream] = None, always: bool = False, data: t.Optional[str] = None, ) -> 
tuple[t.Optional[str], t.Optional[str]]: """Run the specified docker command.""" env = docker_environment() command = [require_docker().command] if command[0] == 'podman' and get_podman_remote(): command.append('--remote') return run_command( args, command + cmd, env=env, capture=capture, stdin=stdin, stdout=stdout, interactive=interactive, always=always, output_stream=output_stream, data=data, ) def docker_environment() -> dict[str, str]: """Return a dictionary of docker related environment variables found in the current environment.""" env = common_environment() var_names = { 'XDG_RUNTIME_DIR', # podman } var_prefixes = { 'CONTAINER_', # podman remote 'DOCKER_', # docker } env.update({name: value for name, value in os.environ.items() if name in var_names or any(name.startswith(prefix) for prefix in var_prefixes)}) return env
DockerNetworkInspect
python
huggingface__transformers
src/transformers/models/eomt/modeling_eomt.py
{ "start": 43524, "end": 45545 }
class ____(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config: EomtConfig base_model_prefix = "eomt" main_input_name = "pixel_values" input_modalities = ("image",) supports_gradient_checkpointing = False _no_split_modules = ["EomtLayer"] _supports_sdpa = True _can_record_outputs = { "hidden_states": EomtLayer, "attentions": EomtAttention, } @torch.no_grad() def _init_weights(self, module: nn.Module) -> None: std = self.config.initializer_range if isinstance(module, (nn.Linear, nn.Conv2d, nn.ConvTranspose2d)): init.kaiming_uniform_(module.weight, a=math.sqrt(5)) if module.bias is not None: fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(module.weight) bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0 init.uniform_(module.bias, -bound, bound) elif isinstance(module, nn.LayerNorm): init.ones_(module.weight) init.zeros_(module.bias) elif isinstance(module, nn.Embedding): init.normal_(module.weight, mean=0.0, std=1) # Here we need the check explicitly, as we slice the weight in the `zeros_` call, so it looses the flag if module.padding_idx is not None and not getattr(module.weight, "_is_hf_initialized", False): init.zeros_(module.weight[module.padding_idx]) elif isinstance(module, EomtLayerScale): if hasattr(module, "lambda1"): init.constant_(module.lambda1, self.config.layerscale_value) elif isinstance(module, EomtEmbeddings): init.trunc_normal_(module.cls_token, mean=0.0, std=std) init.zeros_(module.register_tokens) @auto_docstring( custom_intro=""" The EoMT Model with head on top for instance/semantic/panoptic segmentation. """ )
EomtPreTrainedModel
python
huggingface__transformers
tests/models/align/test_modeling_align.py
{ "start": 7858, "end": 11623 }
class ____: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) config = self.get_config() return config, input_ids, token_type_ids, input_mask def get_config(self): return AlignTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, 
max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, token_type_ids, input_mask): model = AlignTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch
AlignTextModelTester
python
apache__airflow
task-sdk/src/airflow/sdk/execution_time/context.py
{ "start": 26438, "end": 33171 }
class ____( _AssetRefResolutionMixin, Mapping["Asset | AssetAlias | AssetRef", Sequence["AssetEventDagRunReferenceResult"]], _AssetEventAccessorsMixin[Sequence["AssetEventDagRunReferenceResult"]], ): """Lazy mapping of triggering asset events.""" _events: Mapping[BaseAssetUniqueKey, Sequence[AssetEventDagRunReferenceResult]] @classmethod def build(cls, events: Iterable[AssetEventDagRunReferenceResult]) -> TriggeringAssetEventsAccessor: coll: dict[BaseAssetUniqueKey, list[AssetEventDagRunReferenceResult]] = collections.defaultdict(list) for event in events: coll[AssetUniqueKey(name=event.asset.name, uri=event.asset.uri)].append(event) for alias in event.source_aliases: coll[AssetAliasUniqueKey(name=alias.name)].append(event) return cls(coll) def __str__(self) -> str: return f"TriggeringAssetEventAccessor(_events={self._events})" def __iter__(self) -> Iterator[Asset | AssetAlias]: return ( key.to_asset() if isinstance(key, AssetUniqueKey) else key.to_asset_alias() for key in self._events ) def __len__(self) -> int: return len(self._events) def __getitem__(self, key: Asset | AssetAlias | AssetRef) -> Sequence[AssetEventDagRunReferenceResult]: hashable_key: BaseAssetUniqueKey if isinstance(key, Asset): hashable_key = AssetUniqueKey.from_asset(key) elif isinstance(key, AssetRef): hashable_key, _ = self._resolve_asset_ref(key) elif isinstance(key, AssetAlias): hashable_key = AssetAliasUniqueKey.from_asset_alias(key) else: raise TypeError(f"Key should be either an asset or an asset alias, not {type(key)}") return self._events[hashable_key] @cache # Prevent multiple API access. 
def get_previous_dagrun_success(ti_id: UUID) -> PrevSuccessfulDagRunResponse: from airflow.sdk.execution_time import task_runner from airflow.sdk.execution_time.comms import ( GetPrevSuccessfulDagRun, PrevSuccessfulDagRunResponse, PrevSuccessfulDagRunResult, ) msg = task_runner.SUPERVISOR_COMMS.send(GetPrevSuccessfulDagRun(ti_id=ti_id)) if TYPE_CHECKING: assert isinstance(msg, PrevSuccessfulDagRunResult) return PrevSuccessfulDagRunResponse(**msg.model_dump(exclude={"type"})) @contextlib.contextmanager def set_current_context(context: Context) -> Generator[Context, None, None]: """ Set the current execution context to the provided context object. This method should be called once per Task execution, before calling operator.execute. """ _CURRENT_CONTEXT.append(context) try: yield context finally: expected_state = _CURRENT_CONTEXT.pop() if expected_state != context: log.warning( "Current context is not equal to the state at context stack.", expected=context, got=expected_state, ) def context_update_for_unmapped(context: Context, task: BaseOperator) -> None: """ Update context after task unmapping. Since ``get_template_context()`` is called before unmapping, the context contains information about the mapped task. We need to do some in-place updates to ensure the template context reflects the unmapped task instead. :meta private: """ from airflow.sdk.definitions.param import process_params context["task"] = context["ti"].task = task context["params"] = process_params( context["dag"], task, context["dag_run"].conf, suppress_exception=False ) def context_to_airflow_vars(context: Mapping[str, Any], in_env_var_format: bool = False) -> dict[str, str]: """ Return values used to externally reconstruct relations between dags, dag_runs, tasks and task_instances. Given a context, this function provides a dictionary of values that can be used to externally reconstruct relations between dags, dag_runs, tasks and task_instances. 
Default to abc.def.ghi format and can be made to ABC_DEF_GHI format if in_env_var_format is set to True. :param context: The context for the task_instance of interest. :param in_env_var_format: If returned vars should be in ABC_DEF_GHI format. :return: task_instance context as dict. """ from datetime import datetime from airflow import settings params = {} if in_env_var_format: name_format = "env_var_format" else: name_format = "default" task = context.get("task") task_instance = context.get("task_instance") dag_run = context.get("dag_run") ops = [ (task, "email", "AIRFLOW_CONTEXT_DAG_EMAIL"), (task, "owner", "AIRFLOW_CONTEXT_DAG_OWNER"), (task_instance, "dag_id", "AIRFLOW_CONTEXT_DAG_ID"), (task_instance, "task_id", "AIRFLOW_CONTEXT_TASK_ID"), (dag_run, "logical_date", "AIRFLOW_CONTEXT_LOGICAL_DATE"), (task_instance, "try_number", "AIRFLOW_CONTEXT_TRY_NUMBER"), (dag_run, "run_id", "AIRFLOW_CONTEXT_DAG_RUN_ID"), ] context_params = settings.get_airflow_context_vars(context) for key_raw, value in context_params.items(): if not isinstance(key_raw, str): raise TypeError(f"key <{key_raw}> must be string") if not isinstance(value, str): raise TypeError(f"value of key <{key_raw}> must be string, not {type(value)}") if in_env_var_format and not key_raw.startswith(ENV_VAR_FORMAT_PREFIX): key = ENV_VAR_FORMAT_PREFIX + key_raw.upper() elif not key_raw.startswith(DEFAULT_FORMAT_PREFIX): key = DEFAULT_FORMAT_PREFIX + key_raw else: key = key_raw params[key] = value for subject, attr, mapping_key in ops: _attr = getattr(subject, attr, None) if subject and _attr: mapping_value = AIRFLOW_VAR_NAME_FORMAT_MAPPING[mapping_key][name_format] if isinstance(_attr, str): params[mapping_value] = _attr elif isinstance(_attr, datetime): params[mapping_value] = _attr.isoformat() elif isinstance(_attr, list): # os env variable value needs to be string params[mapping_value] = ",".join(_attr) else: params[mapping_value] = str(_attr) return params def context_get_outlet_events(context: Context) -> 
OutletEventAccessorsProtocol: try: outlet_events = context["outlet_events"] except KeyError: outlet_events = context["outlet_events"] = OutletEventAccessors() return outlet_events
TriggeringAssetEventsAccessor
python
django__django
tests/handlers/tests.py
{ "start": 11731, "end": 15027 }
class ____(SimpleTestCase): """Async variants of the normal handler request tests.""" async def test_sync_view(self): """Calling a sync view down the asynchronous path.""" response = await self.async_client.get("/regular/") self.assertEqual(response.status_code, 200) async def test_async_view(self): """Calling an async view down the asynchronous path.""" response = await self.async_client.get("/async_regular/") self.assertEqual(response.status_code, 200) async def test_suspiciousop_in_view_returns_400(self): response = await self.async_client.get("/suspicious/") self.assertEqual(response.status_code, 400) async def test_bad_request_in_view_returns_400(self): response = await self.async_client.get("/bad_request/") self.assertEqual(response.status_code, 400) async def test_no_response(self): msg = ( "The view handlers.views.no_response didn't return an " "HttpResponse object. It returned None instead." ) with self.assertRaisesMessage(ValueError, msg): await self.async_client.get("/no_response_fbv/") async def test_unawaited_response(self): msg = ( "The view handlers.views.CoroutineClearingView.__call__ didn't" " return an HttpResponse object. It returned an unawaited" " coroutine instead. You may need to add an 'await'" " into your view." 
) with self.assertRaisesMessage(ValueError, msg): await self.async_client.get("/unawaited/") def test_root_path(self): async_request_factory = AsyncRequestFactory() request = async_request_factory.request( **{"path": "/root/somepath/", "root_path": "/root"} ) self.assertEqual(request.path, "/root/somepath/") self.assertEqual(request.script_name, "/root") self.assertEqual(request.path_info, "/somepath/") @override_settings(FORCE_SCRIPT_NAME="/FORCED_PREFIX") def test_force_script_name(self): async_request_factory = AsyncRequestFactory() request = async_request_factory.request(**{"path": "/FORCED_PREFIX/somepath/"}) self.assertEqual(request.path, "/FORCED_PREFIX/somepath/") self.assertEqual(request.script_name, "/FORCED_PREFIX") self.assertEqual(request.path_info, "/somepath/") async def test_sync_streaming(self): response = await self.async_client.get("/streaming/") self.assertEqual(response.status_code, 200) msg = ( "StreamingHttpResponse must consume synchronous iterators in order to " "serve them asynchronously. Use an asynchronous iterator instead." ) with self.assertWarnsMessage(Warning, msg) as ctx: self.assertEqual( b"".join([chunk async for chunk in response]), b"streaming content" ) self.assertEqual(ctx.filename, __file__) async def test_async_streaming(self): response = await self.async_client.get("/async_streaming/") self.assertEqual(response.status_code, 200) self.assertEqual( b"".join([chunk async for chunk in response]), b"streaming content" )
AsyncHandlerRequestTests
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 582064, "end": 582426 }
class ____(sgqlc.types.Type): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ("client_mutation_id", "removed_user") client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId") removed_user = sgqlc.types.Field("User", graphql_name="removedUser")
RemoveOutsideCollaboratorPayload
python
sympy__sympy
sympy/functions/special/hyper.py
{ "start": 35776, "end": 36388 }
class ____(HyperRep): """ Represent hyper([a, -a], [1/2], z) == cos(2*a*asin(sqrt(z))). """ # Note there are many alternative expressions, e.g. as powers of a sum of # square roots. @classmethod def _expr_small(cls, a, z): return cos(2*a*asin(sqrt(z))) @classmethod def _expr_small_minus(cls, a, z): return cosh(2*a*asinh(sqrt(z))) @classmethod def _expr_big(cls, a, z, n): return cosh(2*a*acosh(sqrt(z)) + a*pi*I*(2*n - 1)) @classmethod def _expr_big_minus(cls, a, z, n): return cosh(2*a*asinh(sqrt(z)) + 2*a*pi*I*n)
HyperRep_cosasin
python
huggingface__transformers
src/transformers/models/bloom/modeling_bloom.py
{ "start": 17078, "end": 17371 }
class ____(PreTrainedModel): config: BloomConfig base_model_prefix = "transformer" supports_gradient_checkpointing = True _no_split_modules = ["BloomBlock"] _skip_keys_device_placement = "past_key_values" _can_compile_fullgraph = True @auto_docstring
BloomPreTrainedModel
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_bytes.py
{ "start": 126, "end": 207 }
class ____: def __bytes__(self): return 0 # [invalid-bytes-return]
Int
python
tensorflow__tensorflow
tensorflow/python/tpu/feature_column.py
{ "start": 14312, "end": 16914 }
class ____(object): """Base class for TPU Embedding Column.""" def __init__(self, categorical_column, max_sequence_length=0, learning_rate_fn=None): self._tpu_categorical_column = categorical_column self._max_sequence_length = max_sequence_length self._learning_rate_fn = learning_rate_fn if (self.is_sequence_column() and max_sequence_length < 1): raise ValueError('max_sequence_length must be greater than 0 for ' 'sequence columns. Got max_sequence_length={} for ' 'sequence column {}.'.format(max_sequence_length, categorical_column.name)) if (not self.is_sequence_column() and max_sequence_length != 0): raise ValueError('Non zero max_seq_length={} specified for non ' 'sequence column {}.'.format(max_sequence_length, categorical_column.name)) def get_combiner(self): """Returns the embedding combiner.""" raise NotImplementedError('not implemented') def get_embedding_table_size(self): """Returns the embedding table size, tuple of vocab size and dimension.""" raise NotImplementedError('not implemented') def get_feature_key_name(self): """Returns the feature key name in the features dict.""" raise NotImplementedError('not impl') def get_weight_key_name(self): """Return the key name for weights.""" raise NotImplementedError('not impl') def get_embedding_var_name(self): """Returns the embedding variable name. Feature key name and embedding variable name are usually one-to-one mapping. But for shared embedding columns, it is many-to-one mapping. 
""" raise NotImplementedError('not impl') def get_initializer(self): """Returns the initializer.""" raise NotImplementedError('not impl') def is_categorical_column_weighted(self): """Check if the categorical column of the embedding column is weighted.""" raise NotImplementedError('not impl') def is_sequence_column(self): return isinstance(self._tpu_categorical_column, _SUPPORTED_SEQUENCE_COLUMNS) def get_max_sequence_length(self): return self._max_sequence_length def get_learning_rate_fn(self): return self._learning_rate_fn def get_sequence_length_feature_key_name(self): """Get the key for the associated sequence length feature.""" return get_sequence_length_feature_key_name_from_feature_key_name( self.get_feature_key_name())
_TPUBaseEmbeddingColumn
python
OmkarPathak__pygorithm
pygorithm/data_structures/quadtree.py
{ "start": 248, "end": 2185 }
class ____(object): """ This is the minimum information required for an object to be usable in a quadtree as an entity. Entities are the things that you are trying to compare in a quadtree. :ivar aabb: the axis-aligned bounding box of this entity :type aabb: :class:`pygorithm.geometry.rect2.Rect2` """ def __init__(self, aabb): """ Create a new quad tree entity with the specified aabb :param aabb: axis-aligned bounding box :type aabb: :class:`pygorithm.geometry.rect2.Rect2` """ self.aabb = aabb def __repr__(self): """ Create an unambiguous representation of this entity. Example: .. code-block:: python from pygorithm.geometry import (vector2, rect2) from pygorithm.data_structures import quadtree _ent = quadtree.QuadTreeEntity(rect2.Rect2(5, 5)) # prints quadtreeentity(aabb=rect2(width=5, height=5, mincorner=vector2(x=0, y=0))) print(repr(_ent)) :returns: unambiguous representation of this quad tree entity :rtype: string """ return "quadtreeentity(aabb={})".format(repr(self.aabb)) def __str__(self): """ Create a human readable representation of this entity Example: .. code-block:: python from pygorithm.geometry import (vector2, rect2) from pygorithm.data_structures import quadtree _ent = quadtree.QuadTreeEntity(rect2.Rect2(5, 5)) # prints entity(at rect(5x5 at <0, 0>)) print(str(_ent)) :returns: human readable representation of this entity :rtype: string """ return "entity(at {})".format(str(self.aabb))
QuadTreeEntity
python
langchain-ai__langchain
libs/langchain_v1/tests/integration_tests/chat_models/test_base.py
{ "start": 404, "end": 1229 }
class ____(BaseModel): """Product of two ints.""" x: int y: int @pytest.mark.requires("langchain_openai", "langchain_anthropic") async def test_init_chat_model_chain() -> None: model = init_chat_model("gpt-4o", configurable_fields="any", config_prefix="bar") model_with_tools = model.bind_tools([multiply]) model_with_config = model_with_tools.with_config( RunnableConfig(tags=["foo"]), configurable={"bar_model": "claude-sonnet-4-5-20250929"}, ) prompt = ChatPromptTemplate.from_messages([("system", "foo"), ("human", "{input}")]) chain = prompt | model_with_config output = chain.invoke({"input": "bar"}) assert isinstance(output, AIMessage) events = [event async for event in chain.astream_events({"input": "bar"}, version="v2")] assert events
multiply
python
pandas-dev__pandas
pandas/tests/indexes/datetimes/methods/test_insert.py
{ "start": 213, "end": 9593 }
class ____: @pytest.mark.parametrize("null", [None, np.nan, np.datetime64("NaT"), NaT, NA]) @pytest.mark.parametrize("tz", [None, "UTC", "US/Eastern"]) def test_insert_nat(self, tz, null): # GH#16537, GH#18295 (test missing) idx = DatetimeIndex(["2017-01-01"], tz=tz) expected = DatetimeIndex(["NaT", "2017-01-01"], tz=tz) if tz is not None and isinstance(null, np.datetime64): expected = Index([null, idx[0]], dtype=object) res = idx.insert(0, null) tm.assert_index_equal(res, expected) @pytest.mark.parametrize("tz", [None, "UTC", "US/Eastern"]) def test_insert_invalid_na(self, tz): idx = DatetimeIndex(["2017-01-01"], tz=tz) item = np.timedelta64("NaT") result = idx.insert(0, item) expected = Index([item] + list(idx), dtype=object) tm.assert_index_equal(result, expected) def test_insert_empty_preserves_freq(self, tz_naive_fixture): # GH#33573 tz = tz_naive_fixture dti = DatetimeIndex([], tz=tz, freq="D") item = Timestamp("2017-04-05").tz_localize(tz) result = dti.insert(0, item) assert result.freq == dti.freq # But not when we insert an item that doesn't conform to freq dti = DatetimeIndex([], tz=tz, freq="W-THU") result = dti.insert(0, item) assert result.freq is None def test_insert(self, unit): idx = DatetimeIndex( ["2000-01-04", "2000-01-01", "2000-01-02"], name="idx" ).as_unit(unit) result = idx.insert(2, datetime(2000, 1, 5)) exp = DatetimeIndex( ["2000-01-04", "2000-01-01", "2000-01-05", "2000-01-02"], name="idx" ).as_unit(unit) tm.assert_index_equal(result, exp) # insertion of non-datetime should coerce to object index result = idx.insert(1, "inserted") expected = Index( [ datetime(2000, 1, 4), "inserted", datetime(2000, 1, 1), datetime(2000, 1, 2), ], name="idx", ) assert not isinstance(result, DatetimeIndex) tm.assert_index_equal(result, expected) assert result.name == expected.name def test_insert2(self, unit): idx = date_range("1/1/2000", periods=3, freq="ME", name="idx", unit=unit) # preserve freq expected_0 = DatetimeIndex( ["1999-12-31", "2000-01-31", 
"2000-02-29", "2000-03-31"], name="idx", freq="ME", ).as_unit(unit) expected_3 = DatetimeIndex( ["2000-01-31", "2000-02-29", "2000-03-31", "2000-04-30"], name="idx", freq="ME", ).as_unit(unit) # reset freq to None expected_1_nofreq = DatetimeIndex( ["2000-01-31", "2000-01-31", "2000-02-29", "2000-03-31"], name="idx", freq=None, ).as_unit(unit) expected_3_nofreq = DatetimeIndex( ["2000-01-31", "2000-02-29", "2000-03-31", "2000-01-02"], name="idx", freq=None, ).as_unit(unit) cases = [ (0, datetime(1999, 12, 31), expected_0), (-3, datetime(1999, 12, 31), expected_0), (3, datetime(2000, 4, 30), expected_3), (1, datetime(2000, 1, 31), expected_1_nofreq), (3, datetime(2000, 1, 2), expected_3_nofreq), ] for n, d, expected in cases: result = idx.insert(n, d) tm.assert_index_equal(result, expected) assert result.name == expected.name assert result.freq == expected.freq def test_insert3(self, unit): idx = date_range("1/1/2000", periods=3, freq="ME", name="idx", unit=unit) # reset freq to None result = idx.insert(3, datetime(2000, 1, 2)) expected = DatetimeIndex( ["2000-01-31", "2000-02-29", "2000-03-31", "2000-01-02"], name="idx", freq=None, ).as_unit(unit) tm.assert_index_equal(result, expected) assert result.name == expected.name assert result.freq is None @pytest.mark.parametrize("tz", ["US/Pacific", "Asia/Singapore"]) @pytest.mark.parametrize( "to_ts", [lambda x: x, lambda x: x.to_pydatetime()], ids=["Timestamp", "datetime"], ) def test_insert4(self, unit, tz, to_ts): idx = date_range( "1/1/2000 09:00", periods=6, freq="h", tz=tz, name="idx", unit=unit ) # preserve freq expected = date_range( "1/1/2000 09:00", periods=7, freq="h", tz=tz, name="idx", unit=unit ) tz = zoneinfo.ZoneInfo(tz) d = to_ts(Timestamp("2000-01-01 15:00", tz=tz)) result = idx.insert(6, d) tm.assert_index_equal(result, expected) assert result.name == expected.name assert result.freq == expected.freq assert result.tz == expected.tz @pytest.mark.parametrize("tz", ["US/Pacific", "Asia/Singapore"]) 
@pytest.mark.parametrize( "to_ts", [lambda x: x, lambda x: x.to_pydatetime()], ids=["Timestamp", "datetime"], ) def test_insert4_no_freq(self, unit, tz, to_ts): idx = date_range( "1/1/2000 09:00", periods=6, freq="h", tz=tz, name="idx", unit=unit ) expected = DatetimeIndex( [ "2000-01-01 09:00", "2000-01-01 10:00", "2000-01-01 11:00", "2000-01-01 12:00", "2000-01-01 13:00", "2000-01-01 14:00", "2000-01-01 10:00", ], name="idx", tz=tz, freq=None, ).as_unit(unit) # reset freq to None d = to_ts(Timestamp("2000-01-01 10:00", tz=tz)) result = idx.insert(6, d) tm.assert_index_equal(result, expected) assert result.name == expected.name assert result.tz == expected.tz assert result.freq is None # TODO: also changes DataFrame.__setitem__ with expansion def test_insert_mismatched_tzawareness(self): # see GH#7299 idx = date_range("1/1/2000", periods=3, freq="D", tz="Asia/Tokyo", name="idx") # mismatched tz-awareness item = Timestamp("2000-01-04") result = idx.insert(3, item) expected = Index( list(idx[:3]) + [item] + list(idx[3:]), dtype=object, name="idx" ) tm.assert_index_equal(result, expected) # mismatched tz-awareness item = datetime(2000, 1, 4) result = idx.insert(3, item) expected = Index( list(idx[:3]) + [item] + list(idx[3:]), dtype=object, name="idx" ) tm.assert_index_equal(result, expected) # TODO: also changes DataFrame.__setitem__ with expansion def test_insert_mismatched_tz(self): # see GH#7299 # pre-2.0 with mismatched tzs we would cast to object idx = date_range( "1/1/2000", periods=3, freq="D", tz="Asia/Tokyo", unit="ns", name="idx" ) # mismatched tz -> cast to object (could reasonably cast to same tz or UTC) item = Timestamp("2000-01-04", tz="US/Eastern") result = idx.insert(3, item) expected = Index( list(idx[:3]) + [item.tz_convert(idx.tz)] + list(idx[3:]), name="idx", ) assert expected.dtype == idx.dtype tm.assert_index_equal(result, expected) item = datetime(2000, 1, 4, tzinfo=zoneinfo.ZoneInfo("US/Eastern")) result = idx.insert(3, item) expected = 
Index( list(idx[:3]) + [item.astimezone(idx.tzinfo)] + list(idx[3:]), name="idx", ) assert expected.dtype == idx.dtype tm.assert_index_equal(result, expected) @pytest.mark.parametrize( "item", [0, np.int64(0), np.float64(0), np.array(0), np.timedelta64(456)] ) def test_insert_mismatched_types_raises(self, tz_aware_fixture, item): # GH#33703 dont cast these to dt64 tz = tz_aware_fixture dti = date_range("2019-11-04", periods=9, freq="-1D", name=9, tz=tz) result = dti.insert(1, item) if isinstance(item, np.ndarray): assert item.item() == 0 expected = Index([dti[0], 0] + list(dti[1:]), dtype=object, name=9) else: expected = Index([dti[0], item] + list(dti[1:]), dtype=object, name=9) tm.assert_index_equal(result, expected) def test_insert_castable_str(self, tz_aware_fixture): # GH#33703 tz = tz_aware_fixture dti = date_range("2019-11-04", periods=3, freq="-1D", name=9, tz=tz) value = "2019-11-05" result = dti.insert(0, value) ts = Timestamp(value).tz_localize(tz) expected = DatetimeIndex([ts] + list(dti), dtype=dti.dtype, name=9) tm.assert_index_equal(result, expected) def test_insert_non_castable_str(self, tz_aware_fixture): # GH#33703 tz = tz_aware_fixture dti = date_range("2019-11-04", periods=3, freq="-1D", name=9, tz=tz) value = "foo" result = dti.insert(0, value) expected = Index(["foo"] + list(dti), dtype=object, name=9) tm.assert_index_equal(result, expected)
TestInsert
python
lxml__lxml
doc/s5/ep2008/atom.py
{ "start": 13579, "end": 16876 }
class ____(_EntryElement): type = _attr_element_property('type', None) src = _attr_element_property('src', None) def _html__get(self): """ Gives the parsed HTML of element's content. May return an HtmlElement (from lxml.html) or an XHTML tree. If the element is ``type="text"`` then it is returned as quoted HTML. You can also set this attribute to either an lxml.html element, an XHTML element, or an HTML string. Raises AttributeError if this is not HTML content. """ ## FIXME: should this handle text/html types? if self.type == 'html': content = self.text elif self.type == 'text': content = cgi.escape(self.text) elif self.type == 'xhtml': div = copy.deepcopy(self[0]) # Now remove the namespaces: for el in div.getiterator(): if el.tag.startswith('{'): el.tag = el.tag.split('}', 1)[1] if div.tag.startswith('{'): div.tag = el.tag.split('}', 1)[1] from lxml.html import tostring content = tostring(div) else: raise AttributeError( "Not an HTML or text content (type=%r)" % self.type) from lxml.html import fromstring return fromstring(content) def _html__set(self, value): if value is None: del self.html return if isinstance(value, basestring): # Some HTML text self.type = 'html' self.text = value return if value.tag.startswith('{%s}' % xhtml_ns): if value.tag != '{%s}div' % xhtml_ns: # Need to wrap it in a <div> el = self.makeelement('{%s}div' % xhtml_ns) el.append(value) value = el self[:] = [] self.type = 'xhtml' self.append(value) return from lxml import html if isinstance(value, html.HtmlElement): value = tostring(value) self[:] = [] self.type = 'html' self.text = value return raise TypeError( "Unknown HTML type: %s" % type(value)) def _html__del(self): self.text = None html = property(_html__get, _html__set, _html__del, doc=_html__get.__doc__) def _binary__get(self): """ Gets/sets the binary content, which is base64 encoded in the text. 
""" text = self.text if text is None: raise AttributeError( "No text (maybe in src?)") text = text.decode('base64') return text def _binary__set(self, value): if isinstance(value, unicode): ## FIXME: is this kosher? value = value.encode('utf8') if not isinstance(value, str): raise TypeError( "Must set .binary to a str or unicode object (not %s)" % type(value)) value = value.encode('base64') self.text = value def _binary__del(self): self.text = None binary = property(_binary__get, _binary__set, _binary__del, doc=_binary__get.__doc__)
TextElement
python
google__flatbuffers
tests/py_test.py
{ "start": 86692, "end": 88979 }
class ____(unittest.TestCase): def test_fixed_length_array(self): builder = flatbuffers.Builder(0) a = 0.5 b = range(0, 15) c = 1 d_a = [[1, 2], [3, 4]] d_b = [ MyGame.Example.TestEnum.TestEnum.B, MyGame.Example.TestEnum.TestEnum.C, ] d_c = [ [ MyGame.Example.TestEnum.TestEnum.A, MyGame.Example.TestEnum.TestEnum.B, ], [ MyGame.Example.TestEnum.TestEnum.C, MyGame.Example.TestEnum.TestEnum.B, ], ] d_d = [[-1, 1], [-2, 2]] e = 2 f = [-1, 1] arrayOffset = MyGame.Example.ArrayStruct.CreateArrayStruct( builder, a, b, c, d_a, d_b, d_c, d_d, e, f ) # Create a table with the ArrayStruct. MyGame.Example.ArrayTable.Start(builder) MyGame.Example.ArrayTable.AddA(builder, arrayOffset) tableOffset = MyGame.Example.ArrayTable.End(builder) builder.Finish(tableOffset) buf = builder.Output() table = MyGame.Example.ArrayTable.ArrayTable.GetRootAs(buf) # Verify structure. nested = MyGame.Example.NestedStruct.NestedStruct() self.assertEqual(table.A().A(), 0.5) self.assertEqual( table.A().B(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14] ) self.assertEqual(table.A().C(), 1) self.assertEqual(table.A().D(0).A(), [1, 2]) self.assertEqual(table.A().D(1).A(), [3, 4]) self.assertEqual(table.A().D(0).B(), MyGame.Example.TestEnum.TestEnum.B) self.assertEqual(table.A().D(1).B(), MyGame.Example.TestEnum.TestEnum.C) self.assertEqual( table.A().D(0).C(), [ MyGame.Example.TestEnum.TestEnum.A, MyGame.Example.TestEnum.TestEnum.B, ], ) self.assertEqual( table.A().D(1).C(), [ MyGame.Example.TestEnum.TestEnum.C, MyGame.Example.TestEnum.TestEnum.B, ], ) self.assertEqual(table.A().D(0).D(), [-1, 1]) self.assertEqual(table.A().D(1).D(), [-2, 2]) self.assertEqual(table.A().E(), 2) self.assertEqual(table.A().F(), [-1, 1]) self.assertEqual(table.A().D(0).D(0), -1) self.assertEqual(table.A().D(0).D(1), 1) self.assertEqual(table.A().D(1).D(0), -2) self.assertEqual(table.A().D(1).D(1), 2)
TestFixedLengthArrays
python
charliermarsh__ruff
crates/ruff_python_formatter/resources/test/fixtures/ruff/fmt_skip/type_params.py
{ "start": 0, "end": 48 }
class ____[ T ]: # fmt: skip pass
TestTypeParam
python
airbytehq__airbyte
airbyte-integrations/connectors/source-iterable/source_iterable/streams.py
{ "start": 17318, "end": 17409 }
class ____(IterableExportEventsStreamAdjustableRange): data_field = "inAppOpen"
InAppOpen
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_default_format06.py
{ "start": 315, "end": 1218 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("default_format06.xlsx") def test_create_file(self): """Test the creation of a file with user defined default format""" workbook = Workbook( self.got_filename, { "default_format_properties": { "font_name": "MS Pゴシック", "font_size": 11, "valign": "vcenter", "font_charset": 128, }, "default_row_height": 18, "default_column_width": 72, }, ) worksheet = workbook.add_worksheet() worksheet.insert_image("E9", self.image_dir + "red.png") workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_rich_string04.py
{ "start": 315, "end": 1028 }
class ____(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.set_filename("rich_string04.xlsx") def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() bold = workbook.add_format({"bold": 1}) italic = workbook.add_format({"italic": 1}) worksheet.write("A1", "Foo", bold) worksheet.write("A2", "Bar", italic) worksheet.write_rich_string("A3", bold, "abc", italic, "defg") workbook.close() self.assertExcelEqual()
TestCompareXLSXFiles
python
pytorch__pytorch
torch/_higher_order_ops/wrap.py
{ "start": 5002, "end": 6859 }
class ____(HigherOrderOperator): """ This operator is used to wrap torch.utils.checkpoint. This avoids TorchDynamo to look into saved tensor hooks and directly passes the control to AOT Autograd, which is ok with tracing saved tensor hooks. As a result of AOT tracing torch.utils.checkpoint code, we have a backward graph with recomputed forward nodes. However, we might deprecate this operator soon. The difficulty arises in the functionalization of rng ops. Today, there are two different functionalization of rng ops - one at AOT autograd and other at Inductor. And they are difficult to map to each other. The rng states also complicate pattern matching in Inductor. Due to the ease of implementation, we are currently inclined towards functionalization at Inductor level, which means that duplication/recomputation is done as a compiler pass in the partitioners. See TagActivationCheckpoint for more information. """ def __init__(self) -> None: super().__init__("wrap_activation_checkpoint", cacheable=False) def __call__(self, function, *args, **kwargs): # use_reentrant is set to False because this op is going to be traced. # And we ensure that AOT Autograd traces through the non reentrant # version of checkpointing. import torch.fx.traceback as fx_traceback from torch.fx import Interpreter kwargs["use_reentrant"] = False kwargs["preserve_rng_state"] = False # Using interpreter allows preservation of metadata through torch.compile stack. with fx_traceback.preserve_node_meta(): from torch.utils.checkpoint import checkpoint return checkpoint(Interpreter(function).run, *args, **kwargs) wrap_activation_checkpoint = WrapActivationCheckpoint()
WrapActivationCheckpoint
python
doocs__leetcode
solution/0700-0799/0741.Cherry Pickup/Solution.py
{ "start": 0, "end": 1009 }
class ____: def cherryPickup(self, grid: List[List[int]]) -> int: n = len(grid) f = [[[-inf] * n for _ in range(n)] for _ in range((n << 1) - 1)] f[0][0][0] = grid[0][0] for k in range(1, (n << 1) - 1): for i1 in range(n): for i2 in range(n): j1, j2 = k - i1, k - i2 if ( not 0 <= j1 < n or not 0 <= j2 < n or grid[i1][j1] == -1 or grid[i2][j2] == -1 ): continue t = grid[i1][j1] if i1 != i2: t += grid[i2][j2] for x1 in range(i1 - 1, i1 + 1): for x2 in range(i2 - 1, i2 + 1): if x1 >= 0 and x2 >= 0: f[k][i1][i2] = max(f[k][i1][i2], f[k - 1][x1][x2] + t) return max(0, f[-1][-1][-1])
Solution
python
redis__redis-py
redis/multidb/command_executor.py
{ "start": 4428, "end": 11780 }
class ____(SyncCommandExecutor, BaseCommandExecutor): def __init__( self, failure_detectors: List[FailureDetector], databases: Databases, command_retry: Retry, failover_strategy: FailoverStrategy, event_dispatcher: EventDispatcherInterface, failover_attempts: int = DEFAULT_FAILOVER_ATTEMPTS, failover_delay: float = DEFAULT_FAILOVER_DELAY, auto_fallback_interval: float = DEFAULT_AUTO_FALLBACK_INTERVAL, ): """ Initialize the DefaultCommandExecutor instance. Args: failure_detectors: List of failure detector instances to monitor database health databases: Collection of available databases to execute commands on command_retry: Retry policy for failed command execution failover_strategy: Strategy for handling database failover event_dispatcher: Interface for dispatching events failover_attempts: Number of failover attempts failover_delay: Delay between failover attempts auto_fallback_interval: Time interval in seconds between attempts to fall back to a primary database """ super().__init__(auto_fallback_interval) for fd in failure_detectors: fd.set_command_executor(command_executor=self) self._databases = databases self._failure_detectors = failure_detectors self._command_retry = command_retry self._failover_strategy_executor = DefaultFailoverStrategyExecutor( failover_strategy, failover_attempts, failover_delay ) self._event_dispatcher = event_dispatcher self._active_database: Optional[Database] = None self._active_pubsub: Optional[PubSub] = None self._active_pubsub_kwargs = {} self._setup_event_dispatcher() self._schedule_next_fallback() @property def databases(self) -> Databases: return self._databases @property def failure_detectors(self) -> List[FailureDetector]: return self._failure_detectors def add_failure_detector(self, failure_detector: FailureDetector) -> None: self._failure_detectors.append(failure_detector) @property def command_retry(self) -> Retry: return self._command_retry @property def active_database(self) -> Optional[SyncDatabase]: return 
self._active_database @active_database.setter def active_database(self, database: SyncDatabase) -> None: old_active = self._active_database self._active_database = database if old_active is not None and old_active is not database: self._event_dispatcher.dispatch( ActiveDatabaseChanged( old_active, self._active_database, self, **self._active_pubsub_kwargs, ) ) @property def active_pubsub(self) -> Optional[PubSub]: return self._active_pubsub @active_pubsub.setter def active_pubsub(self, pubsub: PubSub) -> None: self._active_pubsub = pubsub @property def failover_strategy_executor(self) -> FailoverStrategyExecutor: return self._failover_strategy_executor def execute_command(self, *args, **options): def callback(): response = self._active_database.client.execute_command(*args, **options) self._register_command_execution(args) return response return self._execute_with_failure_detection(callback, args) def execute_pipeline(self, command_stack: tuple): def callback(): with self._active_database.client.pipeline() as pipe: for command, options in command_stack: pipe.execute_command(*command, **options) response = pipe.execute() self._register_command_execution(command_stack) return response return self._execute_with_failure_detection(callback, command_stack) def execute_transaction( self, transaction: Callable[[Pipeline], None], *watches, **options ): def callback(): response = self._active_database.client.transaction( transaction, *watches, **options ) self._register_command_execution(()) return response return self._execute_with_failure_detection(callback) def pubsub(self, **kwargs): def callback(): if self._active_pubsub is None: self._active_pubsub = self._active_database.client.pubsub(**kwargs) self._active_pubsub_kwargs = kwargs return None return self._execute_with_failure_detection(callback) def execute_pubsub_method(self, method_name: str, *args, **kwargs): def callback(): method = getattr(self.active_pubsub, method_name) response = method(*args, **kwargs) 
self._register_command_execution(args) return response return self._execute_with_failure_detection(callback, *args) def execute_pubsub_run(self, sleep_time, **kwargs) -> "PubSubWorkerThread": def callback(): return self._active_pubsub.run_in_thread(sleep_time, **kwargs) return self._execute_with_failure_detection(callback) def _execute_with_failure_detection(self, callback: Callable, cmds: tuple = ()): """ Execute a commands execution callback with failure detection. """ def wrapper(): # On each retry we need to check active database as it might change. self._check_active_database() return callback() return self._command_retry.call_with_retry( lambda: wrapper(), lambda error: self._on_command_fail(error, *cmds), ) def _on_command_fail(self, error, *args): self._event_dispatcher.dispatch(OnCommandsFailEvent(args, error)) def _check_active_database(self): """ Checks if active a database needs to be updated. """ if ( self._active_database is None or self._active_database.circuit.state != CBState.CLOSED or ( self._auto_fallback_interval > 0 and self._next_fallback_attempt <= datetime.now() ) ): self.active_database = self._failover_strategy_executor.execute() self._schedule_next_fallback() def _register_command_execution(self, cmd: tuple): for detector in self._failure_detectors: detector.register_command_execution(cmd) def _setup_event_dispatcher(self): """ Registers necessary listeners. """ failure_listener = RegisterCommandFailure(self._failure_detectors) resubscribe_listener = ResubscribeOnActiveDatabaseChanged() close_connection_listener = CloseConnectionOnActiveDatabaseChanged() self._event_dispatcher.register_listeners( { OnCommandsFailEvent: [failure_listener], ActiveDatabaseChanged: [ close_connection_listener, resubscribe_listener, ], } )
DefaultCommandExecutor
python
wandb__wandb
wandb/sdk/data_types/_dtypes.py
{ "start": 13020, "end": 13200 }
class ____(Type): name = "timestamp" types: t.ClassVar[t.List[type]] = [datetime.datetime, datetime.date] if np: TimestampType.types.append(np.datetime64)
TimestampType
python
scipy__scipy
scipy/fftpack/tests/test_real_transforms.py
{ "start": 14906, "end": 15047 }
class ____(_TestDSTBase): def setup_method(self): self.rdt = np.float64 self.dec = 14 self.type = 2
TestDSTIIDouble
python
geekcomputers__Python
venv/Lib/site-packages/pip/_vendor/distlib/metadata.py
{ "start": 9371, "end": 22256 }
class ____(object): """The legacy metadata of a release. Supports versions 1.0, 1.1, 1.2, 2.0 and 1.3/2.1 (auto-detected). You can instantiate the class with one of these arguments (or none): - *path*, the path to a metadata file - *fileobj* give a file-like object with metadata as content - *mapping* is a dict-like object - *scheme* is a version scheme name """ # TODO document the mapping API and UNKNOWN default key def __init__(self, path=None, fileobj=None, mapping=None, scheme='default'): if [path, fileobj, mapping].count(None) < 2: raise TypeError('path, fileobj and mapping are exclusive') self._fields = {} self.requires_files = [] self._dependencies = None self.scheme = scheme if path is not None: self.read(path) elif fileobj is not None: self.read_file(fileobj) elif mapping is not None: self.update(mapping) self.set_metadata_version() def set_metadata_version(self): self._fields['Metadata-Version'] = _best_version(self._fields) def _write_field(self, fileobj, name, value): fileobj.write('%s: %s\n' % (name, value)) def __getitem__(self, name): return self.get(name) def __setitem__(self, name, value): return self.set(name, value) def __delitem__(self, name): field_name = self._convert_name(name) try: del self._fields[field_name] except KeyError: raise KeyError(name) def __contains__(self, name): return (name in self._fields or self._convert_name(name) in self._fields) def _convert_name(self, name): if name in _ALL_FIELDS: return name name = name.replace('-', '_').lower() return _ATTR2FIELD.get(name, name) def _default_value(self, name): if name in _LISTFIELDS or name in _ELEMENTSFIELD: return [] return 'UNKNOWN' def _remove_line_prefix(self, value): if self.metadata_version in ('1.0', '1.1'): return _LINE_PREFIX_PRE_1_2.sub('\n', value) else: return _LINE_PREFIX_1_2.sub('\n', value) def __getattr__(self, name): if name in _ATTR2FIELD: return self[name] raise AttributeError(name) # # Public API # # dependencies = property(_get_dependencies, _set_dependencies) 
def get_fullname(self, filesafe=False): """Return the distribution name with version. If filesafe is true, return a filename-escaped form.""" return _get_name_and_version(self['Name'], self['Version'], filesafe) def is_field(self, name): """return True if name is a valid metadata key""" name = self._convert_name(name) return name in _ALL_FIELDS def is_multi_field(self, name): name = self._convert_name(name) return name in _LISTFIELDS def read(self, filepath): """Read the metadata values from a file path.""" fp = codecs.open(filepath, 'r', encoding='utf-8') try: self.read_file(fp) finally: fp.close() def read_file(self, fileob): """Read the metadata values from a file object.""" msg = message_from_file(fileob) self._fields['Metadata-Version'] = msg['metadata-version'] # When reading, get all the fields we can for field in _ALL_FIELDS: if field not in msg: continue if field in _LISTFIELDS: # we can have multiple lines values = msg.get_all(field) if field in _LISTTUPLEFIELDS and values is not None: values = [tuple(value.split(',')) for value in values] self.set(field, values) else: # single line value = msg[field] if value is not None and value != 'UNKNOWN': self.set(field, value) # PEP 566 specifies that the body be used for the description, if # available body = msg.get_payload() self["Description"] = body if body else self["Description"] # logger.debug('Attempting to set metadata for %s', self) # self.set_metadata_version() def write(self, filepath, skip_unknown=False): """Write the metadata fields to filepath.""" fp = codecs.open(filepath, 'w', encoding='utf-8') try: self.write_file(fp, skip_unknown) finally: fp.close() def write_file(self, fileobject, skip_unknown=False): """Write the PKG-INFO format data to a file object.""" self.set_metadata_version() for field in _version2fieldlist(self['Metadata-Version']): values = self.get(field) if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']): continue if field in _ELEMENTSFIELD: self._write_field(fileobject, 
field, ','.join(values)) continue if field not in _LISTFIELDS: if field == 'Description': if self.metadata_version in ('1.0', '1.1'): values = values.replace('\n', '\n ') else: values = values.replace('\n', '\n |') values = [values] if field in _LISTTUPLEFIELDS: values = [','.join(value) for value in values] for value in values: self._write_field(fileobject, field, value) def update(self, other=None, **kwargs): """Set metadata values from the given iterable `other` and kwargs. Behavior is like `dict.update`: If `other` has a ``keys`` method, they are looped over and ``self[key]`` is assigned ``other[key]``. Else, ``other`` is an iterable of ``(key, value)`` iterables. Keys that don't match a metadata field or that have an empty value are dropped. """ def _set(key, value): if key in _ATTR2FIELD and value: self.set(self._convert_name(key), value) if not other: # other is None or empty container pass elif hasattr(other, 'keys'): for k in other.keys(): _set(k, other[k]) else: for k, v in other: _set(k, v) if kwargs: for k, v in kwargs.items(): _set(k, v) def set(self, name, value): """Control then set a metadata field.""" name = self._convert_name(name) if ((name in _ELEMENTSFIELD or name == 'Platform') and not isinstance(value, (list, tuple))): if isinstance(value, string_types): value = [v.strip() for v in value.split(',')] else: value = [] elif (name in _LISTFIELDS and not isinstance(value, (list, tuple))): if isinstance(value, string_types): value = [value] else: value = [] if logger.isEnabledFor(logging.WARNING): project_name = self['Name'] scheme = get_scheme(self.scheme) if name in _PREDICATE_FIELDS and value is not None: for v in value: # check that the values are valid if not scheme.is_valid_matcher(v.split(';')[0]): logger.warning( "'%s': '%s' is not valid (field '%s')", project_name, v, name) # FIXME this rejects UNKNOWN, is that right? 
elif name in _VERSIONS_FIELDS and value is not None: if not scheme.is_valid_constraint_list(value): logger.warning("'%s': '%s' is not a valid version (field '%s')", project_name, value, name) elif name in _VERSION_FIELDS and value is not None: if not scheme.is_valid_version(value): logger.warning("'%s': '%s' is not a valid version (field '%s')", project_name, value, name) if name in _UNICODEFIELDS: if name == 'Description': value = self._remove_line_prefix(value) self._fields[name] = value def get(self, name, default=_MISSING): """Get a metadata field.""" name = self._convert_name(name) if name not in self._fields: if default is _MISSING: default = self._default_value(name) return default if name in _UNICODEFIELDS: value = self._fields[name] return value elif name in _LISTFIELDS: value = self._fields[name] if value is None: return [] res = [] for val in value: if name not in _LISTTUPLEFIELDS: res.append(val) else: # That's for Project-URL res.append((val[0], val[1])) return res elif name in _ELEMENTSFIELD: value = self._fields[name] if isinstance(value, string_types): return value.split(',') return self._fields[name] def check(self, strict=False): """Check if the metadata is compliant. 
If strict is True then raise if no Name or Version are provided""" self.set_metadata_version() # XXX should check the versions (if the file was loaded) missing, warnings = [], [] for attr in ('Name', 'Version'): # required by PEP 345 if attr not in self: missing.append(attr) if strict and missing != []: msg = 'missing required metadata: %s' % ', '.join(missing) raise MetadataMissingError(msg) for attr in ('Home-page', 'Author'): if attr not in self: missing.append(attr) # checking metadata 1.2 (XXX needs to check 1.1, 1.0) if self['Metadata-Version'] != '1.2': return missing, warnings scheme = get_scheme(self.scheme) def are_valid_constraints(value): for v in value: if not scheme.is_valid_matcher(v.split(';')[0]): return False return True for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints), (_VERSIONS_FIELDS, scheme.is_valid_constraint_list), (_VERSION_FIELDS, scheme.is_valid_version)): for field in fields: value = self.get(field, None) if value is not None and not controller(value): warnings.append("Wrong value for '%s': %s" % (field, value)) return missing, warnings def todict(self, skip_missing=False): """Return fields as a dict. Field names will be converted to use the underscore-lowercase style instead of hyphen-mixed case (i.e. home_page instead of Home-page). This is as per https://www.python.org/dev/peps/pep-0566/#id17. 
""" self.set_metadata_version() fields = _version2fieldlist(self['Metadata-Version']) data = {} for field_name in fields: if not skip_missing or field_name in self._fields: key = _FIELD2ATTR[field_name] if key != 'project_url': data[key] = self[field_name] else: data[key] = [','.join(u) for u in self[field_name]] return data def add_requirements(self, requirements): if self['Metadata-Version'] == '1.1': # we can't have 1.1 metadata *and* Setuptools requires for field in ('Obsoletes', 'Requires', 'Provides'): if field in self: del self[field] self['Requires-Dist'] += requirements # Mapping API # TODO could add iter* variants def keys(self): return list(_version2fieldlist(self['Metadata-Version'])) def __iter__(self): for key in self.keys(): yield key def values(self): return [self[key] for key in self.keys()] def items(self): return [(key, self[key]) for key in self.keys()] def __repr__(self): return '<%s %s %s>' % (self.__class__.__name__, self.name, self.version) METADATA_FILENAME = 'pydist.json' WHEEL_METADATA_FILENAME = 'metadata.json' LEGACY_METADATA_FILENAME = 'METADATA'
LegacyMetadata