Dataset columns:

- language: stringclasses (1 value)
- repo: stringclasses (346 values)
- path: stringlengths (6 to 201)
- class_span: dict
- source: stringlengths (21 to 2.38M)
- target: stringlengths (1 to 96)
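To make the row format concrete, here is a minimal sketch of how such a dataset might be loaded and inspected with the `datasets` library. The dataset id `org/masked-class-names` is a placeholder (the real Hub id is not given here), and it assumes `class_span` holds character offsets of the masked class within `source`; if `source` is already the extracted snippet, the slice is unnecessary.

    from datasets import load_dataset

    # Placeholder id -- substitute the real Hub id or a local path.
    ds = load_dataset("org/masked-class-names", split="train")

    row = ds[0]
    span = row["class_span"]  # assumed: {"start": ..., "end": ...} char offsets

    # The masked class body sits at source[start:end]; row["target"] is the
    # class name hidden behind the "____" blank.
    snippet = row["source"][span["start"]:span["end"]]
    print(row["repo"], row["path"])
    print(snippet)             # contains: class ____(...)
    print("answer:", row["target"])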
---
language: python
repo: GoogleCloudPlatform__python-docs-samples
path: appengine/standard/hello_world/main.py
class_span: { "start": 593, "end": 847 }
source:

class ____(webapp2.RequestHandler):
    def get(self):
        self.response.headers["Content-Type"] = "text/plain"
        self.response.write("Hello, World!")


app = webapp2.WSGIApplication(
    [
        ("/", MainPage),
    ],
    debug=True,
)

target: MainPage
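The `____` blank plus `class_span` in rows like the one above suggest each row was produced by masking a class definition's name in place. How the dataset was actually built is not documented here; the following is only an illustrative sketch using Python's `ast` to locate a class and record its character span (the helper name `mask_class` is made up, and the dataset's real spans appear to run up to the next definition, so this is an approximation).

    import ast

    def mask_class(source: str, class_name: str):
        """Replace one class's name with ____ and record its character span."""
        # Character offset of the start of each line, to turn
        # (lineno, col_offset) pairs into flat string indices.
        line_starts = [0]
        for line in source.splitlines(keepends=True):
            line_starts.append(line_starts[-1] + len(line))

        tree = ast.parse(source)
        for node in ast.walk(tree):
            if isinstance(node, ast.ClassDef) and node.name == class_name:
                start = line_starts[node.lineno - 1] + node.col_offset
                end = line_starts[node.end_lineno - 1] + node.end_col_offset
                # The first occurrence of the name inside the span is the
                # definition itself ("class Foo(...)"), so replace only that.
                body = source[start:end].replace(class_name, "____", 1)
                return source[:start] + body + source[end:], {"start": start, "end": end}
        raise ValueError(f"class {class_name!r} not found")

Note the recorded span refers to the unmasked source; after replacement the span length shifts unless the class name happens to be four characters long.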
---
language: python
repo: django__django
path: tests/admin_registration/models.py
class_span: { "start": 356, "end": 572 }
source:

class ____(models.Model):
    pk = models.CompositePrimaryKey("traveler", "place")
    traveler = models.ForeignKey(Traveler, on_delete=models.CASCADE)
    place = models.ForeignKey(Place, on_delete=models.CASCADE)

target: Guest
---
language: python
repo: spack__spack
path: lib/spack/spack/spec.py
class_span: { "start": 7420, "end": 23046 }
source:

class ____:
    """Aggregate the target platform, the operating system and the target microarchitecture."""

    ANY_TARGET = _make_microarchitecture("*")

    @staticmethod
    def default_arch():
        """Return the default architecture"""
        platform = spack.platforms.host()
        default_os = platform.default_operating_system()
        default_target = platform.default_target()
        arch_tuple = str(platform), str(default_os), str(default_target)
        return ArchSpec(arch_tuple)

    __slots__ = "_platform", "_os", "_target"

    def __init__(self, spec_or_platform_tuple=(None, None, None)):
        """Architecture specification a package should be built with.

        Each ArchSpec is comprised of three elements: a platform (e.g. Linux),
        an OS (e.g. RHEL6), and a target (e.g. x86_64).

        Args:
            spec_or_platform_tuple (ArchSpec or str or tuple): if an ArchSpec
                is passed it will be duplicated into the new instance.
                Otherwise information on platform, OS and target should be
                passed in either as a spec string or as a tuple.
        """

        # If the argument to __init__ is a spec string, parse it
        # and construct an ArchSpec
        def _string_or_none(s):
            if s and s != "None":
                return str(s)
            return None

        # If another instance of ArchSpec was passed, duplicate it
        if isinstance(spec_or_platform_tuple, ArchSpec):
            other = spec_or_platform_tuple
            platform_tuple = other.platform, other.os, other.target
        elif isinstance(spec_or_platform_tuple, (str, tuple)):
            spec_fields = spec_or_platform_tuple

            # Normalize the string to a tuple
            if isinstance(spec_or_platform_tuple, str):
                spec_fields = spec_or_platform_tuple.split("-")
                if len(spec_fields) != 3:
                    msg = "cannot construct an ArchSpec from {0!s}"
                    raise ValueError(msg.format(spec_or_platform_tuple))

            platform, operating_system, target = spec_fields
            platform_tuple = (_string_or_none(platform), _string_or_none(operating_system), target)

        self.platform, self.os, self.target = platform_tuple

    @staticmethod
    def override(init_spec, change_spec):
        if init_spec:
            new_spec = init_spec.copy()
        else:
            new_spec = ArchSpec()
        if change_spec.platform:
            new_spec.platform = change_spec.platform
            # TODO: if the platform is changed to something that is incompatible
            # with the current os, we should implicitly remove it
        if change_spec.os:
            new_spec.os = change_spec.os
        if change_spec.target:
            new_spec.target = change_spec.target
        return new_spec

    def _autospec(self, spec_like):
        if isinstance(spec_like, ArchSpec):
            return spec_like
        return ArchSpec(spec_like)

    def _cmp_iter(self):
        yield self.platform
        yield self.os
        if self.target is None:
            yield self.target
        else:
            yield self.target.name

    @property
    def platform(self):
        """The platform of the architecture."""
        return self._platform

    @platform.setter
    def platform(self, value):
        # The platform of the architecture spec will be verified as a
        # supported Spack platform before it's set to ensure all specs
        # refer to valid platforms.
        value = str(value) if value is not None else None
        self._platform = value

    @property
    def os(self):
        """The OS of this ArchSpec."""
        return self._os

    @os.setter
    def os(self, value):
        # The OS of the architecture spec will update the platform field
        # if the OS is set to one of the reserved OS types so that the
        # default OS type can be resolved. Since the reserved OS
        # information is only available for the host machine, the platform
        # will assumed to be the host machine's platform.
        value = str(value) if value is not None else None

        if value in spack.platforms.Platform.reserved_oss:
            curr_platform = str(spack.platforms.host())
            self.platform = self.platform or curr_platform

            if self.platform != curr_platform:
                raise ValueError(
                    "Can't set arch spec OS to reserved value '%s' when the "
                    "arch platform (%s) isn't the current platform (%s)"
                    % (value, self.platform, curr_platform)
                )

            spec_platform = spack.platforms.by_name(self.platform)
            value = str(spec_platform.operating_system(value))

        self._os = value

    @property
    def target(self):
        """The target of the architecture."""
        return self._target

    @target.setter
    def target(self, value):
        # The target of the architecture spec will update the platform field
        # if the target is set to one of the reserved target types so that
        # the default target type can be resolved. Since the reserved target
        # information is only available for the host machine, the platform
        # will assumed to be the host machine's platform.

        def target_or_none(t):
            if isinstance(t, spack.vendor.archspec.cpu.Microarchitecture):
                return t
            if t and t != "None":
                return _make_microarchitecture(t)
            return None

        value = target_or_none(value)

        if str(value) in spack.platforms.Platform.reserved_targets:
            curr_platform = str(spack.platforms.host())
            self.platform = self.platform or curr_platform

            if self.platform != curr_platform:
                raise ValueError(
                    "Can't set arch spec target to reserved value '%s' when "
                    "the arch platform (%s) isn't the current platform (%s)"
                    % (value, self.platform, curr_platform)
                )

            spec_platform = spack.platforms.by_name(self.platform)
            value = spec_platform.target(value)

        self._target = value

    def satisfies(self, other: "ArchSpec") -> bool:
        """Return True if all concrete specs matching self also match other, otherwise False.

        Args:
            other: spec to be satisfied
        """
        other = self._autospec(other)

        # Check platform and os
        for attribute in ("platform", "os"):
            other_attribute = getattr(other, attribute)
            self_attribute = getattr(self, attribute)
            # platform=* or os=*
            if self_attribute and other_attribute == "*":
                return True
            if other_attribute and self_attribute != other_attribute:
                return False

        return self._target_satisfies(other, strict=True)

    def intersects(self, other: "ArchSpec") -> bool:
        """Return True if there exists at least one concrete spec that matches both
        self and other, otherwise False.

        This operation is commutative, and if two specs intersect it means that one
        can constrain the other.

        Args:
            other: spec to be checked for compatibility
        """
        other = self._autospec(other)

        # Check platform and os
        for attribute in ("platform", "os"):
            other_attribute = getattr(other, attribute)
            self_attribute = getattr(self, attribute)
            if other_attribute and self_attribute and self_attribute != other_attribute:
                return False

        return self._target_satisfies(other, strict=False)

    def _target_satisfies(self, other: "ArchSpec", strict: bool) -> bool:
        if strict is True:
            need_to_check = bool(other.target)
        else:
            need_to_check = bool(other.target and self.target)

        if not need_to_check:
            return True

        # other_target is there and strict=True
        if self.target is None:
            return False

        # self.target is not None, and other is target=*
        if other.target == ArchSpec.ANY_TARGET:
            return True

        return bool(self._target_intersection(other))

    def _target_constrain(self, other: "ArchSpec") -> bool:
        if self.target is None and other.target is None:
            return False

        if not other._target_satisfies(self, strict=False):
            raise UnsatisfiableArchitectureSpecError(self, other)

        if self.target_concrete:
            return False
        elif other.target_concrete:
            self.target = other.target
            return True

        # Compute the intersection of every combination of ranges in the lists
        results = self._target_intersection(other)
        attribute_str = ",".join(results)
        intersection_target = _make_microarchitecture(attribute_str)

        if self.target == intersection_target:
            return False

        self.target = intersection_target
        return True

    def _target_intersection(self, other):
        results = []

        if not self.target or not other.target:
            return results

        for s_target_range in str(self.target).split(","):
            s_min, s_sep, s_max = s_target_range.partition(":")

            for o_target_range in str(other.target).split(","):
                o_min, o_sep, o_max = o_target_range.partition(":")

                if not s_sep:
                    # s_target_range is a concrete target
                    # get a microarchitecture reference for at least one side
                    # of each comparison so we can use archspec comparators
                    s_comp = _make_microarchitecture(s_min)
                    if not o_sep:
                        if s_min == o_min:
                            results.append(s_min)
                    elif (not o_min or s_comp >= o_min) and (not o_max or s_comp <= o_max):
                        results.append(s_min)
                elif not o_sep:
                    # "cast" to microarchitecture
                    o_comp = _make_microarchitecture(o_min)
                    if (not s_min or o_comp >= s_min) and (not s_max or o_comp <= s_max):
                        results.append(o_min)
                else:
                    # Take the "min" of the two max, if there is a partial ordering.
                    n_max = ""
                    if s_max and o_max:
                        _s_max = _make_microarchitecture(s_max)
                        _o_max = _make_microarchitecture(o_max)
                        if _s_max.family != _o_max.family:
                            continue
                        if _s_max <= _o_max:
                            n_max = s_max
                        elif _o_max < _s_max:
                            n_max = o_max
                        else:
                            continue
                    elif s_max:
                        n_max = s_max
                    elif o_max:
                        n_max = o_max

                    # Take the "max" of the two min.
                    n_min = ""
                    if s_min and o_min:
                        _s_min = _make_microarchitecture(s_min)
                        _o_min = _make_microarchitecture(o_min)
                        if _s_min.family != _o_min.family:
                            continue
                        if _s_min >= _o_min:
                            n_min = s_min
                        elif _o_min > _s_min:
                            n_min = o_min
                        else:
                            continue
                    elif s_min:
                        n_min = s_min
                    elif o_min:
                        n_min = o_min

                    if n_min and n_max:
                        _n_min = _make_microarchitecture(n_min)
                        _n_max = _make_microarchitecture(n_max)
                        if _n_min.family != _n_max.family or not _n_min <= _n_max:
                            continue
                        if n_min == n_max:
                            results.append(n_min)
                        else:
                            results.append(f"{n_min}:{n_max}")
                    elif n_min:
                        results.append(f"{n_min}:")
                    elif n_max:
                        results.append(f":{n_max}")

        return results

    def constrain(self, other: "ArchSpec") -> bool:
        """Projects all architecture fields that are specified in the given
        spec onto the instance spec if they're missing from the instance
        spec.

        This will only work if the two specs are compatible.

        Args:
            other (ArchSpec or str): constraints to be added

        Returns:
            True if the current instance was constrained, False otherwise.
        """
        other = self._autospec(other)

        if not other.intersects(self):
            raise UnsatisfiableArchitectureSpecError(other, self)

        constrained = False
        for attr in ("platform", "os"):
            svalue, ovalue = getattr(self, attr), getattr(other, attr)
            if svalue is None and ovalue is not None:
                setattr(self, attr, ovalue)
                constrained = True

        constrained |= self._target_constrain(other)

        return constrained

    def copy(self):
        """Copy the current instance and returns the clone."""
        return ArchSpec(self)

    @property
    def concrete(self):
        """True if the spec is concrete, False otherwise"""
        return self.platform and self.os and self.target and self.target_concrete

    @property
    def target_concrete(self):
        """True if the target is not a range or list."""
        return (
            self.target is not None and ":" not in str(self.target) and "," not in str(self.target)
        )

    def to_dict(self):
        # Generic targets represent either an architecture family (like x86_64)
        # or a custom micro-architecture
        if self.target.vendor == "generic":
            target_data = str(self.target)
        else:
            # Get rid of compiler flag information before turning the uarch into a dict
            target_data = self.target.to_dict()
            target_data.pop("compilers", None)

        return {"arch": {"platform": self.platform, "platform_os": self.os, "target": target_data}}

    @staticmethod
    def from_dict(d):
        """Import an ArchSpec from raw YAML/JSON data"""
        arch = d["arch"]
        target_name = arch["target"]
        if not isinstance(target_name, str):
            target_name = target_name["name"]
        target = _make_microarchitecture(target_name)
        return ArchSpec((arch["platform"], arch["platform_os"], target))

    def __str__(self):
        return "%s-%s-%s" % (self.platform, self.os, self.target)

    def __repr__(self):
        fmt = "ArchSpec(({0.platform!r}, {0.os!r}, {1!r}))"
        return fmt.format(self, str(self.target))

    def __contains__(self, string):
        return string in str(self) or string in self.target

    def complete_with_defaults(self) -> None:
        default_architecture = ArchSpec.default_arch()
        if not self.platform:
            self.platform = default_architecture.platform
        if not self.os:
            self.os = default_architecture.os
        if not self.target:
            self.target = default_architecture.target

target: ArchSpec
---
language: python
repo: PrefectHQ__prefect
path: tests/server/schemas/test_actions.py
class_span: { "start": 11746, "end": 13716 }
source:

class ____:
    @pytest.mark.parametrize(
        "template",
        [
            {
                "job_configuration": {"thing_one": "{{ expected_variable }}"},
                "variables": {
                    "properties": {"wrong_variable": {}},
                    "required": [],
                },
            },
            {
                "job_configuration": {
                    "thing_one": "{{ expected_variable_1 }}",
                    "thing_two": "{{ expected_variable_2 }}",
                },
                "variables": {
                    "properties": {
                        "not_expected_variable_1": {},
                        "expected_variable_2": {},
                    },
                    "required": [],
                },
            },
        ],
    )
    async def test_validate_base_job_template_fails(self, template):
        """Test that error is raised if base_job_template job_configuration
        expects a variable that is not provided in variables."""
        with pytest.raises(
            ValueError,
            match=(
                r"Your job configuration uses the following undeclared variable\(s\):"
                r" expected_variable"
            ),
        ):
            WorkPoolCreate(name="test", base_job_template=template)

    @pytest.mark.parametrize(
        "template",
        [
            dict(),
            {
                "job_configuration": {"thing_one": "{{ expected_variable }}"},
                "variables": {
                    "properties": {"expected_variable": {}},
                    "required": [],
                },
            },
        ],
    )
    async def test_validate_base_job_template_succeeds(self, template):
        """Test that no error is raised if all variables expected by
        job_configuration are provided in variables."""
        wp = WorkPoolCreate(name="test", type="test", base_job_template=template)
        assert wp

target: TestWorkPoolCreate
---
language: python
repo: rq__rq
path: rq/job.py
class_span: { "start": 71776, "end": 72366 }
source:

class ____:
    def __init__(self, func: Union[str, Callable[..., Any]], timeout: Optional[Any] = None):
        if not isinstance(func, str) and not inspect.isfunction(func) and not inspect.isbuiltin(func):
            raise ValueError('Callback `func` must be a string or function')

        self.func = func
        self.timeout = parse_timeout(timeout) if timeout else CALLBACK_TIMEOUT

    @property
    def name(self) -> str:
        if isinstance(self.func, str):
            return self.func
        _, func_name = resolve_function_reference(self.func)
        return func_name

target: Callback
---
language: python
repo: html5lib__html5lib-python
path: html5lib/serializer.py
class_span: { "start": 3623, "end": 15668 }
source:

class ____(object):
    # attribute quoting options
    quote_attr_values = "legacy"  # be secure by default
    quote_char = '"'
    use_best_quote_char = True

    # tag syntax options
    omit_optional_tags = True
    minimize_boolean_attributes = True
    use_trailing_solidus = False
    space_before_trailing_solidus = True

    # escaping options
    escape_lt_in_attrs = False
    escape_rcdata = False
    resolve_entities = True

    # miscellaneous options
    alphabetical_attributes = False
    inject_meta_charset = True
    strip_whitespace = False
    sanitize = False

    options = ("quote_attr_values", "quote_char", "use_best_quote_char",
               "omit_optional_tags", "minimize_boolean_attributes",
               "use_trailing_solidus", "space_before_trailing_solidus",
               "escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
               "alphabetical_attributes", "inject_meta_charset",
               "strip_whitespace", "sanitize")

    def __init__(self, **kwargs):
        """Initialize HTMLSerializer

        :arg inject_meta_charset: Whether or not to inject the meta charset.
            Defaults to ``True``.

        :arg quote_attr_values: Whether to quote attribute values that don't
            require quoting per legacy browser behavior (``"legacy"``), when
            required by the standard (``"spec"``), or always (``"always"``).
            Defaults to ``"legacy"``.

        :arg quote_char: Use given quote character for attribute quoting.
            Defaults to ``"`` which will use double quotes unless attribute
            value contains a double quote, in which case single quotes are
            used.

        :arg escape_lt_in_attrs: Whether or not to escape ``<`` in attribute
            values.
            Defaults to ``False``.

        :arg escape_rcdata: Whether to escape characters that need to be
            escaped within normal elements within rcdata elements such as
            style.
            Defaults to ``False``.

        :arg resolve_entities: Whether to resolve named character entities that
            appear in the source tree. The XML predefined entities &lt; &gt;
            &amp; &quot; &apos; are unaffected by this setting.
            Defaults to ``True``.

        :arg strip_whitespace: Whether to remove semantically meaningless
            whitespace. (This compresses all whitespace to a single space
            except within ``pre``.)
            Defaults to ``False``.

        :arg minimize_boolean_attributes: Shortens boolean attributes to give
            just the attribute value, for example::

              <input disabled="disabled">

            becomes::

              <input disabled>

            Defaults to ``True``.

        :arg use_trailing_solidus: Includes a close-tag slash at the end of the
            start tag of void elements (empty elements whose end tag is
            forbidden). E.g. ``<hr/>``.
            Defaults to ``False``.

        :arg space_before_trailing_solidus: Places a space immediately before
            the closing slash in a tag using a trailing solidus. E.g.
            ``<hr />``. Requires ``use_trailing_solidus=True``.
            Defaults to ``True``.

        :arg sanitize: Strip all unsafe or unknown constructs from output.
            See :py:class:`html5lib.filters.sanitizer.Filter`.
            Defaults to ``False``.

        :arg omit_optional_tags: Omit start/end tags that are optional.
            Defaults to ``True``.

        :arg alphabetical_attributes: Reorder attributes to be in alphabetical
            order.
            Defaults to ``False``.

        """
        unexpected_args = frozenset(kwargs) - frozenset(self.options)
        if len(unexpected_args) > 0:
            raise TypeError("__init__() got an unexpected keyword argument '%s'"
                            % next(iter(unexpected_args)))
        if 'quote_char' in kwargs:
            self.use_best_quote_char = False
        for attr in self.options:
            setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
        self.errors = []
        self.strict = False

    def encode(self, string):
        assert isinstance(string, text_type)
        if self.encoding:
            return string.encode(self.encoding, "htmlentityreplace")
        else:
            return string

    def encodeStrict(self, string):
        assert isinstance(string, text_type)
        if self.encoding:
            return string.encode(self.encoding, "strict")
        else:
            return string

    def serialize(self, treewalker, encoding=None):
        # pylint:disable=too-many-nested-blocks
        self.encoding = encoding
        in_cdata = False
        self.errors = []

        if encoding and self.inject_meta_charset:
            from .filters.inject_meta_charset import Filter
            treewalker = Filter(treewalker, encoding)
        # Alphabetical attributes is here under the assumption that none of
        # the later filters add or change order of attributes; it needs to be
        # before the sanitizer so escaped elements come out correctly
        if self.alphabetical_attributes:
            from .filters.alphabeticalattributes import Filter
            treewalker = Filter(treewalker)
        # WhitespaceFilter should be used before OptionalTagFilter
        # for maximum efficiently of this latter filter
        if self.strip_whitespace:
            from .filters.whitespace import Filter
            treewalker = Filter(treewalker)
        if self.sanitize:
            from .filters.sanitizer import Filter
            treewalker = Filter(treewalker)
        if self.omit_optional_tags:
            from .filters.optionaltags import Filter
            treewalker = Filter(treewalker)

        for token in treewalker:
            type = token["type"]
            if type == "Doctype":
                doctype = "<!DOCTYPE %s" % token["name"]

                if token["publicId"]:
                    doctype += ' PUBLIC "%s"' % token["publicId"]
                elif token["systemId"]:
                    doctype += " SYSTEM"
                if token["systemId"]:
                    if token["systemId"].find('"') >= 0:
                        if token["systemId"].find("'") >= 0:
                            self.serializeError("System identifier contains both single and double quote characters")
                        quote_char = "'"
                    else:
                        quote_char = '"'
                    doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)

                doctype += ">"
                yield self.encodeStrict(doctype)

            elif type in ("Characters", "SpaceCharacters"):
                if type == "SpaceCharacters" or in_cdata:
                    if in_cdata and token["data"].find("</") >= 0:
                        self.serializeError("Unexpected </ in CDATA")
                    yield self.encode(token["data"])
                else:
                    yield self.encode(escape(token["data"]))

            elif type in ("StartTag", "EmptyTag"):
                name = token["name"]
                yield self.encodeStrict("<%s" % name)
                if name in rcdataElements and not self.escape_rcdata:
                    in_cdata = True
                elif in_cdata:
                    self.serializeError("Unexpected child element of a CDATA element")
                for (_, attr_name), attr_value in token["data"].items():
                    # TODO: Add namespace support here
                    k = attr_name
                    v = attr_value
                    yield self.encodeStrict(' ')

                    yield self.encodeStrict(k)
                    if not self.minimize_boolean_attributes or \
                        (k not in booleanAttributes.get(name, tuple()) and
                         k not in booleanAttributes.get("", tuple())):
                        yield self.encodeStrict("=")
                        if self.quote_attr_values == "always" or len(v) == 0:
                            quote_attr = True
                        elif self.quote_attr_values == "spec":
                            quote_attr = _quoteAttributeSpec.search(v) is not None
                        elif self.quote_attr_values == "legacy":
                            quote_attr = _quoteAttributeLegacy.search(v) is not None
                        else:
                            raise ValueError("quote_attr_values must be one of: "
                                             "'always', 'spec', or 'legacy'")
                        v = v.replace("&", "&amp;")
                        if self.escape_lt_in_attrs:
                            v = v.replace("<", "&lt;")
                        if quote_attr:
                            quote_char = self.quote_char
                            if self.use_best_quote_char:
                                if "'" in v and '"' not in v:
                                    quote_char = '"'
                                elif '"' in v and "'" not in v:
                                    quote_char = "'"
                            if quote_char == "'":
                                v = v.replace("'", "&#39;")
                            else:
                                v = v.replace('"', "&quot;")
                            yield self.encodeStrict(quote_char)
                            yield self.encode(v)
                            yield self.encodeStrict(quote_char)
                        else:
                            yield self.encode(v)
                if name in voidElements and self.use_trailing_solidus:
                    if self.space_before_trailing_solidus:
                        yield self.encodeStrict(" /")
                    else:
                        yield self.encodeStrict("/")
                yield self.encode(">")

            elif type == "EndTag":
                name = token["name"]
                if name in rcdataElements:
                    in_cdata = False
                elif in_cdata:
                    self.serializeError("Unexpected child element of a CDATA element")
                yield self.encodeStrict("</%s>" % name)

            elif type == "Comment":
                data = token["data"]
                if data.find("--") >= 0:
                    self.serializeError("Comment contains --")
                yield self.encodeStrict("<!--%s-->" % token["data"])

            elif type == "Entity":
                name = token["name"]
                key = name + ";"
                if key not in entities:
                    self.serializeError("Entity %s not recognized" % name)
                if self.resolve_entities and key not in xmlEntities:
                    data = entities[key]
                else:
                    data = "&%s;" % name
                yield self.encodeStrict(data)

            else:
                self.serializeError(token["data"])

    def render(self, treewalker, encoding=None):
        """Serializes the stream from the treewalker into a string

        :arg treewalker: the treewalker to serialize

        :arg encoding: the string encoding to use

        :returns: the serialized tree

        Example:

        >>> from html5lib import parse, getTreeWalker
        >>> from html5lib.serializer import HTMLSerializer
        >>> token_stream = parse('<html><body>Hi!</body></html>')
        >>> walker = getTreeWalker('etree')
        >>> serializer = HTMLSerializer(omit_optional_tags=False)
        >>> serializer.render(walker(token_stream))
        '<html><head></head><body>Hi!</body></html>'

        """
        if encoding:
            return b"".join(list(self.serialize(treewalker, encoding)))
        else:
            return "".join(list(self.serialize(treewalker)))

    def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
        # XXX The idea is to make data mandatory.
        self.errors.append(data)
        if self.strict:
            raise SerializeError

target: HTMLSerializer
---
language: python
repo: huggingface__transformers
path: src/transformers/models/owlvit/modeling_owlvit.py
class_span: { "start": 39184, "end": 41074 }
source:

class ____(OwlViTPreTrainedModel):
    config: OwlViTVisionConfig
    main_input_name = "pixel_values"
    input_modalities = ("image",)

    def __init__(self, config: OwlViTVisionConfig):
        super().__init__(config)
        self.vision_model = OwlViTVisionTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPooling]:
        r"""
        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, OwlViTVisionModel

        >>> model = OwlViTVisionModel.from_pretrained("google/owlvit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled CLS states
        ```"""
        return self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )


@auto_docstring

target: OwlViTVisionModel
---
language: python
repo: huggingface__transformers
path: tests/models/blip/test_image_processing_blip.py
class_span: { "start": 2952, "end": 4083 }
source:

class ____(ImageProcessingTestMixin, unittest.TestCase):
    image_processing_class = BlipImageProcessor if is_vision_available() else None
    fast_image_processing_class = BlipImageProcessorFast if is_torchvision_available() else None

    def setUp(self):
        super().setUp()
        self.image_processor_tester = BlipImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        for image_processing_class in self.image_processor_list:
            image_processor = image_processing_class(**self.image_processor_dict)
            self.assertTrue(hasattr(image_processor, "do_resize"))
            self.assertTrue(hasattr(image_processor, "size"))
            self.assertTrue(hasattr(image_processor, "do_normalize"))
            self.assertTrue(hasattr(image_processor, "image_mean"))
            self.assertTrue(hasattr(image_processor, "image_std"))
            self.assertTrue(hasattr(image_processor, "do_convert_rgb"))


@require_torch
@require_vision

target: BlipImageProcessingTest
---
language: python
repo: ansible__ansible
path: test/lib/ansible_test/_internal/commands/integration/cloud/hcloud.py
class_span: { "start": 2148, "end": 3007 }
source:

class ____(CloudEnvironment):
    """Hetzner Cloud cloud environment plugin. Updates integration test environment after delegation."""

    def get_environment_config(self) -> CloudEnvironmentConfig:
        """Return environment configuration for use in the test environment after delegation."""
        parser = configparser.ConfigParser()
        parser.read(self.config_path)

        env_vars = dict(
            HCLOUD_TOKEN=parser.get('default', 'hcloud_api_token'),
        )

        display.sensitive.add(env_vars['HCLOUD_TOKEN'])

        ansible_vars = dict(
            hcloud_prefix=self.resource_prefix,
        )

        ansible_vars.update(dict((key.lower(), value) for key, value in env_vars.items()))

        return CloudEnvironmentConfig(
            env_vars=env_vars,
            ansible_vars=ansible_vars,
        )

target: HcloudCloudEnvironment
---
language: python
repo: ansible__ansible
path: test/integration/targets/collections/test_task_resolved_plugin/action_plugins/legacy_action.py
class_span: { "start": 178, "end": 348 }
source:

class ____(ActionBase):
    TRANSFERS_FILES = False
    _VALID_ARGS = frozenset()

    def run(self, tmp=None, task_vars=None):
        return {'changed': False}

target: ActionModule
---
language: python
repo: airbytehq__airbyte
path: airbyte-integrations/connectors/source-github/source_github/github_schema.py
class_span: { "start": 78555, "end": 79232 }
source:

class ____(sgqlc.types.Enum):
    """Emojis that can be attached to Issues, Pull Requests and Comments.

    Enumeration Choices:

    * `CONFUSED`: Represents the `:confused:` emoji.
    * `EYES`: Represents the `:eyes:` emoji.
    * `HEART`: Represents the `:heart:` emoji.
    * `HOORAY`: Represents the `:hooray:` emoji.
    * `LAUGH`: Represents the `:laugh:` emoji.
    * `ROCKET`: Represents the `:rocket:` emoji.
    * `THUMBS_DOWN`: Represents the `:-1:` emoji.
    * `THUMBS_UP`: Represents the `:+1:` emoji.
    """

    __schema__ = github_schema
    __choices__ = ("CONFUSED", "EYES", "HEART", "HOORAY", "LAUGH", "ROCKET", "THUMBS_DOWN", "THUMBS_UP")

target: ReactionContent
---
language: python
repo: allegroai__clearml
path: clearml/backend_api/services/v2_23/tasks.py
class_span: { "start": 535802, "end": 536919 }
source:

class ____(Response):
    """
    Response of tasks.update_batch endpoint.

    :param updated: Number of tasks updated (0 or 1)
    :type updated: int
    """

    _service = "tasks"
    _action = "update_batch"
    _version = "2.23"
    _schema = {
        "definitions": {},
        "properties": {
            "updated": {
                "description": "Number of tasks updated (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            }
        },
        "type": "object",
    }

    def __init__(self, updated=None, **kwargs):
        super(UpdateBatchResponse, self).__init__(**kwargs)
        self.updated = updated

    @schema_property("updated")
    def updated(self):
        return self._property_updated

    @updated.setter
    def updated(self, value):
        if value is None:
            self._property_updated = None
            return
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "updated", six.integer_types)
        self._property_updated = value

target: UpdateBatchResponse
---
language: python
repo: PyCQA__pylint
path: tests/functional/i/invalid/invalid_getnewargs/invalid_getnewargs_returned.py
class_span: { "start": 351, "end": 483 }
source:

class ____:
    """__getnewargs__ returns <type 'tuple'>"""

    def __getnewargs__(self):
        return tuple()

target: SecondGoodGetNewArgs
---
language: python
repo: PrefectHQ__prefect
path: src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
class_span: { "start": 142403, "end": 142820 }
source:

class ____(sgqlc.types.Input):
    """
    See source code for more info.
    """

    __schema__ = graphql_schema
    __field_names__ = ("field", "direction")
    field = sgqlc.types.Field(
        sgqlc.types.non_null(SponsorshipNewsletterOrderField), graphql_name="field"
    )
    direction = sgqlc.types.Field(
        sgqlc.types.non_null(OrderDirection), graphql_name="direction"
    )

target: SponsorshipNewsletterOrder
---
language: python
repo: tensorflow__tensorflow
path: tensorflow/tools/compatibility/tf_upgrade.py
class_span: { "start": 839, "end": 9035 }
source:

class ____(ast_edits.APIChangeSpec):
  """List of maps that describe what changed in the API."""

  def __init__(self):
    # Maps from a function name to a dictionary that describes how to
    # map from an old argument keyword to the new argument keyword.
    self.function_keyword_renames = {
        "tf.batch_matmul": {"adj_x": "adjoint_a", "adj_y": "adjoint_b"},
        "tf.count_nonzero": {"reduction_indices": "axis"},
        "tf.reduce_all": {"reduction_indices": "axis"},
        "tf.reduce_any": {"reduction_indices": "axis"},
        "tf.reduce_max": {"reduction_indices": "axis"},
        "tf.reduce_mean": {"reduction_indices": "axis"},
        "tf.reduce_min": {"reduction_indices": "axis"},
        "tf.reduce_prod": {"reduction_indices": "axis"},
        "tf.reduce_sum": {"reduction_indices": "axis"},
        "tf.reduce_logsumexp": {"reduction_indices": "axis"},
        "tf.expand_dims": {"dim": "axis"},
        "tf.argmax": {"dimension": "axis"},
        "tf.argmin": {"dimension": "axis"},
        "tf.reduce_join": {"reduction_indices": "axis"},
        "tf.sparse_concat": {"concat_dim": "axis"},
        "tf.sparse_split": {"split_dim": "axis"},
        "tf.sparse_reduce_sum": {"reduction_axes": "axis"},
        "tf.reverse_sequence": {"seq_dim": "seq_axis", "batch_dim": "batch_axis"},
        "tf.sparse_reduce_sum_sparse": {"reduction_axes": "axis"},
        "tf.squeeze": {"squeeze_dims": "axis"},
        "tf.split": {"split_dim": "axis", "num_split": "num_or_size_splits"},
        "tf.concat": {"concat_dim": "axis"},
    }

    # Mapping from function to the new name of the function
    self.symbol_renames = {
        "tf.inv": "tf.reciprocal",
        "tf.contrib.deprecated.scalar_summary": "tf.summary.scalar",
        "tf.contrib.deprecated.histogram_summary": "tf.summary.histogram",
        "tf.listdiff": "tf.setdiff1d",
        "tf.list_diff": "tf.setdiff1d",
        "tf.mul": "tf.multiply",
        "tf.neg": "tf.negative",
        "tf.sub": "tf.subtract",
        "tf.train.SummaryWriter": "tf.summary.FileWriter",
        "tf.scalar_summary": "tf.summary.scalar",
        "tf.histogram_summary": "tf.summary.histogram",
        "tf.audio_summary": "tf.summary.audio",
        "tf.image_summary": "tf.summary.image",
        "tf.merge_summary": "tf.summary.merge",
        "tf.merge_all_summaries": "tf.summary.merge_all",
        "tf.image.per_image_whitening": "tf.image.per_image_standardization",
        "tf.all_variables": "tf.global_variables",
        "tf.VARIABLES": "tf.GLOBAL_VARIABLES",
        "tf.initialize_all_variables": "tf.global_variables_initializer",
        "tf.initialize_variables": "tf.variables_initializer",
        "tf.initialize_local_variables": "tf.local_variables_initializer",
        "tf.batch_matrix_diag": "tf.matrix_diag",
        "tf.batch_band_part": "tf.band_part",
        "tf.batch_set_diag": "tf.set_diag",
        "tf.batch_matrix_transpose": "tf.matrix_transpose",
        "tf.batch_matrix_determinant": "tf.matrix_determinant",
        "tf.batch_matrix_inverse": "tf.matrix_inverse",
        "tf.batch_cholesky": "tf.cholesky",
        "tf.batch_cholesky_solve": "tf.cholesky_solve",
        "tf.batch_matrix_solve": "tf.matrix_solve",
        "tf.batch_matrix_triangular_solve": "tf.matrix_triangular_solve",
        "tf.batch_matrix_solve_ls": "tf.matrix_solve_ls",
        "tf.batch_self_adjoint_eig": "tf.self_adjoint_eig",
        "tf.batch_self_adjoint_eigvals": "tf.self_adjoint_eigvals",
        "tf.batch_svd": "tf.svd",
        "tf.batch_fft": "tf.fft",
        "tf.batch_ifft": "tf.ifft",
        "tf.batch_fft2d": "tf.fft2d",
        "tf.batch_ifft2d": "tf.ifft2d",
        "tf.batch_fft3d": "tf.fft3d",
        "tf.batch_ifft3d": "tf.ifft3d",
        "tf.select": "tf.where",
        "tf.complex_abs": "tf.abs",
        "tf.batch_matmul": "tf.matmul",
        "tf.pack": "tf.stack",
        "tf.unpack": "tf.unstack",
        "tf.op_scope": "tf.name_scope",
    }

    self.change_to_function = {
        "tf.ones_initializer",
        "tf.zeros_initializer",
    }

    # Functions that were reordered should be changed to the new keyword args
    # for safety, if positional arguments are used. If you have reversed the
    # positional arguments yourself, this could do the wrong thing.
    self.function_reorders = {
        "tf.split": ["axis", "num_or_size_splits", "value", "name"],
        "tf.sparse_split": ["axis", "num_or_size_splits", "value", "name"],
        "tf.concat": ["concat_dim", "values", "name"],
        "tf.svd": ["tensor", "compute_uv", "full_matrices", "name"],
        "tf.nn.softmax_cross_entropy_with_logits": [
            "logits", "labels", "dim", "name"
        ],
        "tf.nn.sparse_softmax_cross_entropy_with_logits": [
            "logits", "labels", "name"
        ],
        "tf.nn.sigmoid_cross_entropy_with_logits": ["logits", "labels", "name"],
        "tf.op_scope": ["values", "name", "default_name"],
    }

    # Warnings that should be printed if corresponding functions are used.
    self.function_warnings = {
        "tf.reverse": (
            ast_edits.ERROR,
            "tf.reverse has had its argument semantics changed "
            "significantly. The converter cannot detect this reliably, so "
            "you need to inspect this usage manually.\n"),
    }

    self.module_deprecations = {}


if __name__ == "__main__":
  parser = argparse.ArgumentParser(
      formatter_class=argparse.RawDescriptionHelpFormatter,
      description="""Convert a TensorFlow Python file to 1.0

Simple usage:
  tf_convert.py --infile foo.py --outfile bar.py
  tf_convert.py --intree ~/code/old --outtree ~/code/new
""")
  parser.add_argument(
      "--infile",
      dest="input_file",
      help="If converting a single file, the name of the file "
      "to convert")
  parser.add_argument(
      "--outfile",
      dest="output_file",
      help="If converting a single file, the output filename.")
  parser.add_argument(
      "--intree",
      dest="input_tree",
      help="If converting a whole tree of files, the directory "
      "to read from (relative or absolute).")
  parser.add_argument(
      "--outtree",
      dest="output_tree",
      help="If converting a whole tree of files, the output "
      "directory (relative or absolute).")
  parser.add_argument(
      "--copyotherfiles",
      dest="copy_other_files",
      help=("If converting a whole tree of files, whether to "
            "copy the other files."),
      type=bool,
      default=False)
  parser.add_argument(
      "--reportfile",
      dest="report_filename",
      help=("The name of the file where the report log is "
            "stored."
            "(default: %(default)s)"),
      default="report.txt")
  args = parser.parse_args()

  upgrade = ast_edits.ASTCodeUpgrader(TFAPIChangeSpec())
  report_text = None
  report_filename = args.report_filename
  files_processed = 0
  if args.input_file:
    files_processed, report_text, errors = upgrade.process_file(
        args.input_file, args.output_file)
    files_processed = 1
  elif args.input_tree:
    files_processed, report_text, errors = upgrade.process_tree(
        args.input_tree, args.output_tree, args.copy_other_files)
  else:
    parser.print_help()
  if report_text:
    open(report_filename, "w").write(report_text)
    print("TensorFlow 1.0 Upgrade Script")
    print("-----------------------------")
    print("Converted %d files\n" % files_processed)
    print("Detected %d errors that require attention" % len(errors))
    print("-" * 80)
    print("\n".join(errors))
    print("\nMake sure to read the detailed log %r\n" % report_filename)

target: TFAPIChangeSpec
---
language: python
repo: numba__numba
path: numba/cuda/tests/cudapy/test_vectorize.py
class_span: { "start": 1385, "end": 9248 }
source:

class ____(CUDATestCase):
    # Presumably chosen as an odd number unlikely to coincide with the total
    # thread count, and large enough to ensure a significant number of blocks
    # are used.
    N = 1000001

    def test_scalar(self):
        @vectorize(signatures, target='cuda')
        def vector_add(a, b):
            return a + b

        a = 1.2
        b = 2.3
        c = vector_add(a, b)
        self.assertEqual(c, a + b)

    def test_1d(self):
        @vectorize(signatures, target='cuda')
        def vector_add(a, b):
            return a + b

        for ty in dtypes:
            data = np.array(np.random.random(self.N), dtype=ty)
            expected = np.add(data, data)
            actual = vector_add(data, data)
            np.testing.assert_allclose(expected, actual)
            self.assertEqual(actual.dtype, ty)

    def test_1d_async(self):
        @vectorize(signatures, target='cuda')
        def vector_add(a, b):
            return a + b

        stream = cuda.stream()

        for ty in dtypes:
            data = np.array(np.random.random(self.N), dtype=ty)
            device_data = cuda.to_device(data, stream)

            dresult = vector_add(device_data, device_data, stream=stream)
            actual = dresult.copy_to_host()

            expected = np.add(data, data)
            np.testing.assert_allclose(expected, actual)
            self.assertEqual(actual.dtype, ty)

    def test_nd(self):
        @vectorize(signatures, target='cuda')
        def vector_add(a, b):
            return a + b

        for nd, dtype, order in product(range(1, 8), dtypes, orders):
            shape = (4,) * nd
            data = np.random.random(shape).astype(dtype)
            data2 = np.array(data.T, order=order)
            expected = data + data2
            actual = vector_add(data, data2)
            np.testing.assert_allclose(expected, actual)
            self.assertEqual(actual.dtype, dtype)

    def test_output_arg(self):
        @vectorize(signatures, target='cuda')
        def vector_add(a, b):
            return a + b

        A = np.arange(10, dtype=np.float32)
        B = np.arange(10, dtype=np.float32)
        expected = A + B
        actual = np.empty_like(A)
        vector_add(A, B, out=actual)
        np.testing.assert_allclose(expected, actual)
        self.assertEqual(expected.dtype, actual.dtype)

    def test_reduce(self):
        @vectorize(signatures, target='cuda')
        def vector_add(a, b):
            return a + b

        dtype = np.int32
        for n in input_sizes:
            x = np.arange(n, dtype=dtype)
            expected = np.add.reduce(x)
            actual = vector_add.reduce(x)
            np.testing.assert_allclose(expected, actual)
            # np.add.reduce is special-cased to return an int64 for any int
            # arguments, so we can't compare against its returned dtype when
            # we're checking the general reduce machinery (which just happens
            # to be using addition). Instead, compare against the input dtype.
            self.assertEqual(dtype, actual.dtype)

    def test_reduce_async(self):
        @vectorize(signatures, target='cuda')
        def vector_add(a, b):
            return a + b

        stream = cuda.stream()
        dtype = np.int32
        for n in input_sizes:
            x = np.arange(n, dtype=dtype)
            expected = np.add.reduce(x)
            dx = cuda.to_device(x, stream)
            actual = vector_add.reduce(dx, stream=stream)
            np.testing.assert_allclose(expected, actual)
            # Compare against the input dtype as in test_reduce().
            self.assertEqual(dtype, actual.dtype)

    def test_manual_transfer(self):
        @vectorize(signatures, target='cuda')
        def vector_add(a, b):
            return a + b

        n = 10
        x = np.arange(n, dtype=np.int32)
        dx = cuda.to_device(x)
        expected = x + x
        actual = vector_add(x, dx).copy_to_host()
        np.testing.assert_equal(expected, actual)
        self.assertEqual(expected.dtype, actual.dtype)

    def test_ufunc_output_2d(self):
        @vectorize(signatures, target='cuda')
        def vector_add(a, b):
            return a + b

        n = 10
        x = np.arange(n, dtype=np.int32).reshape(2, 5)
        dx = cuda.to_device(x)
        vector_add(dx, dx, out=dx)

        expected = x + x
        actual = dx.copy_to_host()
        np.testing.assert_equal(expected, actual)
        self.assertEqual(expected.dtype, actual.dtype)

    def check_tuple_arg(self, a, b):
        @vectorize(signatures, target='cuda')
        def vector_add(a, b):
            return a + b

        r = vector_add(a, b)
        np.testing.assert_equal(np.asarray(a) + np.asarray(b), r)

    def test_tuple_arg(self):
        a = (1.0, 2.0, 3.0)
        b = (4.0, 5.0, 6.0)
        self.check_tuple_arg(a, b)

    def test_namedtuple_arg(self):
        Point = namedtuple('Point', ('x', 'y', 'z'))
        a = Point(x=1.0, y=2.0, z=3.0)
        b = Point(x=4.0, y=5.0, z=6.0)
        self.check_tuple_arg(a, b)

    def test_tuple_of_array_arg(self):
        arr = np.arange(10, dtype=np.int32)
        a = (arr, arr + 1)
        b = (arr + 2, arr + 2)
        self.check_tuple_arg(a, b)

    def test_tuple_of_namedtuple_arg(self):
        Point = namedtuple('Point', ('x', 'y', 'z'))
        a = (Point(x=1.0, y=2.0, z=3.0), Point(x=1.5, y=2.5, z=3.5))
        b = (Point(x=4.0, y=5.0, z=6.0), Point(x=4.5, y=5.5, z=6.5))
        self.check_tuple_arg(a, b)

    def test_namedtuple_of_array_arg(self):
        xs1 = np.arange(10, dtype=np.int32)
        ys1 = xs1 + 2
        xs2 = np.arange(10, dtype=np.int32) * 2
        ys2 = xs2 + 1
        Points = namedtuple('Points', ('xs', 'ys'))
        a = Points(xs=xs1, ys=ys1)
        b = Points(xs=xs2, ys=ys2)
        self.check_tuple_arg(a, b)

    def test_name_attribute(self):
        @vectorize('f8(f8)', target='cuda')
        def bar(x):
            return x ** 2

        self.assertEqual(bar.__name__, 'bar')

    def test_no_transfer_for_device_data(self):
        # Initialize test data on the device prior to banning host <-> device
        # transfer
        noise = np.random.randn(1, 3, 64, 64).astype(np.float32)
        noise = cuda.to_device(noise)

        # A mock of a CUDA function that always raises a CudaAPIError
        def raising_transfer(*args, **kwargs):
            raise CudaAPIError(999, 'Transfer not allowed')

        # Use the mock for transfers between the host and device
        old_HtoD = getattr(driver, 'cuMemcpyHtoD', None)
        old_DtoH = getattr(driver, 'cuMemcpyDtoH', None)

        setattr(driver, 'cuMemcpyHtoD', raising_transfer)
        setattr(driver, 'cuMemcpyDtoH', raising_transfer)

        # Ensure that the mock functions are working as expected
        with self.assertRaisesRegex(CudaAPIError, "Transfer not allowed"):
            noise.copy_to_host()

        with self.assertRaisesRegex(CudaAPIError, "Transfer not allowed"):
            cuda.to_device([1])

        try:
            # Check that defining and calling a ufunc with data on the device
            # induces no transfers
            @vectorize(['float32(float32)'], target='cuda')
            def func(noise):
                return noise + 1.0

            func(noise)
        finally:
            # Replace our mocks with the original implementations. If there was
            # no original implementation, simply remove ours.
            if old_HtoD is not None:
                setattr(driver, 'cuMemcpyHtoD', old_HtoD)
            else:
                del driver.cuMemcpyHtoD
            if old_DtoH is not None:
                setattr(driver, 'cuMemcpyDtoH', old_DtoH)
            else:
                del driver.cuMemcpyDtoH


if __name__ == '__main__':
    unittest.main()

target: TestCUDAVectorize
---
language: python
repo: coleifer__peewee
path: tests/pwiz_integration.py
class_span: { "start": 3472, "end": 4383 }
source:

class ____(BasePwizTestCase):
    requires = [User, Note, Category]

    def test_print_models(self):
        with capture_output() as output:
            print_models(self.introspector)

        self.assertEqual(output.data.strip(), EXPECTED)

    def test_print_header(self):
        cmdline = '-i -e sqlite %s' % db.database

        with capture_output() as output:
            with mock.patch('pwiz.datetime.datetime') as mock_datetime:
                now = mock_datetime.now.return_value
                now.strftime.return_value = 'February 03, 2015 15:30PM'
                print_header(cmdline, self.introspector)

        self.assertEqual(output.data.strip(), (
            '# Code generated by:\n'
            '# python -m pwiz %s\n'
            '# Date: February 03, 2015 15:30PM\n'
            '# Database: %s\n'
            '# Peewee version: %s') % (cmdline, db.database, peewee_version))

target: TestPwiz
---
language: python
repo: has2k1__plotnine
path: plotnine/scales/scale_color.py
class_span: { "start": 6919, "end": 7069 }
source:

class ____(scale_color_desaturate):
    """
    Create a desaturated color gradient
    """

    _aesthetics = ["fill"]


@dataclass

target: scale_fill_desaturate
---
language: python
repo: pypa__setuptools
path: setuptools/_distutils/tests/test_text_file.py
class_span: { "start": 239, "end": 3460 }
source:

class ____(support.TempdirManager):
    def test_class(self):
        # old tests moved from text_file.__main__
        # so they are really called by the buildbots

        # result 1: no fancy options
        result1 = [
            '# test file\n',
            '\n',
            'line 3 \\\n',
            '# intervening comment\n',
            ' continues on next line\n',
        ]

        # result 2: just strip comments
        result2 = ["\n", "line 3 \\\n", " continues on next line\n"]

        # result 3: just strip blank lines
        result3 = [
            "# test file\n",
            "line 3 \\\n",
            "# intervening comment\n",
            " continues on next line\n",
        ]

        # result 4: default, strip comments, blank lines,
        # and trailing whitespace
        result4 = ["line 3 \\", " continues on next line"]

        # result 5: strip comments and blanks, plus join lines (but don't
        # "collapse" joined lines
        result5 = ["line 3 continues on next line"]

        # result 6: strip comments and blanks, plus join lines (and
        # "collapse" joined lines
        result6 = ["line 3 continues on next line"]

        def test_input(count, description, file, expected_result):
            result = file.readlines()
            assert result == expected_result

        tmp_path = path.Path(self.mkdtemp())
        filename = tmp_path / 'test.txt'
        jaraco.path.build({filename.name: TEST_DATA}, tmp_path)

        in_file = TextFile(
            filename,
            strip_comments=False,
            skip_blanks=False,
            lstrip_ws=False,
            rstrip_ws=False,
        )
        try:
            test_input(1, "no processing", in_file, result1)
        finally:
            in_file.close()

        in_file = TextFile(
            filename,
            strip_comments=True,
            skip_blanks=False,
            lstrip_ws=False,
            rstrip_ws=False,
        )
        try:
            test_input(2, "strip comments", in_file, result2)
        finally:
            in_file.close()

        in_file = TextFile(
            filename,
            strip_comments=False,
            skip_blanks=True,
            lstrip_ws=False,
            rstrip_ws=False,
        )
        try:
            test_input(3, "strip blanks", in_file, result3)
        finally:
            in_file.close()

        in_file = TextFile(filename)
        try:
            test_input(4, "default processing", in_file, result4)
        finally:
            in_file.close()

        in_file = TextFile(
            filename,
            strip_comments=True,
            skip_blanks=True,
            join_lines=True,
            rstrip_ws=True,
        )
        try:
            test_input(5, "join lines without collapsing", in_file, result5)
        finally:
            in_file.close()

        in_file = TextFile(
            filename,
            strip_comments=True,
            skip_blanks=True,
            join_lines=True,
            rstrip_ws=True,
            collapse_join=True,
        )
        try:
            test_input(6, "join lines with collapsing", in_file, result6)
        finally:
            in_file.close()

target: TestTextFile
---
language: python
repo: facebook__pyre-check
path: client/tests/coverage_data_tests.py
class_span: { "start": 28241, "end": 37663 }
source:

class ____(testslide.TestCase):
    maxDiff = 2000

    def _assert_suppressions(
        self, source: str, expected: Sequence[TypeErrorSuppression]
    ) -> None:
        source_module = parse_code(
            source.replace("PYRE_FIXME", "pyre-fixme")
            .replace("PYRE_IGNORE", "pyre-ignore")
            .replace("TYPE_IGNORE", "type: ignore")
        )
        actual = coverage_data.collect_suppressions(source_module)
        self.assertEqual(actual, expected)

    def test_find_fixmes__simple(self) -> None:
        self._assert_suppressions(
            """
# PYRE_FIXME
# PYRE_FIXME with message
# PYRE_FIXME[1]
# PYRE_FIXME[10, 11] with message
# PYRE_FIXME[10,] (trailing comma is illegal, codes are ignored)
""",
            [
                TypeErrorSuppression(
                    kind=SuppressionKind.PYRE_FIXME,
                    location=Location(start_line=2, start_column=0, end_line=2, end_column=12),
                    error_codes=None,
                ),
                TypeErrorSuppression(
                    kind=SuppressionKind.PYRE_FIXME,
                    location=Location(start_line=3, start_column=0, end_line=3, end_column=25),
                    error_codes=None,
                ),
                TypeErrorSuppression(
                    kind=SuppressionKind.PYRE_FIXME,
                    location=Location(start_line=4, start_column=0, end_line=4, end_column=15),
                    error_codes=[1],
                ),
                TypeErrorSuppression(
                    kind=SuppressionKind.PYRE_FIXME,
                    location=Location(start_line=5, start_column=0, end_line=5, end_column=33),
                    error_codes=[10, 11],
                ),
                TypeErrorSuppression(
                    kind=SuppressionKind.PYRE_FIXME,
                    location=Location(start_line=6, start_column=0, end_line=6, end_column=65),
                    error_codes=[],
                ),
            ],
        )

    def test_find_ignores__simple(self) -> None:
        self._assert_suppressions(
            """
# PYRE_IGNORE
# PYRE_IGNORE with message
# PYRE_IGNORE[1]
# PYRE_IGNORE[10, 11]
# PYRE_IGNORE[10, 11] with message
# PYRE_IGNORE[10,] (trailing comma is illegal, codes are ignored)
""",
            [
                TypeErrorSuppression(
                    kind=SuppressionKind.PYRE_IGNORE,
                    location=Location(start_line=2, start_column=0, end_line=2, end_column=13),
                    error_codes=None,
                ),
                TypeErrorSuppression(
                    kind=SuppressionKind.PYRE_IGNORE,
                    location=Location(start_line=3, start_column=0, end_line=3, end_column=26),
                    error_codes=None,
                ),
                TypeErrorSuppression(
                    kind=SuppressionKind.PYRE_IGNORE,
                    location=Location(start_line=4, start_column=0, end_line=4, end_column=16),
                    error_codes=[1],
                ),
                TypeErrorSuppression(
                    kind=SuppressionKind.PYRE_IGNORE,
                    location=Location(start_line=5, start_column=0, end_line=5, end_column=21),
                    error_codes=[10, 11],
                ),
                TypeErrorSuppression(
                    kind=SuppressionKind.PYRE_IGNORE,
                    location=Location(start_line=6, start_column=0, end_line=6, end_column=34),
                    error_codes=[10, 11],
                ),
                TypeErrorSuppression(
                    kind=SuppressionKind.PYRE_IGNORE,
                    location=Location(start_line=7, start_column=0, end_line=7, end_column=66),
                    error_codes=[],
                ),
            ],
        )

    def test_find_type_ignores(self) -> None:
        self._assert_suppressions(
            """
# TYPE_IGNORE
# TYPE_IGNORE[1] (codes won't be parsed)
""",
            [
                TypeErrorSuppression(
                    kind=SuppressionKind.TYPE_IGNORE,
                    location=Location(start_line=2, start_column=0, end_line=2, end_column=14),
                    error_codes=None,
                ),
                TypeErrorSuppression(
                    kind=SuppressionKind.TYPE_IGNORE,
                    location=Location(start_line=3, start_column=0, end_line=3, end_column=42),
                    error_codes=None,
                ),
            ],
        )

    def test_find_suppressions__trailing_comments(self) -> None:
        self._assert_suppressions(
            """
a: int = 42.0 # PYRE_FIXME
b: int = 42.0 # leading comment # PYRE_FIXME[3, 4]
c: int = 42.0 # leading comment # PYRE_IGNORE[5]
f: int = 42.0 # leading comment # TYPE_IGNORE
""",
            [
                TypeErrorSuppression(
                    kind=SuppressionKind.PYRE_FIXME,
                    location=Location(start_line=2, start_column=14, end_line=2, end_column=26),
                    error_codes=None,
                ),
                TypeErrorSuppression(
                    kind=SuppressionKind.PYRE_FIXME,
                    location=Location(start_line=3, start_column=14, end_line=3, end_column=50),
                    error_codes=[3, 4],
                ),
                TypeErrorSuppression(
                    kind=SuppressionKind.PYRE_IGNORE,
                    location=Location(start_line=4, start_column=14, end_line=4, end_column=48),
                    error_codes=[5],
                ),
                TypeErrorSuppression(
                    kind=SuppressionKind.TYPE_IGNORE,
                    location=Location(start_line=5, start_column=14, end_line=5, end_column=46),
                    error_codes=None,
                ),
            ],
        )

    def test_find_suppressions__multiline_string(self) -> None:
        self._assert_suppressions(
            """
'''
# PYRE_IGNORE
'''
""",
            [],
        )

    def test_find_suppressions__nested_suppressions(self) -> None:
        # If there are multiple suppressions, we count all of them. This is unlikely
        # to arise in practice but needs to have well-defined behavior.
        self._assert_suppressions(
            """
# # PYRE_IGNORE # TYPE_IGNORE
""",
            [
                TypeErrorSuppression(
                    kind=SuppressionKind.PYRE_IGNORE,
                    location=Location(start_line=2, start_column=0, end_line=2, end_column=30),
                    error_codes=None,
                ),
                TypeErrorSuppression(
                    kind=SuppressionKind.TYPE_IGNORE,
                    location=Location(start_line=2, start_column=0, end_line=2, end_column=30),
                    error_codes=None,
                ),
            ],
        )

target: SuppressionCollectorTest
---
language: python
repo: fastai__fastai
path: fastai/layers.py
class_span: { "start": 25302, "end": 25598 }
source:

class ____(Module):
    def forward(self, x): return MishJitAutoFn.apply(x)

# %% ../nbs/01_layers.ipynb 165
Mish = nn.Mish
Swish = nn.SiLU

# %% ../nbs/01_layers.ipynb 166
for o in swish,Swish,SwishJit,mish,Mish,MishJit: o.__default_init__ = kaiming_uniform_

# %% ../nbs/01_layers.ipynb 169

target: MishJit
python
gevent__gevent
src/greentest/3.13/test_selectors.py
{ "start": 1382, "end": 15395 }
class ____: def make_socketpair(self): rd, wr = socketpair() self.addCleanup(rd.close) self.addCleanup(wr.close) return rd, wr def test_register(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() key = s.register(rd, selectors.EVENT_READ, "data") self.assertIsInstance(key, selectors.SelectorKey) self.assertEqual(key.fileobj, rd) self.assertEqual(key.fd, rd.fileno()) self.assertEqual(key.events, selectors.EVENT_READ) self.assertEqual(key.data, "data") # register an unknown event self.assertRaises(ValueError, s.register, 0, 999999) # register an invalid FD self.assertRaises(ValueError, s.register, -10, selectors.EVENT_READ) # register twice self.assertRaises(KeyError, s.register, rd, selectors.EVENT_READ) # register the same FD, but with a different object self.assertRaises(KeyError, s.register, rd.fileno(), selectors.EVENT_READ) def test_unregister(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(rd, selectors.EVENT_READ) s.unregister(rd) # unregister an unknown file obj self.assertRaises(KeyError, s.unregister, 999999) # unregister twice self.assertRaises(KeyError, s.unregister, rd) def test_unregister_after_fd_close(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() r, w = rd.fileno(), wr.fileno() s.register(r, selectors.EVENT_READ) s.register(w, selectors.EVENT_WRITE) rd.close() wr.close() s.unregister(r) s.unregister(w) @unittest.skipUnless(os.name == 'posix', "requires posix") def test_unregister_after_fd_close_and_reuse(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() r, w = rd.fileno(), wr.fileno() s.register(r, selectors.EVENT_READ) s.register(w, selectors.EVENT_WRITE) rd2, wr2 = self.make_socketpair() rd.close() wr.close() os.dup2(rd2.fileno(), r) os.dup2(wr2.fileno(), w) self.addCleanup(os.close, r) self.addCleanup(os.close, w) s.unregister(r) s.unregister(w) def test_unregister_after_socket_close(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(rd, selectors.EVENT_READ) s.register(wr, selectors.EVENT_WRITE) rd.close() wr.close() s.unregister(rd) s.unregister(wr) def test_modify(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() key = s.register(rd, selectors.EVENT_READ) # modify events key2 = s.modify(rd, selectors.EVENT_WRITE) self.assertNotEqual(key.events, key2.events) self.assertEqual(key2, s.get_key(rd)) s.unregister(rd) # modify data d1 = object() d2 = object() key = s.register(rd, selectors.EVENT_READ, d1) key2 = s.modify(rd, selectors.EVENT_READ, d2) self.assertEqual(key.events, key2.events) self.assertNotEqual(key.data, key2.data) self.assertEqual(key2, s.get_key(rd)) self.assertEqual(key2.data, d2) # modify unknown file obj self.assertRaises(KeyError, s.modify, 999999, selectors.EVENT_READ) # modify use a shortcut d3 = object() s.register = unittest.mock.Mock() s.unregister = unittest.mock.Mock() s.modify(rd, selectors.EVENT_READ, d3) self.assertFalse(s.register.called) self.assertFalse(s.unregister.called) def test_modify_unregister(self): # Make sure the fd is unregister()ed in case of error on # modify(): http://bugs.python.org/issue30014 if self.SELECTOR.__name__ == 'EpollSelector': patch = unittest.mock.patch( 'selectors.EpollSelector._selector_cls') elif self.SELECTOR.__name__ == 'PollSelector': patch = unittest.mock.patch( 'selectors.PollSelector._selector_cls') elif self.SELECTOR.__name__ == 'DevpollSelector': patch = unittest.mock.patch( 
'selectors.DevpollSelector._selector_cls') else: raise self.skipTest("") with patch as m: m.return_value.modify = unittest.mock.Mock( side_effect=ZeroDivisionError) s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(rd, selectors.EVENT_READ) self.assertEqual(len(s._map), 1) with self.assertRaises(ZeroDivisionError): s.modify(rd, selectors.EVENT_WRITE) self.assertEqual(len(s._map), 0) def test_close(self): s = self.SELECTOR() self.addCleanup(s.close) mapping = s.get_map() rd, wr = self.make_socketpair() s.register(rd, selectors.EVENT_READ) s.register(wr, selectors.EVENT_WRITE) s.close() self.assertRaises(RuntimeError, s.get_key, rd) self.assertRaises(RuntimeError, s.get_key, wr) self.assertRaises(KeyError, mapping.__getitem__, rd) self.assertRaises(KeyError, mapping.__getitem__, wr) self.assertEqual(mapping.get(rd), None) self.assertEqual(mapping.get(wr), None) def test_get_key(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() key = s.register(rd, selectors.EVENT_READ, "data") self.assertEqual(key, s.get_key(rd)) # unknown file obj self.assertRaises(KeyError, s.get_key, 999999) def test_get_map(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() sentinel = object() keys = s.get_map() self.assertFalse(keys) self.assertEqual(len(keys), 0) self.assertEqual(list(keys), []) self.assertEqual(keys.get(rd), None) self.assertEqual(keys.get(rd, sentinel), sentinel) key = s.register(rd, selectors.EVENT_READ, "data") self.assertIn(rd, keys) self.assertEqual(key, keys.get(rd)) self.assertEqual(key, keys[rd]) self.assertEqual(len(keys), 1) self.assertEqual(list(keys), [rd.fileno()]) self.assertEqual(list(keys.values()), [key]) # unknown file obj with self.assertRaises(KeyError): keys[999999] # Read-only mapping with self.assertRaises(TypeError): del keys[rd] def test_select(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(rd, selectors.EVENT_READ) wr_key = s.register(wr, selectors.EVENT_WRITE) result = s.select() for key, events in result: self.assertTrue(isinstance(key, selectors.SelectorKey)) self.assertTrue(events) self.assertFalse(events & ~(selectors.EVENT_READ | selectors.EVENT_WRITE)) self.assertEqual([(wr_key, selectors.EVENT_WRITE)], result) def test_select_read_write(self): # gh-110038: when a file descriptor is registered for both read and # write, the two events must be seen on a single call to select(). s = self.SELECTOR() self.addCleanup(s.close) sock1, sock2 = self.make_socketpair() sock2.send(b"foo") my_key = s.register(sock1, selectors.EVENT_READ | selectors.EVENT_WRITE) seen_read, seen_write = False, False result = s.select() # We get the read and write either in the same result entry or in two # distinct entries with the same key. 
        self.assertLessEqual(len(result), 2)
        for key, events in result:
            self.assertTrue(isinstance(key, selectors.SelectorKey))
            self.assertEqual(key, my_key)
            self.assertFalse(events & ~(selectors.EVENT_READ |
                                        selectors.EVENT_WRITE))
            if events & selectors.EVENT_READ:
                self.assertFalse(seen_read)
                seen_read = True
            if events & selectors.EVENT_WRITE:
                self.assertFalse(seen_write)
                seen_write = True
        self.assertTrue(seen_read)
        self.assertTrue(seen_write)

    def test_context_manager(self):
        s = self.SELECTOR()
        self.addCleanup(s.close)

        rd, wr = self.make_socketpair()

        with s as sel:
            sel.register(rd, selectors.EVENT_READ)
            sel.register(wr, selectors.EVENT_WRITE)

        self.assertRaises(RuntimeError, s.get_key, rd)
        self.assertRaises(RuntimeError, s.get_key, wr)

    def test_fileno(self):
        s = self.SELECTOR()
        self.addCleanup(s.close)

        if hasattr(s, 'fileno'):
            fd = s.fileno()
            self.assertTrue(isinstance(fd, int))
            self.assertGreaterEqual(fd, 0)

    def test_selector(self):
        s = self.SELECTOR()
        self.addCleanup(s.close)

        NUM_SOCKETS = 12
        MSG = b" This is a test."
        MSG_LEN = len(MSG)
        readers = []
        writers = []
        r2w = {}
        w2r = {}

        for i in range(NUM_SOCKETS):
            rd, wr = self.make_socketpair()
            s.register(rd, selectors.EVENT_READ)
            s.register(wr, selectors.EVENT_WRITE)
            readers.append(rd)
            writers.append(wr)
            r2w[rd] = wr
            w2r[wr] = rd

        bufs = []

        while writers:
            ready = s.select()
            ready_writers = find_ready_matching(ready, selectors.EVENT_WRITE)
            if not ready_writers:
                self.fail("no sockets ready for writing")
            wr = random.choice(ready_writers)
            wr.send(MSG)

            for i in range(10):
                ready = s.select()
                ready_readers = find_ready_matching(ready,
                                                    selectors.EVENT_READ)
                if ready_readers:
                    break
                # there might be a delay between the write to the write end
                # and when the read end is reported ready
                sleep(0.1)
            else:
                self.fail("no sockets ready for reading")
            self.assertEqual([w2r[wr]], ready_readers)
            rd = ready_readers[0]
            buf = rd.recv(MSG_LEN)
            self.assertEqual(len(buf), MSG_LEN)
            bufs.append(buf)
            s.unregister(r2w[rd])
            s.unregister(rd)
            writers.remove(r2w[rd])

        self.assertEqual(bufs, [MSG] * NUM_SOCKETS)

    @unittest.skipIf(sys.platform == 'win32',
                     'select.select() cannot be used with empty fd sets')
    def test_empty_select(self):
        # Issue #23009: Make sure EpollSelector.select() works when no FD is
        # registered.
        s = self.SELECTOR()
        self.addCleanup(s.close)

        self.assertEqual(s.select(timeout=0), [])

    def test_timeout(self):
        s = self.SELECTOR()
        self.addCleanup(s.close)

        rd, wr = self.make_socketpair()

        s.register(wr, selectors.EVENT_WRITE)
        t = time()
        self.assertEqual(1, len(s.select(0)))
        self.assertEqual(1, len(s.select(-1)))
        self.assertLess(time() - t, 0.5)

        s.unregister(wr)
        s.register(rd, selectors.EVENT_READ)
        t = time()
        self.assertFalse(s.select(0))
        self.assertFalse(s.select(-1))
        self.assertLess(time() - t, 0.5)

        t0 = time()
        self.assertFalse(s.select(1))
        t1 = time()
        dt = t1 - t0
        # Tolerate 2.0 seconds for very slow buildbots
        self.assertTrue(0.8 <= dt <= 2.0, dt)

    @unittest.skipUnless(hasattr(signal, "alarm"),
                         "signal.alarm() required for this test")
    def test_select_interrupt_exc(self):
        s = self.SELECTOR()
        self.addCleanup(s.close)

        rd, wr = self.make_socketpair()

        class InterruptSelect(Exception):
            pass

        def handler(*args):
            raise InterruptSelect

        orig_alrm_handler = signal.signal(signal.SIGALRM, handler)
        self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)

        try:
            signal.alarm(1)

            s.register(rd, selectors.EVENT_READ)
            t = time()
            # select() is interrupted by a signal which raises an exception
            with self.assertRaises(InterruptSelect):
                s.select(30)
            # select() was interrupted before the timeout of 30 seconds
            self.assertLess(time() - t, 5.0)
        finally:
            signal.alarm(0)

    @unittest.skipUnless(hasattr(signal, "alarm"),
                         "signal.alarm() required for this test")
    def test_select_interrupt_noraise(self):
        s = self.SELECTOR()
        self.addCleanup(s.close)

        rd, wr = self.make_socketpair()

        orig_alrm_handler = signal.signal(signal.SIGALRM, lambda *args: None)
        self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)

        try:
            signal.alarm(1)

            s.register(rd, selectors.EVENT_READ)
            t = time()
            # select() is interrupted by a signal, but the signal handler
            # doesn't raise an exception, so select() should be retried with
            # a recomputed timeout
            self.assertFalse(s.select(1.5))
            self.assertGreaterEqual(time() - t, 1.0)
        finally:
            signal.alarm(0)
BaseSelectorTestCase
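A minimal sketch of the selectors API exercised above, using the default selector and a socket pair; names and payloads here are illustrative only:

import selectors
import socket

sel = selectors.DefaultSelector()
rd, wr = socket.socketpair()

# Register the read end, make it ready by writing to the other end,
# then confirm select() reports it.
key = sel.register(rd, selectors.EVENT_READ, data="demo")
wr.send(b"ping")
for ready_key, events in sel.select(timeout=1):
    assert ready_key == key and events & selectors.EVENT_READ
    print(ready_key.fileobj.recv(4))  # b'ping'

sel.unregister(rd)
sel.close()
rd.close()
wr.close()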
python
huggingface__transformers
src/transformers/models/maskformer/modeling_maskformer_swin.py
{ "start": 20560, "end": 25549 }
class ____(nn.Module): def __init__(self, config, dim, input_resolution, num_heads, drop_path_rate=0.0, shift_size=0): super().__init__() self.shift_size = shift_size self.window_size = config.window_size self.input_resolution = input_resolution self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.attention = MaskFormerSwinAttention(config, dim, num_heads, self.window_size) self.drop_path = MaskFormerSwinDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity() self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.intermediate = MaskFormerSwinIntermediate(config, dim) self.output = MaskFormerSwinOutput(config, dim) def get_attn_mask(self, input_resolution): if self.shift_size > 0: # calculate attention mask for SW-MSA height, width = input_resolution img_mask = torch.zeros((1, height, width, 1)) height_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None), ) width_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None), ) count = 0 for height_slice in height_slices: for width_slice in width_slices: img_mask[:, height_slice, width_slice, :] = count count += 1 mask_windows = window_partition(img_mask, self.window_size) mask_windows = mask_windows.view(-1, self.window_size * self.window_size) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, -100.0).masked_fill(attn_mask == 0, 0.0) else: attn_mask = None return attn_mask def maybe_pad(self, hidden_states, height, width): pad_left = pad_top = 0 pad_right = (self.window_size - width % self.window_size) % self.window_size pad_bottom = (self.window_size - height % self.window_size) % self.window_size pad_values = (0, 0, pad_left, pad_right, pad_top, pad_bottom) hidden_states = nn.functional.pad(hidden_states, pad_values) return hidden_states, pad_values def forward(self, hidden_states, input_dimensions, output_attentions=False): height, width = input_dimensions batch_size, dim, channels = hidden_states.size() shortcut = hidden_states hidden_states = self.layernorm_before(hidden_states) hidden_states = hidden_states.view(batch_size, height, width, channels) # pad hidden_states to multiples of window size hidden_states, pad_values = self.maybe_pad(hidden_states, height, width) _, height_pad, width_pad, _ = hidden_states.shape # cyclic shift if self.shift_size > 0: shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else: shifted_hidden_states = hidden_states # partition windows hidden_states_windows = window_partition(shifted_hidden_states, self.window_size) hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels) attn_mask = self.get_attn_mask((height_pad, width_pad)) if attn_mask is not None: attn_mask = attn_mask.to(hidden_states_windows.device) self_attention_outputs = self.attention(hidden_states_windows, attn_mask, output_attentions=output_attentions) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels) shifted_windows = window_reverse( attention_windows, self.window_size, height_pad, width_pad ) # B height' width' C # reverse cyclic shift if self.shift_size > 0: attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, 
self.shift_size), dims=(1, 2)) else: attention_windows = shifted_windows was_padded = pad_values[3] > 0 or pad_values[5] > 0 if was_padded: attention_windows = attention_windows[:, :height, :width, :].contiguous() attention_windows = attention_windows.view(batch_size, height * width, channels) hidden_states = shortcut + self.drop_path(attention_windows) layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) layer_output = hidden_states + self.output(layer_output) outputs = (layer_output,) + outputs return outputs
MaskFormerSwinLayer
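The `window_partition`/`window_reverse` helpers used above are defined elsewhere in the file; the sketch below is an assumed Swin-style implementation, included only to illustrate the (batch, height, width, channels) -> windows -> (batch, height, width, channels) round trip the layer relies on:

import torch

def window_partition(x, window_size):
    # (B, H, W, C) -> (num_windows * B, window_size, window_size, C)
    b, h, w, c = x.shape
    x = x.view(b, h // window_size, window_size, w // window_size, window_size, c)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, c)

def window_reverse(windows, window_size, h, w):
    # inverse of window_partition
    b = windows.shape[0] // ((h // window_size) * (w // window_size))
    x = windows.view(b, h // window_size, w // window_size, window_size, window_size, -1)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(b, h, w, -1)

x = torch.randn(2, 8, 8, 4)
assert torch.equal(window_reverse(window_partition(x, 4), 4, 8, 8), x)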
python
encode__django-rest-framework
tests/test_versioning.py
{ "start": 514, "end": 650 }
class ____(APIView): def get(self, request, *args, **kwargs): return Response({'version': request.version})
RequestVersionView
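A sketch of driving the view (with the blank filled in as `RequestVersionView`) through a versioning scheme, mirroring how DRF's test suite applies a scheme per view; it assumes a configured Django settings module:

from rest_framework import versioning
from rest_framework.test import APIRequestFactory

view = RequestVersionView.as_view(versioning_class=versioning.QueryParameterVersioning)
request = APIRequestFactory().get('/endpoint/?version=1.2.3')
response = view(request)
assert response.data == {'version': '1.2.3'}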
python
apache__airflow
devel-common/src/sphinx_exts/operators_and_hooks_ref.py
{ "start": 20010, "end": 23957 }
class ____(BaseJinjaReferenceDirective): """Generate list of auth managers""" def render_content( self, *, tags: set[str] | None, header_separator: str = DEFAULT_HEADER_SEPARATOR ) -> str: return _common_render_list_content( header_separator=header_separator, resource_type="auth-managers", template="auth-managers.rst.jinja2", ) def setup(app): """Setup plugin""" app.add_directive("operators-hooks-ref", OperatorsHooksReferenceDirective) app.add_directive("transfers-ref", TransfersReferenceDirective) app.add_directive("airflow-logging", LoggingDirective) app.add_directive("airflow-configurations", AuthConfigurations) app.add_directive("airflow-secrets-backends", SecretsBackendDirective) app.add_directive("airflow-connections", ConnectionsDirective) app.add_directive("airflow-extra-links", ExtraLinksDirective) app.add_directive("airflow-notifications", NotificationsDirective) app.add_directive("airflow-executors", ExecutorsDirective) app.add_directive("airflow-queues", QueuesDirective) app.add_directive("airflow-deferrable-operators", DeferrableOperatorDirective) app.add_directive("airflow-deprecations", DeprecationsDirective) app.add_directive("airflow-dataset-schemes", AssetSchemeDirective) app.add_directive("airflow-auth-managers", AuthManagersDirective) return {"parallel_read_safe": True, "parallel_write_safe": True} option_tag = click.option( "--tag", multiple=True, help="If passed, displays integrations that have a matching tag", ) option_header_separator = click.option( "--header-separator", default=DEFAULT_HEADER_SEPARATOR, show_default=True ) @click.group(context_settings={"help_option_names": ["-h", "--help"], "max_content_width": 500}) def cli(): """Render tables with integrations""" @cli.command() @option_tag @option_header_separator def operators_and_hooks(tag: Iterable[str], header_separator: str): """Renders Operators and Hooks content""" print(_render_operator_content(tags=set(tag) if tag else None, header_separator=header_separator)) @cli.command() @option_tag @option_header_separator def transfers(tag: Iterable[str], header_separator: str): """Renders Transfers content""" print(_render_transfer_content(tags=set(tag) if tag else None, header_separator=header_separator)) @cli.command() @option_header_separator def logging(header_separator: str): """Renders Logger content""" print( _common_render_list_content( header_separator=header_separator, resource_type="logging", template="logging.rst.jinja2" ) ) @cli.command() @option_header_separator def secret_backends(header_separator: str): """Renders Secret Backends content""" print( _common_render_list_content( header_separator=header_separator, resource_type="secrets-backends", template="secret_backend.rst.jinja2", ) ) @cli.command() @option_header_separator def connections(header_separator: str): """Renders Connections content""" print( _common_render_list_content( header_separator=header_separator, resource_type="connection-types", template="connections.rst.jinja2", ) ) @cli.command() @option_header_separator def extra_links(header_separator: str): """Renders Extra links content""" print( _common_render_list_content( header_separator=header_separator, resource_type="extra-links", template="extra_links.rst.jinja2" ) ) @cli.command() @option_tag @option_header_separator def deferrable_operators(tag: Iterable[str], header_separator: str): """Renders Deferrable Operators content""" print(_render_deferrable_operator_content(header_separator=header_separator)) if __name__ == "__main__": cli()
AuthManagersDirective
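The module doubles as a click CLI; a hedged sketch of invoking it in-process (the import path is an assumption, and the module's Sphinx/Jinja dependencies must be importable):

from click.testing import CliRunner

# hypothetical import path for the module above
from operators_and_hooks_ref import cli

result = CliRunner().invoke(cli, ["logging", "--header-separator", "="])
print(result.exit_code, result.output[:200])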
python
getsentry__sentry
tests/sentry/search/test_utils.py
{ "start": 34577, "end": 37510 }
class ____(TestCase): def test_date(self) -> None: with pytest.raises(Release.DoesNotExist): get_first_last_release_for_group(self.group, LatestReleaseOrders.DATE, True) oldest = self.create_release(version="old") self.create_group_release(group=self.group, release=oldest) newest = self.create_release( version="newest", date_released=oldest.date_added + timedelta(minutes=5) ) self.create_group_release(group=self.group, release=newest) assert newest == get_first_last_release_for_group( self.group, LatestReleaseOrders.DATE, True ) assert oldest == get_first_last_release_for_group( self.group, LatestReleaseOrders.DATE, False ) group_2 = self.create_group() with pytest.raises(Release.DoesNotExist): get_first_last_release_for_group(group_2, LatestReleaseOrders.DATE, True) self.create_group_release(group=group_2, release=oldest) assert oldest == get_first_last_release_for_group(group_2, LatestReleaseOrders.DATE, True) assert oldest == get_first_last_release_for_group(group_2, LatestReleaseOrders.DATE, False) def test_semver(self) -> None: with pytest.raises(Release.DoesNotExist): get_first_last_release_for_group(self.group, LatestReleaseOrders.SEMVER, True) latest = self.create_release(version="test@2.0.0") middle = self.create_release(version="test@1.3.2") earliest = self.create_release( version="test@1.0.0", date_released=latest.date_added + timedelta(minutes=5) ) self.create_group_release(group=self.group, release=latest) self.create_group_release(group=self.group, release=middle) self.create_group_release(group=self.group, release=earliest) assert latest == get_first_last_release_for_group( self.group, LatestReleaseOrders.SEMVER, True ) assert earliest == get_first_last_release_for_group( self.group, LatestReleaseOrders.DATE, True ) assert earliest == get_first_last_release_for_group( self.group, LatestReleaseOrders.SEMVER, False ) assert latest == get_first_last_release_for_group( self.group, LatestReleaseOrders.DATE, False ) group_2 = self.create_group() with pytest.raises(Release.DoesNotExist): get_first_last_release_for_group(group_2, LatestReleaseOrders.SEMVER, True) self.create_group_release(group=group_2, release=latest) self.create_group_release(group=group_2, release=middle) assert latest == get_first_last_release_for_group(group_2, LatestReleaseOrders.SEMVER, True) assert middle == get_first_last_release_for_group( group_2, LatestReleaseOrders.SEMVER, False ) @control_silo_test
GetFirstLastReleaseForGroupTest
python
cython__cython
Tools/dump_github_issues.py
{ "start": 282, "end": 3702 }
class ____(Exception): pass def gen_urls(repo): i = 0 while True: yield f"https://api.github.com/repos/{repo}/issues?state=all&per_page=100&page={i}" i += 1 def read_rate_limit(): with urlopen("https://api.github.com/rate_limit") as p: return json.load(p) def parse_rate_limit(limits): limits = limits['resources']['core'] return limits['limit'], limits['remaining'], datetime.fromtimestamp(limits['reset']) def load_url(url): with urlopen(url) as p: data = json.load(p) if isinstance(data, dict) and 'rate limit' in data.get('message', ''): raise RateLimitReached() assert isinstance(data, list), type(data) return data or None # None indicates empty last page def join_list_data(lists): result = [] for data in lists: if not data: break result.extend(data) return result def output_filename(repo): timestamp = datetime.now() return f"github_issues_{repo.replace('/', '_')}_{timestamp.strftime('%Y%m%d_%H%M%S')}.json.gz" def write_gzjson(file_name, data, indent=2): with gzip.open(file_name, "wt", encoding='utf-8') as gz: json.dump(data, gz, indent=indent) def find_origin_url(git_config=GIT_CONFIG_FILE): assert os.path.exists(git_config) parser = configparser.ConfigParser() parser.read(git_config) return parser.get('remote "origin"', 'url') def parse_repo_name(git_url): if git_url.endswith('.git'): git_url = git_url[:-4] return '/'.join(git_url.split('/')[-2:]) def dump_issues(repo): """Main entry point.""" print(f"Reading issues from repo '{repo}'") urls = gen_urls(repo) try: paged_data = map(load_url, urls) issues = join_list_data(paged_data) except RateLimitReached: limit, remaining, reset_time = parse_rate_limit(read_rate_limit()) print(f"FAILURE: Rate limits ({limit}) reached, remaining: {remaining}, reset at {reset_time}") return filename = output_filename(repo) print(f"Writing {len(issues)} to {filename}") write_gzjson(filename, issues) ### TESTS def test_join_list_data(): assert join_list_data([]) == [] assert join_list_data([[1,2]]) == [1,2] assert join_list_data([[1,2], [3]]) == [1,2,3] assert join_list_data([[0], [1,2], [3]]) == [0,1,2,3] assert join_list_data([[0], [1,2], [[[]],[]]]) == [0,1,2,[[]],[]] def test_output_filename(): filename = output_filename("re/po") import re assert re.match(r"github_issues_re_po_[0-9]{8}_[0-9]{6}\.json", filename) def test_find_origin_url(): assert find_origin_url() def test_parse_repo_name(): assert parse_repo_name("https://github.com/cython/cython") == "cython/cython" assert parse_repo_name("git+ssh://git@github.com/cython/cython.git") == "cython/cython" assert parse_repo_name("git+ssh://git@github.com/fork/cython.git") == "fork/cython" def test_write_gzjson(): import tempfile with tempfile.NamedTemporaryFile() as tmp: write_gzjson(tmp.name, [{}]) # test JSON format with gzip.open(tmp.name) as f: assert json.load(f) == [{}] # test indentation with gzip.open(tmp.name) as f: assert f.read() == b'[\n {}\n]' ### MAIN if __name__ == '__main__': repo_name = parse_repo_name(find_origin_url()) dump_issues(repo_name)
RateLimitReached
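A short usage sketch, assuming the script is importable as `dump_github_issues` (hypothetical module name) and unauthenticated GitHub API quota remains:

from dump_github_issues import parse_repo_name, dump_issues  # hypothetical import

assert parse_repo_name("https://github.com/cython/cython") == "cython/cython"
dump_issues("cython/cython")  # writes github_issues_cython_cython_<timestamp>.json.gz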
python
astropy__astropy
astropy/utils/console.py
{ "start": 26612, "end": 31475 }
class ____:
    """
    A class that displays either a `ProgressBar` or `Spinner`
    depending on whether the total size of the operation is
    known or not.

    It is designed to be used with the ``with`` statement::

        if file.has_length():
            length = file.get_length()
        else:
            length = None
        bytes_read = 0
        with ProgressBarOrSpinner(length) as bar:
            while file.read(blocksize):
                bytes_read += blocksize
                bar.update(bytes_read)
    """

    def __init__(self, total, msg, color="default", file=None):
        """
        Parameters
        ----------
        total : int or None
            If an int, the number of increments in the process being
            tracked and a `ProgressBar` is displayed.  If `None`, a
            `Spinner` is displayed.

        msg : str
            The message to display above the `ProgressBar` or
            alongside the `Spinner`.

        color : str, optional
            The color of ``msg``, if any.  Must be an ANSI terminal
            color name.  Must be one of: black, red, green, brown,
            blue, magenta, cyan, lightgrey, default, darkgrey,
            lightred, lightgreen, yellow, lightblue, lightmagenta,
            lightcyan, white.

        file : :term:`file-like (writeable)`, optional
            The file to write to.  Defaults to `sys.stdout`.  If
            ``file`` is not a tty (as determined by calling its `isatty`
            member, if any), only ``msg`` will be displayed: the
            `ProgressBar` or `Spinner` will be silent.
        """
        if file is None:
            file = sys.stdout

        if total is None or not isatty(file):
            self._is_spinner = True
            self._obj = Spinner(msg, color=color, file=file)
        else:
            self._is_spinner = False
            color_print(msg, color, file=file)
            self._obj = ProgressBar(total, file=file)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        return self._obj.__exit__(exc_type, exc_value, traceback)

    def update(self, value):
        """
        Update the progress bar to the given value (out of the total
        given to the constructor).
        """
        self._obj.update(value)


def print_code_line(line, col=None, file=None, tabwidth=8, width=70):
    """
    Prints a line of source code, highlighting a particular character
    position in the line.  Useful for displaying the context of error
    messages.

    If the line is more than ``width`` characters, the line is
    truncated accordingly and '…' characters are inserted at the
    front and/or end.

    It looks like this::

        there_is_a_syntax_error_here :
                                     ^

    Parameters
    ----------
    line : unicode
        The line of code to display

    col : int, optional
        The character in the line to highlight.  ``col`` must be less
        than ``len(line)``.

    file : :term:`file-like (writeable)`, optional
        Where to write to.  Defaults to `sys.stdout`.

    tabwidth : int, optional
        The number of spaces per tab (``'\\t'``) character.  Default
        is 8.  All tabs will be converted to spaces to ensure that the
        caret lines up with the correct column.

    width : int, optional
        The width of the display, beyond which the line will be
        truncated.  Defaults to 70 (this matches the default in the
        standard library's `textwrap` module).
    """
    if file is None:
        file = sys.stdout

    if conf.unicode_output:
        ellipsis = "…"
    else:
        ellipsis = "..."
write = file.write if col is not None: if col >= len(line): raise ValueError("col must be less than the line length.") ntabs = line[:col].count("\t") col += ntabs * (tabwidth - 1) line = line.rstrip("\n") line = line.replace("\t", " " * tabwidth) if col is not None and col > width: new_col = min(width // 2, len(line) - col) offset = col - new_col line = line[offset + len(ellipsis) :] width -= len(ellipsis) new_col = col col -= offset color_print(ellipsis, "darkgrey", file=file, end="") if len(line) > width: write(line[: width - len(ellipsis)]) color_print(ellipsis, "darkgrey", file=file) else: write(line) write("\n") if col is not None: write(" " * col) color_print("^", "red", file=file) # The following three Getch* classes implement unbuffered character reading from # stdin on Windows, and Unix. This is taken directly from ActiveState # Code Recipes: # http://code.activestate.com/recipes/134892-getch-like-unbuffered-character-reading-from-stdin/ #
ProgressBarOrSpinner
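A short usage sketch of the two utilities above (assumes `astropy` is installed; a progress bar is shown only when the total is known and the stream is a tty):

from astropy.utils.console import ProgressBarOrSpinner, print_code_line

# Known total -> progress bar; total=None would fall back to a Spinner.
with ProgressBarOrSpinner(4, "downloading") as bar:
    for done in range(1, 5):
        bar.update(done)

print_code_line("there_is_a_syntax_error_here :", col=21)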
python
pytorch__pytorch
torch/testing/_internal/distributed/rpc/dist_autograd_test.py
{ "start": 6534, "end": 6809 }
class ____(Enum): LOCAL = 1 # Run the operation locally. RPC_SYNC = 2 # Run the operation using rpc_sync REMOTE = 3 # Run the operation using remote. RPC_ASYNC = 4 # Run the operation using rpc_async # Common utils for both CPU and CUDA test suites
ExecMode
python
huggingface__transformers
src/transformers/models/mimi/modeling_mimi.py
{ "start": 32455, "end": 38498 }
class ____(MimiAttention):
    """
    Mimi flash attention module. This module inherits from `MimiAttention` as the weights of the module stay
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        if isinstance(past_key_values, StaticCache):
            raise ValueError(
                "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` "
                "make sure to use `sdpa` in the meantime, and open an issue at https://github.com/huggingface/transformers"
            )

        output_attentions = False

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # Flash attention requires the input to have the shape
        # batch_size x seq_length x num_heads x head_dim
        # therefore we just need to keep the original shape
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        cos, sin = self.rotary_emb(value_states, position_ids)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_values is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # TODO: These transposes are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
        # to be able to avoid many of these transpose/reshape/view.
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        dropout_rate = self.attention_dropout if self.training else 0.0

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons;
        # therefore the input hidden states get silently cast to float32. Hence, we need to
        # cast them back to the correct dtype just to be sure everything works as expected.
        # This might slow down training & inference, so it is recommended to not cast the LayerNorms
        # in fp32.
(MimiRMSNorm handles it correctly) input_dtype = query_states.dtype device_type = query_states.device.type if query_states.device.type != "mps" else "cpu" if input_dtype == torch.float32: if torch.is_autocast_enabled(): # NOTE: `torch.get_autocast_dtype` is there starting from PyTorch 2.4 target_dtype = ( torch.get_autocast_dtype(device_type) if hasattr(torch, "get_autocast_dtype") else torch.get_autocast_gpu_dtype() ) # Handle the case where the model is quantized elif hasattr(self.config, "_pre_quantization_dtype"): target_dtype = self.config._pre_quantization_dtype else: target_dtype = self.q_proj.weight.dtype logger.warning_once( f"The input hidden states seems to be silently casted in float32, this might be related to" f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" f" {target_dtype}." ) query_states = query_states.to(target_dtype) key_states = key_states.to(target_dtype) value_states = value_states.to(target_dtype) attn_output = _flash_attention_forward( query_states, key_states, value_states, attention_mask, q_len, position_ids=position_ids, dropout=dropout_rate, sliding_window=getattr(self, "sliding_window", None), is_causal=self.is_causal, use_top_left_mask=self._flash_attn_uses_top_left_mask, ) attn_output = attn_output.reshape(bsz, q_len, -1).contiguous() attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights # NO LONGER EXIST Copied from transformers.models.gemma.modeling_gemma.GemmaSdpaAttention with Gemma->Mimi # TODO cyril: modular
MimiFlashAttention2
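The float32 fallback chain in the forward pass can be read in isolation; a simplified sketch under the assumption that it runs outside the model (hypothetical function name, omitting the quantization branch):

import torch

def upcast_target_dtype(module_dtype, device_type="cuda"):
    # Same selection order as above: prefer the active autocast dtype,
    # otherwise fall back to the module's own weight dtype.
    if torch.is_autocast_enabled():
        if hasattr(torch, "get_autocast_dtype"):  # PyTorch >= 2.4
            return torch.get_autocast_dtype(device_type)
        return torch.get_autocast_gpu_dtype()
    return module_dtype

print(upcast_target_dtype(torch.float16))  # torch.float16 when autocast is off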
python
numpy__numpy
numpy/testing/tests/test_utils.py
{ "start": 77505, "end": 79701 }
class ____: """ Test assert_no_gc_cycles """ def test_passes(self): def no_cycle(): b = [] b.append([]) return b with assert_no_gc_cycles(): no_cycle() assert_no_gc_cycles(no_cycle) def test_asserts(self): def make_cycle(): a = [] a.append(a) a.append(a) return a with assert_raises(AssertionError): with assert_no_gc_cycles(): make_cycle() with assert_raises(AssertionError): assert_no_gc_cycles(make_cycle) @pytest.mark.slow def test_fails(self): """ Test that in cases where the garbage cannot be collected, we raise an error, instead of hanging forever trying to clear it. """ class ReferenceCycleInDel: """ An object that not only contains a reference cycle, but creates new cycles whenever it's garbage-collected and its __del__ runs """ make_cycle = True def __init__(self): self.cycle = self def __del__(self): # break the current cycle so that `self` can be freed self.cycle = None if ReferenceCycleInDel.make_cycle: # but create a new one so that the garbage collector (GC) has more # work to do. ReferenceCycleInDel() try: w = weakref.ref(ReferenceCycleInDel()) try: with assert_raises(RuntimeError): # this will be unable to get a baseline empty garbage assert_no_gc_cycles(lambda: None) except AssertionError: # the above test is only necessary if the GC actually tried to free # our object anyway. if w() is not None: pytest.skip("GC does not call __del__ on cyclic objects") raise finally: # make sure that we stop creating reference cycles ReferenceCycleInDel.make_cycle = False
TestAssertNoGcCycles
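A minimal sketch of the utility under test, which works both as a context manager and as a callable wrapper:

import numpy as np
from numpy.testing import assert_no_gc_cycles, assert_raises

def make_cycle():
    a = []
    a.append(a)  # self-referencing list -> reference cycle

with assert_no_gc_cycles():
    np.arange(3) + 1  # ordinary work that creates no cycles

with assert_raises(AssertionError):
    assert_no_gc_cycles(make_cycle)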
python
aio-libs__aiohttp
aiohttp/client.py
{ "start": 5957, "end": 49210 }
class ____: """First-class interface for making HTTP requests.""" __slots__ = ( "_base_url", "_base_url_origin", "_source_traceback", "_connector", "_loop", "_cookie_jar", "_connector_owner", "_default_auth", "_version", "_json_serialize", "_requote_redirect_url", "_timeout", "_raise_for_status", "_auto_decompress", "_trust_env", "_default_headers", "_skip_auto_headers", "_request_class", "_response_class", "_ws_response_class", "_trace_configs", "_read_bufsize", "_max_line_size", "_max_field_size", "_resolve_charset", "_default_proxy", "_default_proxy_auth", "_retry_connection", "_middlewares", ) def __init__( self, base_url: StrOrURL | None = None, *, connector: BaseConnector | None = None, cookies: LooseCookies | None = None, headers: LooseHeaders | None = None, proxy: StrOrURL | None = None, proxy_auth: BasicAuth | None = None, skip_auto_headers: Iterable[str] | None = None, auth: BasicAuth | None = None, json_serialize: JSONEncoder = json.dumps, request_class: type[ClientRequest] = ClientRequest, response_class: type[ClientResponse] = ClientResponse, ws_response_class: type[ClientWebSocketResponse] = ClientWebSocketResponse, version: HttpVersion = http.HttpVersion11, cookie_jar: AbstractCookieJar | None = None, connector_owner: bool = True, raise_for_status: bool | Callable[[ClientResponse], Awaitable[None]] = False, timeout: _SENTINEL | ClientTimeout | None = sentinel, auto_decompress: bool = True, trust_env: bool = False, requote_redirect_url: bool = True, trace_configs: list[TraceConfig[object]] | None = None, read_bufsize: int = 2**16, max_line_size: int = 8190, max_field_size: int = 8190, fallback_charset_resolver: _CharsetResolver = lambda r, b: "utf-8", middlewares: Sequence[ClientMiddlewareType] = (), ssl_shutdown_timeout: _SENTINEL | None | float = sentinel, ) -> None: # We initialise _connector to None immediately, as it's referenced in __del__() # and could cause issues if an exception occurs during initialisation. 
self._connector: BaseConnector | None = None if base_url is None or isinstance(base_url, URL): self._base_url: URL | None = base_url self._base_url_origin = None if base_url is None else base_url.origin() else: self._base_url = URL(base_url) self._base_url_origin = self._base_url.origin() assert self._base_url.absolute, "Only absolute URLs are supported" if self._base_url is not None and not self._base_url.path.endswith("/"): raise ValueError("base_url must have a trailing '/'") loop = asyncio.get_running_loop() if timeout is sentinel or timeout is None: timeout = DEFAULT_TIMEOUT if not isinstance(timeout, ClientTimeout): raise ValueError( f"timeout parameter cannot be of {type(timeout)} type, " "please use 'timeout=ClientTimeout(...)'", ) self._timeout = timeout if ssl_shutdown_timeout is not sentinel: warnings.warn( "The ssl_shutdown_timeout parameter is deprecated and will be removed in aiohttp 4.0", DeprecationWarning, stacklevel=2, ) if connector is None: connector = TCPConnector(ssl_shutdown_timeout=ssl_shutdown_timeout) # Initialize these three attrs before raising any exception, # they are used in __del__ self._connector = connector self._loop = loop if loop.get_debug(): self._source_traceback: traceback.StackSummary | None = ( traceback.extract_stack(sys._getframe(1)) ) else: self._source_traceback = None if connector._loop is not loop: raise RuntimeError("Session and connector have to use same event loop") if cookie_jar is None: cookie_jar = CookieJar() self._cookie_jar = cookie_jar if cookies: self._cookie_jar.update_cookies(cookies) self._connector_owner = connector_owner self._default_auth = auth self._version = version self._json_serialize = json_serialize self._raise_for_status = raise_for_status self._auto_decompress = auto_decompress self._trust_env = trust_env self._requote_redirect_url = requote_redirect_url self._read_bufsize = read_bufsize self._max_line_size = max_line_size self._max_field_size = max_field_size # Convert to list of tuples if headers: real_headers: CIMultiDict[str] = CIMultiDict(headers) else: real_headers = CIMultiDict() self._default_headers: CIMultiDict[str] = real_headers if skip_auto_headers is not None: self._skip_auto_headers = frozenset(istr(i) for i in skip_auto_headers) else: self._skip_auto_headers = frozenset() self._request_class = request_class self._response_class = response_class self._ws_response_class = ws_response_class self._trace_configs = trace_configs or [] for trace_config in self._trace_configs: trace_config.freeze() self._resolve_charset = fallback_charset_resolver self._default_proxy = proxy self._default_proxy_auth = proxy_auth self._retry_connection: bool = True self._middlewares = middlewares def __init_subclass__(cls: type["ClientSession"]) -> None: raise TypeError( f"Inheritance class {cls.__name__} from ClientSession is forbidden" ) def __del__(self, _warnings: Any = warnings) -> None: if not self.closed: _warnings.warn( f"Unclosed client session {self!r}", ResourceWarning, source=self, ) context = {"client_session": self, "message": "Unclosed client session"} if self._source_traceback is not None: context["source_traceback"] = self._source_traceback self._loop.call_exception_handler(context) if sys.version_info >= (3, 11) and TYPE_CHECKING: def request( self, method: str, url: StrOrURL, **kwargs: Unpack[_RequestOptions], ) -> "_RequestContextManager": ... 
else: def request( self, method: str, url: StrOrURL, **kwargs: Any ) -> "_RequestContextManager": """Perform HTTP request.""" return _RequestContextManager(self._request(method, url, **kwargs)) def _build_url(self, str_or_url: StrOrURL) -> URL: url = URL(str_or_url) if self._base_url and not url.absolute: return self._base_url.join(url) return url async def _request( self, method: str, str_or_url: StrOrURL, *, params: Query = None, data: Any = None, json: Any = None, cookies: LooseCookies | None = None, headers: LooseHeaders | None = None, skip_auto_headers: Iterable[str] | None = None, auth: BasicAuth | None = None, allow_redirects: bool = True, max_redirects: int = 10, compress: str | bool = False, chunked: bool | None = None, expect100: bool = False, raise_for_status: ( None | bool | Callable[[ClientResponse], Awaitable[None]] ) = None, read_until_eof: bool = True, proxy: StrOrURL | None = None, proxy_auth: BasicAuth | None = None, timeout: ClientTimeout | _SENTINEL | None = sentinel, ssl: SSLContext | bool | Fingerprint = True, server_hostname: str | None = None, proxy_headers: LooseHeaders | None = None, trace_request_ctx: Mapping[str, Any] | None = None, read_bufsize: int | None = None, auto_decompress: bool | None = None, max_line_size: int | None = None, max_field_size: int | None = None, middlewares: Sequence[ClientMiddlewareType] | None = None, ) -> ClientResponse: # NOTE: timeout clamps existing connect and read timeouts. We cannot # set the default to None because we need to detect if the user wants # to use the existing timeouts by setting timeout to None. if self.closed: raise RuntimeError("Session is closed") if not isinstance(ssl, SSL_ALLOWED_TYPES): raise TypeError( "ssl should be SSLContext, Fingerprint, or bool, " f"got {ssl!r} instead." 
) if data is not None and json is not None: raise ValueError( "data and json parameters can not be used at the same time" ) elif json is not None: data = payload.JsonPayload(json, dumps=self._json_serialize) redirects = 0 history: list[ClientResponse] = [] version = self._version params = params or {} # Merge with default headers and transform to CIMultiDict headers = self._prepare_headers(headers) try: url = self._build_url(str_or_url) except ValueError as e: raise InvalidUrlClientError(str_or_url) from e assert self._connector is not None if url.scheme not in self._connector.allowed_protocol_schema_set: raise NonHttpUrlClientError(url) skip_headers: Iterable[istr] | None if skip_auto_headers is not None: skip_headers = { istr(i) for i in skip_auto_headers } | self._skip_auto_headers elif self._skip_auto_headers: skip_headers = self._skip_auto_headers else: skip_headers = None if proxy is None: proxy = self._default_proxy if proxy_auth is None: proxy_auth = self._default_proxy_auth if proxy is None: proxy_headers = None else: proxy_headers = self._prepare_headers(proxy_headers) try: proxy = URL(proxy) except ValueError as e: raise InvalidURL(proxy) from e if timeout is sentinel or timeout is None: real_timeout: ClientTimeout = self._timeout else: real_timeout = timeout # timeout is cumulative for all request operations # (request, redirects, responses, data consuming) tm = TimeoutHandle( self._loop, real_timeout.total, ceil_threshold=real_timeout.ceil_threshold ) handle = tm.start() if read_bufsize is None: read_bufsize = self._read_bufsize if auto_decompress is None: auto_decompress = self._auto_decompress if max_line_size is None: max_line_size = self._max_line_size if max_field_size is None: max_field_size = self._max_field_size traces = [ Trace( self, trace_config, trace_config.trace_config_ctx(trace_request_ctx=trace_request_ctx), ) for trace_config in self._trace_configs ] for trace in traces: await trace.send_request_start(method, url.update_query(params), headers) timer = tm.timer() try: with timer: # https://www.rfc-editor.org/rfc/rfc9112.html#name-retrying-requests retry_persistent_connection = ( self._retry_connection and method in IDEMPOTENT_METHODS ) while True: url, auth_from_url = strip_auth_from_url(url) if not url.raw_host: # NOTE: Bail early, otherwise, causes `InvalidURL` through # NOTE: `self._request_class()` below. err_exc_cls = ( InvalidUrlRedirectClientError if redirects else InvalidUrlClientError ) raise err_exc_cls(url) # If `auth` was passed for an already authenticated URL, # disallow only if this is the initial URL; this is to avoid issues # with sketchy redirects that are not the caller's responsibility if not history and (auth and auth_from_url): raise ValueError( "Cannot combine AUTH argument with " "credentials encoded in URL" ) # Override the auth with the one from the URL only if we # have no auth, or if we got an auth from a redirect URL if auth is None or (history and auth_from_url is not None): auth = auth_from_url if ( auth is None and self._default_auth and ( not self._base_url or self._base_url_origin == url.origin() ) ): auth = self._default_auth # Try netrc if auth is still None and trust_env is enabled. 
if auth is None and self._trust_env and url.host is not None: auth = await self._loop.run_in_executor( None, self._get_netrc_auth, url.host ) # It would be confusing if we support explicit # Authorization header with auth argument if auth is not None and hdrs.AUTHORIZATION in headers: raise ValueError( "Cannot combine AUTHORIZATION header " "with AUTH argument or credentials " "encoded in URL" ) all_cookies = self._cookie_jar.filter_cookies(url) if cookies is not None: tmp_cookie_jar = CookieJar( quote_cookie=self._cookie_jar.quote_cookie ) tmp_cookie_jar.update_cookies(cookies) req_cookies = tmp_cookie_jar.filter_cookies(url) if req_cookies: all_cookies.load(req_cookies) proxy_: URL | None = None if proxy is not None: proxy_ = URL(proxy) elif self._trust_env: with suppress(LookupError): proxy_, proxy_auth = await asyncio.to_thread( get_env_proxy_for_url, url ) req = self._request_class( method, url, params=params, headers=headers, skip_auto_headers=skip_headers, data=data, cookies=all_cookies, auth=auth, version=version, compress=compress, chunked=chunked, expect100=expect100, loop=self._loop, response_class=self._response_class, proxy=proxy_, proxy_auth=proxy_auth, timer=timer, session=self, ssl=ssl, server_hostname=server_hostname, proxy_headers=proxy_headers, traces=traces, trust_env=self.trust_env, ) async def _connect_and_send_request( req: ClientRequest, ) -> ClientResponse: # connection timeout assert self._connector is not None try: conn = await self._connector.connect( req, traces=traces, timeout=real_timeout ) except asyncio.TimeoutError as exc: raise ConnectionTimeoutError( f"Connection timeout to host {req.url}" ) from exc assert conn.protocol is not None conn.protocol.set_response_params( timer=timer, skip_payload=req.method in EMPTY_BODY_METHODS, read_until_eof=read_until_eof, auto_decompress=auto_decompress, read_timeout=real_timeout.sock_read, read_bufsize=read_bufsize, timeout_ceil_threshold=self._connector._timeout_ceil_threshold, max_line_size=max_line_size, max_field_size=max_field_size, ) try: resp = await req._send(conn) try: await resp.start(conn) except BaseException: resp.close() raise except BaseException: conn.close() raise return resp # Apply middleware (if any) - per-request middleware overrides session middleware effective_middlewares = ( self._middlewares if middlewares is None else middlewares ) if effective_middlewares: handler = build_client_middlewares( _connect_and_send_request, effective_middlewares ) else: handler = _connect_and_send_request try: resp = await handler(req) # Client connector errors should not be retried except ( ConnectionTimeoutError, ClientConnectorError, ClientConnectorCertificateError, ClientConnectorSSLError, ): raise except (ClientOSError, ServerDisconnectedError): if retry_persistent_connection: retry_persistent_connection = False continue raise except ClientError: raise except OSError as exc: if exc.errno is None and isinstance(exc, asyncio.TimeoutError): raise raise ClientOSError(*exc.args) from exc # Update cookies from raw headers to preserve duplicates if resp._raw_cookie_headers: self._cookie_jar.update_cookies_from_headers( resp._raw_cookie_headers, resp.url ) # redirects if resp.status in (301, 302, 303, 307, 308) and allow_redirects: for trace in traces: await trace.send_request_redirect( method, url.update_query(params), headers, resp ) redirects += 1 history.append(resp) if max_redirects and redirects >= max_redirects: if req._body is not None: await req._body.close() resp.close() raise TooManyRedirects( 
history[0].request_info, tuple(history) ) # For 301 and 302, mimic IE, now changed in RFC # https://github.com/kennethreitz/requests/pull/269 if (resp.status == 303 and resp.method != hdrs.METH_HEAD) or ( resp.status in (301, 302) and resp.method == hdrs.METH_POST ): method = hdrs.METH_GET data = None if headers.get(hdrs.CONTENT_LENGTH): headers.pop(hdrs.CONTENT_LENGTH) else: # For 307/308, always preserve the request body # For 301/302 with non-POST methods, preserve the request body # https://www.rfc-editor.org/rfc/rfc9110#section-15.4.3-3.1 # Use the existing payload to avoid recreating it from a potentially consumed file data = req._body r_url = resp.headers.get(hdrs.LOCATION) or resp.headers.get( hdrs.URI ) if r_url is None: # see github.com/aio-libs/aiohttp/issues/2022 break else: # reading from correct redirection # response is forbidden resp.release() try: parsed_redirect_url = URL( r_url, encoded=not self._requote_redirect_url ) except ValueError as e: if req._body is not None: await req._body.close() resp.close() raise InvalidUrlRedirectClientError( r_url, "Server attempted redirecting to a location that does not look like a URL", ) from e scheme = parsed_redirect_url.scheme if scheme not in HTTP_AND_EMPTY_SCHEMA_SET: if req._body is not None: await req._body.close() resp.close() raise NonHttpUrlRedirectClientError(r_url) elif not scheme: parsed_redirect_url = url.join(parsed_redirect_url) is_same_host_https_redirect = ( url.host == parsed_redirect_url.host and parsed_redirect_url.scheme == "https" and url.scheme == "http" ) try: redirect_origin = parsed_redirect_url.origin() except ValueError as origin_val_err: if req._body is not None: await req._body.close() resp.close() raise InvalidUrlRedirectClientError( parsed_redirect_url, "Invalid redirect URL origin", ) from origin_val_err if ( not is_same_host_https_redirect and url.origin() != redirect_origin ): auth = None headers.pop(hdrs.AUTHORIZATION, None) url = parsed_redirect_url params = {} resp.release() continue break if req._body is not None: await req._body.close() # check response status if raise_for_status is None: raise_for_status = self._raise_for_status if raise_for_status is None: pass elif callable(raise_for_status): await raise_for_status(resp) elif raise_for_status: resp.raise_for_status() # register connection if handle is not None: if resp.connection is not None: resp.connection.add_callback(handle.cancel) else: handle.cancel() resp._history = tuple(history) for trace in traces: await trace.send_request_end( method, url.update_query(params), headers, resp ) return resp except BaseException as e: # cleanup timer tm.close() if handle: handle.cancel() handle = None for trace in traces: await trace.send_request_exception( method, url.update_query(params), headers, e ) raise def ws_connect( self, url: StrOrURL, *, method: str = hdrs.METH_GET, protocols: Collection[str] = (), timeout: ClientWSTimeout | _SENTINEL = sentinel, receive_timeout: float | None = None, autoclose: bool = True, autoping: bool = True, heartbeat: float | None = None, auth: BasicAuth | None = None, origin: str | None = None, params: Query = None, headers: LooseHeaders | None = None, proxy: StrOrURL | None = None, proxy_auth: BasicAuth | None = None, ssl: SSLContext | bool | Fingerprint = True, server_hostname: str | None = None, proxy_headers: LooseHeaders | None = None, compress: int = 0, max_msg_size: int = 4 * 1024 * 1024, ) -> "_WSRequestContextManager": """Initiate websocket connection.""" return _WSRequestContextManager( self._ws_connect( 
url, method=method, protocols=protocols, timeout=timeout, receive_timeout=receive_timeout, autoclose=autoclose, autoping=autoping, heartbeat=heartbeat, auth=auth, origin=origin, params=params, headers=headers, proxy=proxy, proxy_auth=proxy_auth, ssl=ssl, server_hostname=server_hostname, proxy_headers=proxy_headers, compress=compress, max_msg_size=max_msg_size, ) ) async def _ws_connect( self, url: StrOrURL, *, method: str = hdrs.METH_GET, protocols: Collection[str] = (), timeout: ClientWSTimeout | _SENTINEL = sentinel, receive_timeout: float | None = None, autoclose: bool = True, autoping: bool = True, heartbeat: float | None = None, auth: BasicAuth | None = None, origin: str | None = None, params: Query = None, headers: LooseHeaders | None = None, proxy: StrOrURL | None = None, proxy_auth: BasicAuth | None = None, ssl: SSLContext | bool | Fingerprint = True, server_hostname: str | None = None, proxy_headers: LooseHeaders | None = None, compress: int = 0, max_msg_size: int = 4 * 1024 * 1024, ) -> ClientWebSocketResponse: if timeout is not sentinel: if isinstance(timeout, ClientWSTimeout): ws_timeout = timeout else: warnings.warn( # type: ignore[unreachable] "parameter 'timeout' of type 'float' " "is deprecated, please use " "'timeout=ClientWSTimeout(ws_close=...)'", DeprecationWarning, stacklevel=2, ) ws_timeout = ClientWSTimeout(ws_close=timeout) else: ws_timeout = DEFAULT_WS_CLIENT_TIMEOUT if receive_timeout is not None: warnings.warn( "float parameter 'receive_timeout' " "is deprecated, please use parameter " "'timeout=ClientWSTimeout(ws_receive=...)'", DeprecationWarning, stacklevel=2, ) ws_timeout = dataclasses.replace(ws_timeout, ws_receive=receive_timeout) if headers is None: real_headers: CIMultiDict[str] = CIMultiDict() else: real_headers = CIMultiDict(headers) default_headers = { hdrs.UPGRADE: "websocket", hdrs.CONNECTION: "Upgrade", hdrs.SEC_WEBSOCKET_VERSION: "13", } for key, value in default_headers.items(): real_headers.setdefault(key, value) sec_key = base64.b64encode(os.urandom(16)) real_headers[hdrs.SEC_WEBSOCKET_KEY] = sec_key.decode() if protocols: real_headers[hdrs.SEC_WEBSOCKET_PROTOCOL] = ",".join(protocols) if origin is not None: real_headers[hdrs.ORIGIN] = origin if compress: extstr = ws_ext_gen(compress=compress) real_headers[hdrs.SEC_WEBSOCKET_EXTENSIONS] = extstr if not isinstance(ssl, SSL_ALLOWED_TYPES): raise TypeError( "ssl should be SSLContext, Fingerprint, or bool, " f"got {ssl!r} instead." 
) # send request resp = await self.request( method, url, params=params, headers=real_headers, read_until_eof=False, auth=auth, proxy=proxy, proxy_auth=proxy_auth, ssl=ssl, server_hostname=server_hostname, proxy_headers=proxy_headers, ) try: # check handshake if resp.status != 101: raise WSServerHandshakeError( resp.request_info, resp.history, message="Invalid response status", status=resp.status, headers=resp.headers, ) if resp.headers.get(hdrs.UPGRADE, "").lower() != "websocket": raise WSServerHandshakeError( resp.request_info, resp.history, message="Invalid upgrade header", status=resp.status, headers=resp.headers, ) if resp.headers.get(hdrs.CONNECTION, "").lower() != "upgrade": raise WSServerHandshakeError( resp.request_info, resp.history, message="Invalid connection header", status=resp.status, headers=resp.headers, ) # key calculation r_key = resp.headers.get(hdrs.SEC_WEBSOCKET_ACCEPT, "") match = base64.b64encode(hashlib.sha1(sec_key + WS_KEY).digest()).decode() if r_key != match: raise WSServerHandshakeError( resp.request_info, resp.history, message="Invalid challenge response", status=resp.status, headers=resp.headers, ) # websocket protocol protocol = None if protocols and hdrs.SEC_WEBSOCKET_PROTOCOL in resp.headers: resp_protocols = [ proto.strip() for proto in resp.headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(",") ] for proto in resp_protocols: if proto in protocols: protocol = proto break # websocket compress notakeover = False if compress: compress_hdrs = resp.headers.get(hdrs.SEC_WEBSOCKET_EXTENSIONS) if compress_hdrs: try: compress, notakeover = ws_ext_parse(compress_hdrs) except WSHandshakeError as exc: raise WSServerHandshakeError( resp.request_info, resp.history, message=exc.args[0], status=resp.status, headers=resp.headers, ) from exc else: compress = 0 notakeover = False conn = resp.connection assert conn is not None conn_proto = conn.protocol assert conn_proto is not None # For WS connection the read_timeout must be either ws_timeout.ws_receive or greater # None == no timeout, i.e. infinite timeout, so None is the max timeout possible if ws_timeout.ws_receive is None: # Reset regardless conn_proto.read_timeout = None elif conn_proto.read_timeout is not None: conn_proto.read_timeout = max( ws_timeout.ws_receive, conn_proto.read_timeout ) transport = conn.transport assert transport is not None reader = WebSocketDataQueue(conn_proto, 2**16, loop=self._loop) conn_proto.set_parser(WebSocketReader(reader, max_msg_size), reader) writer = WebSocketWriter( conn_proto, transport, use_mask=True, compress=compress, notakeover=notakeover, ) except BaseException: resp.close() raise else: return self._ws_response_class( reader, writer, protocol, resp, ws_timeout, autoclose, autoping, self._loop, heartbeat=heartbeat, compress=compress, client_notakeover=notakeover, ) def _prepare_headers(self, headers: LooseHeaders | None) -> "CIMultiDict[str]": """Add default headers and transform it to CIMultiDict""" # Convert headers to MultiDict result = CIMultiDict(self._default_headers) if headers: if not isinstance(headers, (MultiDictProxy, MultiDict)): headers = CIMultiDict(headers) added_names: set[str] = set() for key, value in headers.items(): if key in added_names: result.add(key, value) else: result[key] = value added_names.add(key) return result def _get_netrc_auth(self, host: str) -> BasicAuth | None: """ Get auth from netrc for the given host. This method is designed to be called in an executor to avoid blocking I/O in the event loop. 
""" netrc_obj = netrc_from_env() try: return basicauth_from_netrc(netrc_obj, host) except LookupError: return None if sys.version_info >= (3, 11) and TYPE_CHECKING: def get( self, url: StrOrURL, **kwargs: Unpack[_RequestOptions], ) -> "_RequestContextManager": ... def options( self, url: StrOrURL, **kwargs: Unpack[_RequestOptions], ) -> "_RequestContextManager": ... def head( self, url: StrOrURL, **kwargs: Unpack[_RequestOptions], ) -> "_RequestContextManager": ... def post( self, url: StrOrURL, **kwargs: Unpack[_RequestOptions], ) -> "_RequestContextManager": ... def put( self, url: StrOrURL, **kwargs: Unpack[_RequestOptions], ) -> "_RequestContextManager": ... def patch( self, url: StrOrURL, **kwargs: Unpack[_RequestOptions], ) -> "_RequestContextManager": ... def delete( self, url: StrOrURL, **kwargs: Unpack[_RequestOptions], ) -> "_RequestContextManager": ... else: def get( self, url: StrOrURL, *, allow_redirects: bool = True, **kwargs: Any ) -> "_RequestContextManager": """Perform HTTP GET request.""" return _RequestContextManager( self._request( hdrs.METH_GET, url, allow_redirects=allow_redirects, **kwargs ) ) def options( self, url: StrOrURL, *, allow_redirects: bool = True, **kwargs: Any ) -> "_RequestContextManager": """Perform HTTP OPTIONS request.""" return _RequestContextManager( self._request( hdrs.METH_OPTIONS, url, allow_redirects=allow_redirects, **kwargs ) ) def head( self, url: StrOrURL, *, allow_redirects: bool = False, **kwargs: Any ) -> "_RequestContextManager": """Perform HTTP HEAD request.""" return _RequestContextManager( self._request( hdrs.METH_HEAD, url, allow_redirects=allow_redirects, **kwargs ) ) def post( self, url: StrOrURL, *, data: Any = None, **kwargs: Any ) -> "_RequestContextManager": """Perform HTTP POST request.""" return _RequestContextManager( self._request(hdrs.METH_POST, url, data=data, **kwargs) ) def put( self, url: StrOrURL, *, data: Any = None, **kwargs: Any ) -> "_RequestContextManager": """Perform HTTP PUT request.""" return _RequestContextManager( self._request(hdrs.METH_PUT, url, data=data, **kwargs) ) def patch( self, url: StrOrURL, *, data: Any = None, **kwargs: Any ) -> "_RequestContextManager": """Perform HTTP PATCH request.""" return _RequestContextManager( self._request(hdrs.METH_PATCH, url, data=data, **kwargs) ) def delete(self, url: StrOrURL, **kwargs: Any) -> "_RequestContextManager": """Perform HTTP DELETE request.""" return _RequestContextManager( self._request(hdrs.METH_DELETE, url, **kwargs) ) async def close(self) -> None: """Close underlying connector. Release all acquired resources. """ if not self.closed: if self._connector is not None and self._connector_owner: await self._connector.close() self._connector = None @property def closed(self) -> bool: """Is client session closed. A readonly property. 
""" return self._connector is None or self._connector.closed @property def connector(self) -> BaseConnector | None: """Connector instance used for the session.""" return self._connector @property def cookie_jar(self) -> AbstractCookieJar: """The session cookies.""" return self._cookie_jar @property def version(self) -> tuple[int, int]: """The session HTTP protocol version.""" return self._version @property def requote_redirect_url(self) -> bool: """Do URL requoting on redirection handling.""" return self._requote_redirect_url @property def timeout(self) -> ClientTimeout: """Timeout for the session.""" return self._timeout @property def headers(self) -> "CIMultiDict[str]": """The default headers of the client session.""" return self._default_headers @property def skip_auto_headers(self) -> frozenset[istr]: """Headers for which autogeneration should be skipped""" return self._skip_auto_headers @property def auth(self) -> BasicAuth | None: """An object that represents HTTP Basic Authorization""" return self._default_auth @property def json_serialize(self) -> JSONEncoder: """Json serializer callable""" return self._json_serialize @property def connector_owner(self) -> bool: """Should connector be closed on session closing""" return self._connector_owner @property def raise_for_status( self, ) -> bool | Callable[[ClientResponse], Awaitable[None]]: """Should `ClientResponse.raise_for_status()` be called for each response.""" return self._raise_for_status @property def auto_decompress(self) -> bool: """Should the body response be automatically decompressed.""" return self._auto_decompress @property def trust_env(self) -> bool: """ Should proxies information from environment or netrc be trusted. Information is from HTTP_PROXY / HTTPS_PROXY environment variables or ~/.netrc file if present. """ return self._trust_env @property def trace_configs(self) -> list[TraceConfig[Any]]: """A list of TraceConfig instances used for client tracing""" return self._trace_configs def detach(self) -> None: """Detach connector from session without closing the former. Session is switched to closed state anyway. """ self._connector = None async def __aenter__(self) -> "ClientSession": return self async def __aexit__( self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None, ) -> None: await self.close()
ClientSession
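A minimal usage sketch for the `ClientSession` sample above (standard aiohttp usage; the URL is illustrative). The `async with` form works because of the `__aenter__`/`__aexit__` pair shown in the source, and `closed` flips to `True` once the connector is released:

```python
import asyncio

import aiohttp


async def main() -> None:
    # __aexit__ awaits session.close(), which closes the owned connector.
    async with aiohttp.ClientSession() as session:
        # get() returns a _RequestContextManager, so the response is
        # itself usable as an async context manager.
        async with session.get("https://example.com") as resp:
            print(resp.status)
            print(len(await resp.text()))
    print(session.closed)  # True: the connector has been released


asyncio.run(main())
```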
python
gevent__gevent
src/gevent/_imap.py
{ "start": 556, "end": 892 }
class ____(object): __slots__ = ('exc', 'raise_exception') def __init__(self, exc, raise_exception=None): self.exc = exc self.raise_exception = raise_exception def _raise_exc(failure): # For cython. if failure.raise_exception: failure.raise_exception() else: raise failure.exc
Failure
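A small sketch of how the `Failure` container above defers raising, using only the names defined in the sample:

```python
exc = ValueError("boom")

# Without raise_exception, _raise_exc re-raises the stored exception.
try:
    _raise_exc(Failure(exc))
except ValueError as caught:
    assert caught is exc

# With raise_exception, that callable is invoked instead of a plain raise.
def custom_raise():
    raise RuntimeError("custom raise path")

try:
    _raise_exc(Failure(exc, raise_exception=custom_raise))
except RuntimeError as caught:
    print(caught)  # custom raise path
```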
python
scipy__scipy
scipy/linalg/_matfuncs_inv_ssq.py
{ "start": 507, "end": 569 }
class ____(LogmRankWarning): pass
LogmExactlySingularWarning
python
huggingface__transformers
src/transformers/models/markuplm/modeling_markuplm.py
{ "start": 18560, "end": 20179 }
class ____(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([MarkupLMLayer(config) for i in range(config.num_hidden_layers)]) self.gradient_checkpointing = False @can_return_tuple def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, **kwargs, ) -> Union[tuple[torch.Tensor], BaseModelOutput]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states=hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, **kwargs, ) hidden_states = layer_outputs[0] if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) @auto_docstring
MarkupLMEncoder
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/protocol33.py
{ "start": 565, "end": 708 }
class ____(Generic[T, U]): def f(self) -> T | U: raise NotImplementedError def g(self) -> BProto[T, U]: return B[T, U]()
B
python
aimacode__aima-python
gui/grid_mdp.py
{ "start": 16421, "end": 19454 }
class ____(tk.Frame): def __init__(self, parent, controller): """HomePage constructor""" tk.Frame.__init__(self, parent) self.controller = controller frame1 = tk.Frame(self) frame1.pack(side=tk.TOP) frame3 = tk.Frame(self) frame3.pack(side=tk.TOP) frame4 = tk.Frame(self) frame4.pack(side=tk.TOP) frame2 = tk.Frame(self) frame2.pack(side=tk.TOP) s = ttk.Style() s.theme_use('clam') s.configure('TButton', background=grayd, padding=0) s.configure('wall.TButton', background=gray2, foreground=white) s.configure('reward.TButton', background=gray9) s.configure('+term.TButton', background=green8) s.configure('-term.TButton', background=pblue, foreground=white) s.configure('=term.TButton', background=green4) label = ttk.Label(frame1, text='GridMDP builder', font=('Helvetica', 18, 'bold'), background=grayef) label.pack(pady=75, padx=50, side=tk.TOP) ec_btn = ttk.Button(frame3, text='Empty cells', width=20) ec_btn.pack(pady=0, padx=0, side=tk.LEFT, ipady=10) ec_btn.configure(style='TButton') w_btn = ttk.Button(frame3, text='Walls', width=20) w_btn.pack(pady=0, padx=0, side=tk.LEFT, ipady=10) w_btn.configure(style='wall.TButton') r_btn = ttk.Button(frame3, text='Rewards', width=20) r_btn.pack(pady=0, padx=0, side=tk.LEFT, ipady=10) r_btn.configure(style='reward.TButton') term_p = ttk.Button(frame3, text='Positive terminals', width=20) term_p.pack(pady=0, padx=0, side=tk.LEFT, ipady=10) term_p.configure(style='+term.TButton') term_z = ttk.Button(frame3, text='Neutral terminals', width=20) term_z.pack(pady=0, padx=0, side=tk.LEFT, ipady=10) term_z.configure(style='=term.TButton') term_n = ttk.Button(frame3, text='Negative terminals', width=20) term_n.pack(pady=0, padx=0, side=tk.LEFT, ipady=10) term_n.configure(style='-term.TButton') label = ttk.Label(frame4, text='Dimensions', font=('Verdana', 14), background=grayef) label.pack(pady=15, padx=10, side=tk.TOP) entry_h = tk.Entry(frame2, textvariable=self.controller.shared_data['height'], font=('Verdana', 10), width=3, justify=tk.CENTER) entry_h.pack(pady=10, padx=10, side=tk.LEFT) label_x = ttk.Label(frame2, text='X', font=('Verdana', 10), background=grayef) label_x.pack(pady=10, padx=4, side=tk.LEFT) entry_w = tk.Entry(frame2, textvariable=self.controller.shared_data['width'], font=('Verdana', 10), width=3, justify=tk.CENTER) entry_w.pack(pady=10, padx=10, side=tk.LEFT) button = ttk.Button(self, text='Build a GridMDP', command=lambda: controller.show_frame(BuildMDP, cb=True)) button.pack(pady=10, padx=10, side=tk.TOP, ipadx=20, ipady=10) button.configure(style='reward.TButton')
HomePage
python
django__django
tests/model_options/models/default_related_name.py
{ "start": 301, "end": 559 }
class ____(models.Model): title = models.CharField(max_length=128) authors = models.ManyToManyField(Author) editor = models.ForeignKey(Editor, models.CASCADE, related_name="edited_books") class Meta: default_related_name = "books"
Book
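A short sketch of what `Meta.default_related_name` does in the sample above, assuming a routine Django ORM setup: reverse accessors default to `books`, except where a field overrides them as `editor` does:

```python
author = Author.objects.first()
author.books.all()          # reverse M2M accessor named by default_related_name

editor = Editor.objects.first()
editor.edited_books.all()   # the explicit related_name wins over the Meta default
```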
python
kamyu104__LeetCode-Solutions
Python/check-if-numbers-are-ascending-in-a-sentence.py
{ "start": 563, "end": 819 }
class ____(object): def areNumbersAscending(self, s): """ :type s: str :rtype: bool """ nums = [int(x) for x in s.split() if x.isdigit()] return all(nums[i] < nums[i+1] for i in xrange(len(nums)-1))
Solution2
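The sample above targets Python 2 (note `xrange`). A Python 3 worked example of the same idea:

```python
s = "1 box has 3 blue 4 red 6 green and 12 yellow marbles"
nums = [int(x) for x in s.split() if x.isdigit()]
print(nums)                                        # [1, 3, 4, 6, 12]
print(all(a < b for a, b in zip(nums, nums[1:])))  # True: strictly ascending
```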
python
spack__spack
lib/spack/spack/test/error_messages.py
{ "start": 2565, "end": 2750 }
class ____(Package): version("2.1") version("2.0") variant("v1", default=True) requires("~v1", when="@2.1") depends_on("w1") """, ) _pkgw2 = ( "w2", """\
W3
python
airbytehq__airbyte
airbyte-integrations/connectors/destination-vectara/destination_vectara/config.py
{ "start": 705, "end": 4218 }
class ____(BaseModel): oauth2: OAuth2 customer_id: str = Field( ..., title="Customer ID", description="Your customer id as it is in the authentication url", order=2, group="account" ) corpus_name: str = Field(..., title="Corpus Name", description="The Name of Corpus to load data into", order=3, group="account") parallelize: Optional[bool] = Field( default=False, title="Parallelize", description="Parallelize indexing into Vectara with multiple threads", always_show=True, group="account", ) text_fields: Optional[List[str]] = Field( default=[], title="Text fields to index with Vectara", description="List of fields in the record that should be in the section of the document. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered text fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. `user.name` will access the `name` field in the `user` object. It's also possible to use wildcards to access all fields in an object, e.g. `users.*.name` will access all `names` fields in all entries of the `users` array.", always_show=True, examples=["text", "user.name", "users.*.name"], ) title_field: Optional[str] = Field( default="", title="Text field to use as document title with Vectara", description="A field that will be used to populate the `title` of each document. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered text fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. `user.name` will access the `name` field in the `user` object. It's also possible to use wildcards to access all fields in an object, e.g. `users.*.name` will access all `names` fields in all entries of the `users` array.", always_show=True, examples=["document_key"], ) metadata_fields: Optional[List[str]] = Field( default=[], title="Fields to store as metadata", description="List of fields in the record that should be stored as metadata. The field list is applied to all streams in the same way and non-existing fields are ignored. If none are defined, all fields are considered metadata fields. When specifying text fields, you can access nested fields in the record by using dot notation, e.g. `user.name` will access the `name` field in the `user` object. It's also possible to use wildcards to access all fields in an object, e.g. `users.*.name` will access all `names` fields in all entries of the `users` array. When specifying nested paths, all matching values are flattened into an array set to a field named by the path.", always_show=True, examples=["age", "user"], ) class Config: title = "Vectara Config" schema_extra = { "description": "Configuration to connect to the Vectara instance", "groups": [ {"id": "account", "title": "Account"}, {"id": "auth", "title": "Authentication"}, ], } @classmethod def schema(cls): """we're overriding the schema classmethod to enable some post-processing""" schema = super().schema() schema = resolve_refs(schema) return schema
VectaraConfig
python
streamlit__streamlit
lib/tests/streamlit/web/server/bidi_component_request_handler_test.py
{ "start": 1088, "end": 6751 }
class ____(tornado.testing.AsyncHTTPTestCase): def setUp(self) -> None: self.component_manager = BidiComponentManager() self.temp_dir = tempfile.TemporaryDirectory() super().setUp() # Create a fake package root with a component asset_dir self.package_root = Path(self.temp_dir.name) / "pkgroot" self.package_root.mkdir(parents=True, exist_ok=True) self.component_assets_dir = self.package_root / "test_component" self.component_assets_dir.mkdir(parents=True, exist_ok=True) # Create assets (self.component_assets_dir / "index.js").write_text( "console.log('test component');" ) (self.component_assets_dir / "index.html").write_text( "<div>Test Component</div>" ) (self.component_assets_dir / "styles.css").write_text("div { color: red; }") # Create a subdirectory to validate directory path handling (self.component_assets_dir / "subdir").mkdir(parents=True, exist_ok=True) # Register from a manifest that declares the asset_dir manifest = ComponentManifest( name="pkg", version="0.0.1", components=[ ComponentConfig(name="test_component", asset_dir="test_component") ], ) self.component_manager.register_from_manifest(manifest, self.package_root) def tearDown(self) -> None: super().tearDown() self.temp_dir.cleanup() def get_app(self) -> tornado.web.Application: return tornado.web.Application( [ ( r"/_stcore/bidi-components/(.*)", BidiComponentRequestHandler, {"component_manager": self.component_manager}, ) ] ) def test_get_component_file(self) -> None: # JS should be accessible response = self.fetch("/_stcore/bidi-components/pkg.test_component/index.js") assert response.code == 200 assert response.body.decode() == "console.log('test component');" # HTML files should be accessible response = self.fetch("/_stcore/bidi-components/pkg.test_component/index.html") assert response.code == 200 assert response.body.decode() == "<div>Test Component</div>" # CSS should be accessible response = self.fetch("/_stcore/bidi-components/pkg.test_component/styles.css") assert response.code == 200 assert response.body.decode() == "div { color: red; }" def test_component_not_found(self) -> None: response = self.fetch("/_stcore/bidi-components/nonexistent_component/index.js") assert response.code == 404 response = self.fetch( "/_stcore/bidi-components/pkg.nonexistent_component/index.js" ) assert response.code == 404 def test_disallow_path_traversal(self) -> None: # Attempt path traversal attack response = self.fetch( "/_stcore/bidi-components/pkg.test_component/../../../etc/passwd" ) assert response.code == 403 def test_file_not_found_in_component_dir(self) -> None: response = self.fetch( "/_stcore/bidi-components/pkg.test_component/nonexistent.js" ) assert response.code == 404 def test_get_url(self) -> None: url = BidiComponentRequestHandler.get_url("pkg.test_component/index.js") assert url == "_stcore/bidi-components/pkg.test_component/index.js" def test_missing_file_segment_returns_404_not_found(self) -> None: """Requesting component without a file should return 404 not found.""" response = self.fetch("/_stcore/bidi-components/pkg.test_component") assert response.code == 404 assert response.body == b"not found" def test_trailing_slash_returns_404_not_found(self) -> None: """Requesting component with trailing slash should return 404 not found.""" response = self.fetch("/_stcore/bidi-components/pkg.test_component/") assert response.code == 404 assert response.body == b"not found" def test_directory_path_returns_404_not_found(self) -> None: """Requesting a directory within component should return 404 not found.""" response = 
self.fetch("/_stcore/bidi-components/pkg.test_component/subdir/") assert response.code == 404 assert response.body == b"not found" def test_cors_all_origins_star(self) -> None: """When CORS allows all, Access-Control-Allow-Origin should be '*'.""" with mock.patch( "streamlit.web.server.routes.allow_all_cross_origin_requests", mock.MagicMock(return_value=True), ): response = self.fetch( "/_stcore/bidi-components/pkg.test_component/index.js" ) assert response.code == 200 assert response.headers["Access-Control-Allow-Origin"] == "*" @mock.patch( "streamlit.web.server.routes.allow_all_cross_origin_requests", mock.MagicMock(return_value=False), ) @patch_config_options({"server.corsAllowedOrigins": ["http://example.com"]}) def test_cors_allowlisted_origin_echo(self) -> None: """When origin is allowlisted, it should be echoed in the header.""" response = self.fetch( "/_stcore/bidi-components/pkg.test_component/index.js", headers={"Origin": "http://example.com"}, ) assert response.code == 200 assert response.headers["Access-Control-Allow-Origin"] == "http://example.com"
BidiComponentRequestHandlerTest
python
numba__numba
numba/core/errors.py
{ "start": 2221, "end": 2434 }
class ____(NumbaWarning): """ Warning category for reporting pedantic messages. """ def __init__(self, msg, **kwargs): super().__init__(f"{msg}\n{pedantic_warning_info}")
NumbaPedanticWarning
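A minimal sketch of the same pattern outside Numba: a warning category that appends fixed guidance text to every message, which is the role `pedantic_warning_info` plays in `numba.core.errors` (the `GUIDANCE` constant below is a hypothetical stand-in):

```python
import warnings

GUIDANCE = "See the project docs for what this warning means."  # stand-in


class PedanticDemoWarning(UserWarning):
    def __init__(self, msg, **kwargs):
        super().__init__(f"{msg}\n{GUIDANCE}")


warnings.warn(PedanticDemoWarning("overflow semantics differ from NumPy"))
```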
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/operators/test_cloud_formation.py
{ "start": 4500, "end": 6348 }
class ____: def test_init(self): op = CloudFormationDeleteStackOperator( task_id="cf_delete_stack_init", stack_name="fake-stack", # Generic hooks parameters aws_conn_id="fake-conn-id", region_name="us-east-1", verify=False, botocore_config={"read_timeout": 42}, ) assert op.hook.client_type == "cloudformation" assert op.hook.resource_type is None assert op.hook.aws_conn_id == "fake-conn-id" assert op.hook._region_name == "us-east-1" assert op.hook._verify is False assert op.hook._config is not None assert op.hook._config.read_timeout == 42 op = CloudFormationDeleteStackOperator(task_id="cf_delete_stack_init", stack_name="fake-stack") assert op.hook.aws_conn_id == "aws_default" assert op.hook._region_name is None assert op.hook._verify is None assert op.hook._config is None def test_delete_stack(self, mocked_hook_client): stack_name = "myStackToBeDeleted" operator = CloudFormationDeleteStackOperator( task_id="test_task", stack_name=stack_name, dag=DAG("test_dag_id", schedule=None, default_args=DEFAULT_ARGS), ) operator.execute(MagicMock()) mocked_hook_client.delete_stack.assert_any_call(StackName=stack_name) def test_template_fields(self): op = CloudFormationDeleteStackOperator( task_id="cf_delete_stack_init", stack_name="fake-stack", # Generic hooks parameters aws_conn_id="fake-conn-id", region_name="us-east-1", verify=False, botocore_config={"read_timeout": 42}, ) validate_template_fields(op)
TestCloudFormationDeleteStackOperator
python
pyinstaller__pyinstaller
bootloader/waflib/Utils.py
{ "start": 2352, "end": 3629 }
class ____(object): __slots__ = ('maxlen', 'table', 'head') def __init__(self, maxlen=100): self.maxlen = maxlen self.table = {} self.head = lru_node() self.head.next = self.head self.head.prev = self.head def __getitem__(self, key): node = self.table[key] if node is self.head: return node.val node.prev.next = node.next node.next.prev = node.prev node.next = self.head.next node.prev = self.head self.head = node.next.prev = node.prev.next = node return node.val def __setitem__(self, key, val): if key in self.table: node = self.table[key] node.val = val self.__getitem__(key) else: if len(self.table) < self.maxlen: node = lru_node() node.prev = self.head node.next = self.head.next node.prev.next = node.next.prev = node else: node = self.head = self.head.next try: del self.table[node.key] except KeyError: pass node.key = key node.val = val self.table[key] = node
lru_cache
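A usage sketch for the `lru_cache` mapping above. It relies on the companion `lru_node` class defined alongside it in `waflib.Utils`; reads and writes use plain indexing, and once `maxlen` is reached a write recycles the least recently used slot (the exact eviction order below is traced from the code above, so treat it as an assumption):

```python
cache = lru_cache(maxlen=2)
cache['a'] = 1
cache['b'] = 2
cache['a']                     # touching 'a' makes it most recently used
cache['c'] = 3                 # recycles the LRU slot, dropping 'b'
print('b' in cache.table)      # False
print(cache['a'], cache['c'])  # 1 3
```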
python
pandas-dev__pandas
pandas/tests/series/indexing/test_getitem.py
{ "start": 13926, "end": 17608 }
class ____: def test_getitem_boolean(self, string_series): ser = string_series mask = ser > ser.median() # passing list is OK result = ser[list(mask)] expected = ser[mask] tm.assert_series_equal(result, expected) tm.assert_index_equal(result.index, ser.index[mask]) def test_getitem_boolean_empty(self): ser = Series([], dtype=np.int64) ser.index.name = "index_name" ser = ser[ser.isna()] assert ser.index.name == "index_name" assert ser.dtype == np.int64 # GH#5877 # indexing with empty series ser = Series(["A", "B"], dtype=object) expected = Series(dtype=object, index=Index([], dtype="int64")) result = ser[Series([], dtype=object)] tm.assert_series_equal(result, expected) # invalid because of the boolean indexer # that's empty or not-aligned msg = ( r"Unalignable boolean Series provided as indexer \(index of " r"the boolean Series and of the indexed object do not match" ) with pytest.raises(IndexingError, match=msg): ser[Series([], dtype=bool)] with pytest.raises(IndexingError, match=msg): ser[Series([True], dtype=bool)] def test_getitem_boolean_object(self, string_series): # using column from DataFrame ser = string_series mask = ser > ser.median() omask = mask.astype(object) # getitem result = ser[omask] expected = ser[mask] tm.assert_series_equal(result, expected) # setitem s2 = ser.copy() cop = ser.copy() cop[omask] = 5 s2[mask] = 5 tm.assert_series_equal(cop, s2) # nans raise exception omask[5:10] = np.nan msg = "Cannot mask with non-boolean array containing NA / NaN values" with pytest.raises(ValueError, match=msg): ser[omask] with pytest.raises(ValueError, match=msg): ser[omask] = 5 def test_getitem_boolean_dt64_copies(self): # GH#36210 dti = date_range("2016-01-01", periods=4, tz="US/Pacific") key = np.array([True, True, False, False]) ser = Series(dti._data) res = ser[key] assert res._values._ndarray.base is None # compare with numeric case for reference ser2 = Series(range(4)) res2 = ser2[key] assert res2._values.base is None def test_getitem_boolean_corner(self, datetime_series): ts = datetime_series mask_shifted = ts.shift(1, freq=BDay()) > ts.median() msg = ( r"Unalignable boolean Series provided as indexer \(index of " r"the boolean Series and of the indexed object do not match" ) with pytest.raises(IndexingError, match=msg): ts[mask_shifted] with pytest.raises(IndexingError, match=msg): ts.loc[mask_shifted] def test_getitem_boolean_different_order(self, string_series): ordered = string_series.sort_values() sel = string_series[ordered > 0] exp = string_series[string_series > 0] tm.assert_series_equal(sel, exp) def test_getitem_boolean_contiguous_preserve_freq(self): rng = date_range("1/1/2000", "3/1/2000", freq="B") mask = np.zeros(len(rng), dtype=bool) mask[10:20] = True masked = rng[mask] expected = rng[10:20] assert expected.freq == rng.freq tm.assert_index_equal(masked, expected) mask[22] = True masked = rng[mask] assert masked.freq is None
TestGetitemBooleanMask
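A quick illustration of the behavior those tests pin down: boolean masks (as a Series or a plain list) select rows, while an unaligned boolean Series is rejected rather than silently realigned (`IndexingError` is public as `pandas.errors.IndexingError` in recent pandas):

```python
import pandas as pd

ser = pd.Series([3, 1, 4, 1, 5], index=list("abcde"))
mask = ser > ser.median()

print(ser[mask].tolist())        # [4, 5]
print(ser[list(mask)].tolist())  # [4, 5] -- a plain list behaves the same

bad = pd.Series([True], index=["z"])  # index does not match ser's
try:
    ser[bad]
except pd.errors.IndexingError as err:
    print(type(err).__name__)    # IndexingError
```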
python
run-llama__llama_index
llama-index-core/tests/memory/test_memory_schema.py
{ "start": 248, "end": 3411 }
class ____: """Test schema functionality in Memory class.""" def test_from_defaults_schema_parameter(self): """Test Memory.from_defaults with and without schema parameter.""" # Without schema memory_no_schema = Memory.from_defaults( token_limit=1000, table_name="test_memory", ) assert memory_no_schema.sql_store.db_schema is None assert memory_no_schema.sql_store.table_name == "test_memory" # With schema memory_with_schema = Memory.from_defaults( token_limit=1000, table_name="test_memory", db_schema="test_schema", ) assert memory_with_schema.sql_store.db_schema == "test_schema" assert memory_with_schema.sql_store.table_name == "test_memory" def test_schema_parameter_passing(self): """Test that schema parameter is correctly passed to SQLAlchemyChatStore.""" memory = Memory.from_defaults( table_name="param_test", async_database_uri="postgresql+asyncpg://user:pass@host/db", db_schema="param_schema", ) # Verify the SQL store is correctly configured assert isinstance(memory.sql_store, SQLAlchemyChatStore) assert memory.sql_store.db_schema == "param_schema" assert memory.sql_store.table_name == "param_test" assert ( memory.sql_store.async_database_uri == "postgresql+asyncpg://user:pass@host/db" ) @pytest.mark.asyncio async def test_memory_operations_with_schema(self): """Test that Memory operations work with schema.""" memory = Memory.from_defaults( token_limit=1000, table_name="integration_test", db_schema="integration_schema", ) # Add a message message = ChatMessage(role="user", content="Hello from Memory with schema!") await memory.aput(message) # Retrieve messages messages = await memory.aget() assert len(messages) >= 1 # Find our message (there might be system messages) user_messages = [m for m in messages if m.role == "user"] assert len(user_messages) == 1 assert user_messages[0].content == "Hello from Memory with schema!" # Verify schema is preserved assert memory.sql_store.db_schema == "integration_schema" @pytest.mark.asyncio async def test_memory_reset_preserves_schema(self): """Test that memory reset preserves schema configuration.""" memory = Memory.from_defaults( token_limit=1000, table_name="reset_test", db_schema="reset_schema", ) # Add a message await memory.aput(ChatMessage(role="user", content="Before reset")) # Reset memory await memory.areset() # Verify schema is still set assert memory.sql_store.db_schema == "reset_schema" # Verify messages are cleared messages = await memory.aget_all() user_messages = [m for m in messages if m.role == "user"] assert len(user_messages) == 0
TestMemorySchema
python
falconry__falcon
tests/test_after_hooks.py
{ "start": 4900, "end": 9189 }
class ____: # Test that the decorator skips non-callables on_delete = False hook_as_class = ResourceAwareFluffiness() def __init__(self): # Test that the decorator skips non-callables self.on_patch = [] @falcon.after(fluffiness) def on_get(self, req, resp): self._capture(req, resp) @falcon.after(fluffiness) def on_head(self, req, resp): self._capture(req, resp) @falcon.after(hook_as_class) def on_put(self, req, resp): self._capture(req, resp) @falcon.after(hook_as_class.__call__) def on_post(self, req, resp): self._capture(req, resp) def _capture(self, req, resp): self.req = req self.resp = resp # -------------------------------------------------------------------- # Tests # -------------------------------------------------------------------- def test_output_validator(client): result = client.simulate_get() assert result.status_code == 723 assert result.text == json.dumps({'title': 'Tricky'}) def test_serializer(client): result = client.simulate_put() assert result.text == json.dumps({'animal': 'falcon'}) def test_hook_as_callable_class(client): result = client.simulate_post() assert 'smart' == result.text @pytest.mark.parametrize( 'resource', [ClassResourceWithURIFields(), ClassResourceWithURIFieldsChild()] ) def test_resource_with_uri_fields(client, resource): client.app.add_route('/{field1}/{field2}', resource) result = client.simulate_get('/82074/58927') assert result.status_code == 200 assert result.headers['X-Fluffiness'] == 'fluffy' assert 'X-Cuteness' not in result.headers assert resource.fields == ('82074', '58927') def test_resource_with_uri_fields_async(util): app = util.create_app(asgi=True) resource = ClassResourceWithURIFieldsAsync() app.add_route('/{field1}/{field2}', resource) result = testing.simulate_get(app, '/a/b') assert result.status_code == 200 assert result.headers['X-Fluffiness'] == 'fluffy' assert resource.fields == ('a', 'b') async def test_direct(): resource = ClassResourceWithURIFieldsAsync() req = testing.create_asgi_req() resp = util.create_resp(True) await resource.on_get(req, resp, '1', '2') assert resource.fields == ('1', '2') falcon.async_to_sync(test_direct) @pytest.mark.parametrize( 'resource', [WrappedClassResource(), WrappedClassResourceChild()] ) def test_wrapped_resource(client, resource): client.app.add_route('/wrapped', resource) result = client.simulate_get('/wrapped') assert result.status_code == 200 assert result.text == 'fluffy and innocent' assert result.headers['X-Animal'] == 'kitten' result = client.simulate_head('/wrapped') assert result.status_code == 200 assert result.headers['X-Fluffiness'] == 'fluffy' assert result.headers['X-Cuteness'] == 'cute' assert result.headers['X-Animal'] == 'kitten' result = client.simulate_post('/wrapped') assert result.status_code == 405 result = client.simulate_patch('/wrapped') assert result.status_code == 405 # Decorator should not affect the default on_options responder result = client.simulate_options('/wrapped') assert result.status_code == 200 assert not result.text assert 'X-Animal' not in result.headers def test_wrapped_resource_with_hooks_aware_of_resource(client, wrapped_resource_aware): client.app.add_route('/wrapped_aware', wrapped_resource_aware) expected = 'fluffy and cute' result = client.simulate_get('/wrapped_aware') assert result.status_code == 200 assert expected == result.text for test in ( client.simulate_head, client.simulate_put, client.simulate_post, ): result = test(path='/wrapped_aware') assert result.status_code == 200 assert wrapped_resource_aware.resp.text == expected result = 
client.simulate_patch('/wrapped_aware') assert result.status_code == 405 # Decorator should not affect the default on_options responder result = client.simulate_options('/wrapped_aware') assert result.status_code == 200 assert not result.text
ClassResourceWithAwareHooks
python
django__django
tests/forms_tests/field_tests/test_nullbooleanfield.py
{ "start": 155, "end": 3617 }
class ____(FormFieldAssertionsMixin, SimpleTestCase): def test_nullbooleanfield_clean(self): f = NullBooleanField() self.assertIsNone(f.clean("")) self.assertTrue(f.clean(True)) self.assertFalse(f.clean(False)) self.assertIsNone(f.clean(None)) self.assertFalse(f.clean("0")) self.assertTrue(f.clean("1")) self.assertIsNone(f.clean("2")) self.assertIsNone(f.clean("3")) self.assertIsNone(f.clean("hello")) self.assertTrue(f.clean("true")) self.assertFalse(f.clean("false")) def test_nullbooleanfield_2(self): # The internal value is preserved if using HiddenInput (#7753). class HiddenNullBooleanForm(Form): hidden_nullbool1 = NullBooleanField(widget=HiddenInput, initial=True) hidden_nullbool2 = NullBooleanField(widget=HiddenInput, initial=False) f = HiddenNullBooleanForm() self.assertHTMLEqual( str(f), '<input type="hidden" name="hidden_nullbool1" value="True" ' 'id="id_hidden_nullbool1">' '<input type="hidden" name="hidden_nullbool2" value="False" ' 'id="id_hidden_nullbool2">', ) def test_nullbooleanfield_3(self): class HiddenNullBooleanForm(Form): hidden_nullbool1 = NullBooleanField(widget=HiddenInput, initial=True) hidden_nullbool2 = NullBooleanField(widget=HiddenInput, initial=False) f = HiddenNullBooleanForm( {"hidden_nullbool1": "True", "hidden_nullbool2": "False"} ) self.assertIsNone(f.full_clean()) self.assertTrue(f.cleaned_data["hidden_nullbool1"]) self.assertFalse(f.cleaned_data["hidden_nullbool2"]) def test_nullbooleanfield_4(self): # Make sure we're compatible with MySQL, which uses 0 and 1 for its # boolean values (#9609). NULLBOOL_CHOICES = (("1", "Yes"), ("0", "No"), ("", "Unknown")) class MySQLNullBooleanForm(Form): nullbool0 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES)) nullbool1 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES)) nullbool2 = NullBooleanField(widget=RadioSelect(choices=NULLBOOL_CHOICES)) f = MySQLNullBooleanForm({"nullbool0": "1", "nullbool1": "0", "nullbool2": ""}) self.assertIsNone(f.full_clean()) self.assertTrue(f.cleaned_data["nullbool0"]) self.assertFalse(f.cleaned_data["nullbool1"]) self.assertIsNone(f.cleaned_data["nullbool2"]) def test_nullbooleanfield_changed(self): f = NullBooleanField() self.assertTrue(f.has_changed(False, None)) self.assertTrue(f.has_changed(None, False)) self.assertFalse(f.has_changed(None, None)) self.assertFalse(f.has_changed(False, False)) self.assertTrue(f.has_changed(True, False)) self.assertTrue(f.has_changed(True, None)) self.assertTrue(f.has_changed(True, False)) # HiddenInput widget sends string values for boolean but doesn't clean # them in value_from_datadict. self.assertFalse(f.has_changed(False, "False")) self.assertFalse(f.has_changed(True, "True")) self.assertFalse(f.has_changed(None, "")) self.assertTrue(f.has_changed(False, "True")) self.assertTrue(f.has_changed(True, "False")) self.assertTrue(f.has_changed(None, "False"))
NullBooleanFieldTest
python
doocs__leetcode
solution/1600-1699/1686.Stone Game VI/Solution.py
{ "start": 0, "end": 415 }
class ____: def stoneGameVI(self, aliceValues: List[int], bobValues: List[int]) -> int: vals = [(a + b, i) for i, (a, b) in enumerate(zip(aliceValues, bobValues))] vals.sort(reverse=True) a = sum(aliceValues[i] for _, i in vals[::2]) b = sum(bobValues[i] for _, i in vals[1::2]) if a > b: return 1 if a < b: return -1 return 0
Solution
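A worked example of the greedy above: stones are ranked by `aliceValues[i] + bobValues[i]`; Alice takes ranks 0, 2, ... and Bob takes ranks 1, 3, ... (assumes `from typing import List` as elsewhere in the repo's solutions):

```python
sol = Solution()
# vals = [(3, 0), (4, 1)] -> sorted desc: [(4, 1), (3, 0)]
# Alice takes stone 1 (worth 3 to her); Bob takes stone 0 (worth 2 to him).
print(sol.stoneGameVI([1, 3], [2, 1]))  # 1 -> Alice wins 3 to 2
```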
python
sympy__sympy
doc/ext/numpydoc.py
{ "start": 4554, "end": 4946 }
class ____: directive_mangling_map = {} def __init__(self, *a, **kw): super().__init__(*a, **kw) self.wrap_mangling_directives() def wrap_mangling_directives(self): for name, objtype in list(self.directive_mangling_map.items()): self.directives[name] = wrap_mangling_directive( self.directives[name], objtype)
ManglingDomainBase
python
getsentry__sentry
src/sentry/api/serializers/models/release.py
{ "start": 27757, "end": 29046 }
class ____(Serializer): """ The minimal representation of a release necessary for group events """ def get_attrs(self, item_list, user, **kwargs): last_commit_metadata_attrs = _get_last_commit_metadata(item_list, user) deploy_metadata_attrs = _get_last_deploy_metadata(item_list, user) result = {} for item in item_list: p = {} p.update(last_commit_metadata_attrs[item]) p.update(deploy_metadata_attrs[item]) result[item] = p return result def serialize(self, obj, attrs, user, **kwargs) -> GroupEventReleaseSerializerResponse: return { "id": obj.id, "commitCount": obj.commit_count, "data": obj.data, "dateCreated": obj.date_added, "dateReleased": obj.date_released, "deployCount": obj.total_deploys, "ref": obj.ref, "lastCommit": attrs.get("last_commit"), "lastDeploy": attrs.get("last_deploy"), "status": ReleaseStatus.to_string(obj.status), "url": obj.url, "userAgent": obj.user_agent, "version": obj.version, "versionInfo": expose_version_info(obj.version_info), }
GroupEventReleaseSerializer
python
tensorflow__tensorflow
tensorflow/python/keras/metrics.py
{ "start": 3512, "end": 13444 }
class ____(base_layer.Layer, metaclass=abc.ABCMeta): """Encapsulates metric logic and state. Args: name: (Optional) string name of the metric instance. dtype: (Optional) data type of the metric result. **kwargs: Additional layer keyword arguments. Standalone usage: ```python m = SomeMetric(...) for input in ...: m.update_state(input) print('Final result: ', m.result().numpy()) ``` Usage with `compile()` API: ```python model = tf.keras.Sequential() model.add(tf.keras.layers.Dense(64, activation='relu')) model.add(tf.keras.layers.Dense(64, activation='relu')) model.add(tf.keras.layers.Dense(10, activation='softmax')) model.compile(optimizer=tf.keras.optimizers.RMSprop(0.01), loss=tf.keras.losses.CategoricalCrossentropy(), metrics=[tf.keras.metrics.CategoricalAccuracy()]) data = np.random.random((1000, 32)) labels = np.random.random((1000, 10)) dataset = tf.data.Dataset.from_tensor_slices((data, labels)) dataset = dataset.batch(32) model.fit(dataset, epochs=10) ``` To be implemented by subclasses: * `__init__()`: All state variables should be created in this method by calling `self.add_weight()` like: `self.var = self.add_weight(...)` * `update_state()`: Has all updates to the state variables like: self.var.assign_add(...). * `result()`: Computes and returns a value for the metric from the state variables. Example subclass implementation: ```python class BinaryTruePositives(tf.keras.metrics.Metric): def __init__(self, name='binary_true_positives', **kwargs): super(BinaryTruePositives, self).__init__(name=name, **kwargs) self.true_positives = self.add_weight(name='tp', initializer='zeros') def update_state(self, y_true, y_pred, sample_weight=None): y_true = tf.cast(y_true, tf.bool) y_pred = tf.cast(y_pred, tf.bool) values = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, True)) values = tf.cast(values, self.dtype) if sample_weight is not None: sample_weight = tf.cast(sample_weight, self.dtype) sample_weight = tf.broadcast_to(sample_weight, values.shape) values = tf.multiply(values, sample_weight) self.true_positives.assign_add(tf.reduce_sum(values)) def result(self): return self.true_positives ``` """ def __init__(self, name=None, dtype=None, **kwargs): super(Metric, self).__init__(name=name, dtype=dtype, **kwargs) self.stateful = True # All metric layers are stateful. self.built = True if not base_layer_utils.v2_dtype_behavior_enabled(): # We only do this when the V2 behavior is not enabled, as when it is # enabled, the dtype already defaults to floatx. self._dtype = (backend.floatx() if dtype is None else dtypes.as_dtype(dtype).name) def __new__(cls, *args, **kwargs): obj = super(Metric, cls).__new__(cls) # If `update_state` is not in eager/tf.function and it is not from a # built-in metric, wrap it in `tf.function`. This is so that users writing # custom metrics in v1 need not worry about control dependencies and # return ops. 
if (base_layer_utils.is_in_eager_or_tf_function() or is_built_in(cls)): obj_update_state = obj.update_state def update_state_fn(*args, **kwargs): control_status = ag_ctx.control_status_ctx() ag_update_state = autograph.tf_convert(obj_update_state, control_status) return ag_update_state(*args, **kwargs) else: if isinstance(obj.update_state, def_function.Function): update_state_fn = obj.update_state else: update_state_fn = def_function.function(obj.update_state) obj.update_state = types.MethodType( metrics_utils.update_state_wrapper(update_state_fn), obj) obj_result = obj.result def result_fn(*args, **kwargs): control_status = ag_ctx.control_status_ctx() ag_result = autograph.tf_convert(obj_result, control_status) return ag_result(*args, **kwargs) obj.result = types.MethodType(metrics_utils.result_wrapper(result_fn), obj) return obj def __call__(self, *args, **kwargs): """Accumulates statistics and then computes metric result value. Args: *args: **kwargs: A mini-batch of inputs to the Metric, passed on to `update_state()`. Returns: The metric value tensor. """ def replica_local_fn(*args, **kwargs): """Updates the state of the metric in a replica-local context.""" if any( isinstance(arg, keras_tensor.KerasTensor) for arg in nest.flatten((args, kwargs))): update_op = None else: update_op = self.update_state(*args, **kwargs) # pylint: disable=not-callable update_ops = [] if update_op is not None: update_ops.append(update_op) with ops.control_dependencies(update_ops): result_t = self.result() # pylint: disable=not-callable # We are adding the metric object as metadata on the result tensor. # This is required when we want to use a metric with `add_metric` API on # a Model/Layer in graph mode. This metric instance will later be used # to reset variable state after each epoch of training. # Example: # model = Model() # mean = Mean() # model.add_metric(mean(values), name='mean') result_t._metric_obj = self # pylint: disable=protected-access return result_t from tensorflow.python.keras.distribute import distributed_training_utils # pylint:disable=g-import-not-at-top return distributed_training_utils.call_replica_local_fn( replica_local_fn, *args, **kwargs) @property def dtype(self): return self._dtype def get_config(self): """Returns the serializable config of the metric.""" return {'name': self.name, 'dtype': self.dtype} def reset_state(self): """Resets all of the metric state variables. This function is called between epochs/steps, when a metric is evaluated during training. """ if not generic_utils.is_default(self.reset_states): warnings.warn('Metric %s implements a `reset_states()` method; rename it ' 'to `reset_state()` (without the final "s"). The name ' '`reset_states()` has been deprecated to improve API ' 'consistency.' % (self.__class__.__name__,)) return self.reset_states() else: backend.batch_set_value([(v, 0) for v in self.variables]) @abc.abstractmethod def update_state(self, *args, **kwargs): """Accumulates statistics for the metric. Note: This function is executed as a graph function in graph mode. This means: a) Operations on the same resource are executed in textual order. This should make it easier to do things like add the updated value of a variable to another, for example. b) You don't need to worry about collecting the update ops to execute. All update ops added to the graph by this function will be executed. As a result, code should generally work the same way with graph or eager execution. Args: *args: **kwargs: A mini-batch of inputs to the Metric. 
""" raise NotImplementedError('Must be implemented in subclasses.') @abc.abstractmethod def result(self): """Computes and returns the metric value tensor. Result computation is an idempotent operation that simply calculates the metric value using the state variables. """ raise NotImplementedError('Must be implemented in subclasses.') ### For use by subclasses ### @doc_controls.for_subclass_implementers def add_weight( self, name, shape=(), aggregation=variables_module.VariableAggregation.SUM, synchronization=variables_module.VariableSynchronization.ON_READ, initializer=None, dtype=None): """Adds state variable. Only for use by subclasses.""" if distribute_lib.has_strategy(): strategy = distribute_lib.get_strategy() else: strategy = None # TODO(b/120571621): Make `ON_READ` work with Keras metrics on TPU. if backend.is_tpu_strategy(strategy): synchronization = variables_module.VariableSynchronization.ON_WRITE with ops.init_scope(): return super(Metric, self).add_weight( name=name, shape=shape, dtype=self._dtype if dtype is None else dtype, trainable=False, initializer=initializer, collections=[], synchronization=synchronization, aggregation=aggregation) ### End: For use by subclasses ### @property def trainable_weights(self): # Overridden from Layer class to track submetric weights. if self.trainable: trainable_weights = self._trainable_weights for m in self._metrics: trainable_weights += m.trainable_weights return self._dedup_weights(trainable_weights) else: return [] @property def non_trainable_weights(self): # Overridden from Layer class to track submetric weights. if self.trainable: non_trainable_weights = self._non_trainable_weights for m in self._metrics: non_trainable_weights += m.non_trainable_weights else: non_trainable_weights = ( self._non_trainable_weights + self._trainable_weights) for m in self._metrics: non_trainable_weights += m.weights return self._dedup_weights(non_trainable_weights) @property def _trackable_saved_model_saver(self): return metric_serialization.MetricSavedModelSaver(self) @generic_utils.default @doc_controls.do_not_generate_docs def reset_states(self): # Backwards compatibility alias of `reset_state`. New classes should # only implement `reset_state`. return self.reset_state()
Metric
python
bokeh__bokeh
src/bokeh/models/renderers/renderer.py
{ "start": 1872, "end": 2376 }
class ____(Model): """A collection of renderers. """ # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) visible = Bool(default=True, help=""" Makes all grouped renderers visible or not. """) #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- @abstract
RendererGroup
python
tornadoweb__tornado
tornado/test/iostream_test.py
{ "start": 44036, "end": 47291 }
class ____(AsyncTestCase): # This test ensures that hostname checks are working correctly after # #3337 revealed that we have no test coverage in this area, and we # removed a manual hostname check that was needed only for very old # versions of python. def setUp(self): super().setUp() self.listener, self.port = bind_unused_port() def accept_callback(connection, address): ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) ssl_ctx.load_cert_chain( os.path.join(os.path.dirname(__file__), "test.crt"), os.path.join(os.path.dirname(__file__), "test.key"), ) connection = ssl_ctx.wrap_socket( connection, server_side=True, do_handshake_on_connect=False, ) SSLIOStream(connection) netutil.add_accept_handler(self.listener, accept_callback) # Our self-signed cert is its own CA. We have to pass the CA check before # the hostname check will be performed. self.client_ssl_ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH) self.client_ssl_ctx.load_verify_locations( os.path.join(os.path.dirname(__file__), "test.crt") ) def tearDown(self): self.io_loop.remove_handler(self.listener.fileno()) self.listener.close() super().tearDown() @gen_test async def test_match(self): stream = SSLIOStream(socket.socket(), ssl_options=self.client_ssl_ctx) await stream.connect( ("127.0.0.1", self.port), server_hostname="foo.example.com", ) stream.close() @gen_test async def test_no_match(self): stream = SSLIOStream(socket.socket(), ssl_options=self.client_ssl_ctx) with ExpectLog( gen_log, ".*alert bad certificate", level=logging.WARNING, required=platform.system() != "Windows", ): with self.assertRaises(ssl.SSLCertVerificationError): with ExpectLog( gen_log, ".*(certificate verify failed: Hostname mismatch)", level=logging.WARNING, ): await stream.connect( ("127.0.0.1", self.port), server_hostname="bar.example.com", ) # The server logs a warning while cleaning up the failed connection. # Unfortunately there's no good hook to wait for this logging. # It doesn't seem to happen on windows; I'm not sure why. if platform.system() != "Windows": await asyncio.sleep(0.1) @gen_test async def test_check_disabled(self): # check_hostname can be set to false and the connection will succeed even though it doesn't # have the right hostname. self.client_ssl_ctx.check_hostname = False stream = SSLIOStream(socket.socket(), ssl_options=self.client_ssl_ctx) await stream.connect( ("127.0.0.1", self.port), server_hostname="bar.example.com", ) @skipIfNonUnix
TestIOStreamCheckHostname
python
getsentry__sentry
src/sentry/api/endpoints/frontend_version.py
{ "start": 324, "end": 596 }
class ____(Endpoint): owner = ApiOwner.HYBRID_CLOUD publish_status = {"GET": ApiPublishStatus.PRIVATE} permission_classes = () def get(self, request: Request) -> Response: return Response({"version": get_frontend_commit_sha()})
FrontendVersionEndpoint
python
sqlalchemy__sqlalchemy
lib/sqlalchemy/sql/schema.py
{ "start": 233279, "end": 239601 }
class ____(IdentityOptions, FetchedValue, SchemaItem): """Defines an identity column, i.e. "GENERATED { ALWAYS | BY DEFAULT } AS IDENTITY" syntax. The :class:`.Identity` construct is an inline construct added to the argument list of a :class:`_schema.Column` object:: from sqlalchemy import Identity Table( "foo", metadata_obj, Column("id", Integer, Identity()), Column("description", Text), ) See the linked documentation below for complete details. .. versionadded:: 1.4 .. seealso:: :ref:`identity_ddl` """ __visit_name__ = "identity_column" is_identity = True @util.deprecated_params( order=( "2.1", "This parameter is supported only by Oracle Database, " "use ``oracle_order`` instead.", ), on_null=( "2.1", "This parameter is supported only by Oracle Database, " "use ``oracle_on_null`` instead.", ), ) def __init__( self, always: Optional[bool] = False, on_null: Optional[bool] = None, start: Optional[int] = None, increment: Optional[int] = None, minvalue: Optional[int] = None, maxvalue: Optional[int] = None, nominvalue: Optional[bool] = None, nomaxvalue: Optional[bool] = None, cycle: Optional[bool] = None, cache: Optional[int] = None, order: Optional[bool] = None, **dialect_kw: Any, ) -> None: """Construct a GENERATED { ALWAYS | BY DEFAULT } AS IDENTITY DDL construct to accompany a :class:`_schema.Column`. See the :class:`.Sequence` documentation for a complete description of most parameters. .. note:: MSSQL supports this construct as the preferred alternative to generate an IDENTITY on a column, but it uses non-standard syntax that only supports :paramref:`_schema.Identity.start` and :paramref:`_schema.Identity.increment`. All other parameters are ignored. :param always: A boolean, that indicates the type of identity column. If ``False`` is specified, the default, then the user-specified value takes precedence. If ``True`` is specified, a user-specified value is not accepted (on some backends, like PostgreSQL, OVERRIDING SYSTEM VALUE, or similar, may be specified in an INSERT to override the sequence value). Some backends also have a default value for this parameter, ``None`` can be used to omit rendering this part in the DDL. It will be treated as ``False`` if a backend does not have a default value. :param on_null: Set to ``True`` to specify ON NULL in conjunction with an ``always=False`` identity column. This option is only supported on some backends, like Oracle Database. :param start: the starting index of the sequence. :param increment: the increment value of the sequence. :param minvalue: the minimum value of the sequence. :param maxvalue: the maximum value of the sequence. :param nominvalue: no minimum value of the sequence. :param nomaxvalue: no maximum value of the sequence. :param cycle: allows the sequence to wrap around when the maxvalue or minvalue has been reached. :param cache: optional integer value; number of future values in the sequence which are calculated in advance. :param order: optional boolean value; if true, renders the ORDER keyword. """ self.dialect_options if on_null is not None: if "oracle_on_null" in dialect_kw: raise exc.ArgumentError( "Cannot specify both 'on_null' and 'oracle_on_null'. " "Please use only 'oracle_on_null'." 
) dialect_kw["oracle_on_null"] = on_null IdentityOptions.__init__( self, start=start, increment=increment, minvalue=minvalue, maxvalue=maxvalue, nominvalue=nominvalue, nomaxvalue=nomaxvalue, cycle=cycle, cache=cache, order=order, **dialect_kw, ) self.always = always self.column = None @property def on_null(self) -> Optional[bool]: """Alias of the ``dialect_kwargs`` ``'oracle_on_null'``. .. deprecated:: 2.1 The 'on_null' attribute is deprecated. """ value: Optional[bool] = self.dialect_kwargs.get("oracle_on_null") return value def _set_parent(self, parent: SchemaEventTarget, **kw: Any) -> None: assert isinstance(parent, Column) if not isinstance( parent.server_default, (type(None), Identity) ) or not isinstance(parent.server_onupdate, type(None)): raise exc.ArgumentError( "A column with an Identity object cannot specify a " "server_default or a server_onupdate argument" ) if parent.autoincrement is False: raise exc.ArgumentError( "A column with an Identity object cannot specify " "autoincrement=False" ) self.column = parent parent.identity = self if parent._user_defined_nullable is NULL_UNSPECIFIED: parent.nullable = False parent.server_default = self def _as_for_update(self, for_update: bool) -> FetchedValue: return self @util.deprecated( "1.4", "The :meth:`_schema.Identity.copy` method is deprecated " "and will be removed in a future release.", ) def copy(self, **kw: Any) -> Identity: return self._copy(**kw) def _copy(self, **kw: Any) -> Identity: i = Identity(**self._as_dict(), **self.dialect_kwargs) return self._schema_item_copy(i) def _as_dict(self) -> Dict[str, Any]: return { # always=None means something different than always=False "always": self.always, **super()._as_dict(), }
Identity
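A short construction sketch for the `Identity` column above, following the shape of the docstring's own example:

```python
from sqlalchemy import Column, Identity, Integer, MetaData, Table, Text

metadata_obj = MetaData()
events = Table(
    "events",
    metadata_obj,
    # Renders GENERATED BY DEFAULT AS IDENTITY (START WITH 100 INCREMENT BY 10)
    # on backends that support the standard syntax.
    Column("id", Integer, Identity(start=100, increment=10), primary_key=True),
    Column("description", Text),
)
# _set_parent makes the column NOT NULL unless nullability was set explicitly.
print(events.c.id.nullable)  # False
```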
python
kamyu104__LeetCode-Solutions
Python/sum-of-digits-of-string-after-convert.py
{ "start": 61, "end": 523 }
class ____(object): def getLucky(self, s, k): """ :type s: str :type k: int :rtype: int """ total = reduce(lambda total, x: total+sum(divmod((ord(x)-ord('a')+1), 10)), s, 0) while k > 1 and total > 9: new_total = 0 while total: total, x = divmod(total, 10) new_total += x total = new_total k -= 1 return total
Solution
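A worked example for the sample above (Python 2, where `reduce` is a builtin; on Python 3 you would `from functools import reduce`). With `s = "iiii"`, every letter maps to 9, the reduce folds in the first digit-sum pass, and `k=1` skips the loop:

```python
sol = Solution()
print(sol.getLucky("iiii", 1))  # 36: "9999" -> 9 + 9 + 9 + 9
```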
python
pytorch__pytorch
torch/_inductor/template_heuristics/params.py
{ "start": 97, "end": 633 }
class ____(ABC): """Abstract base class for kernel template parameters.""" @abstractmethod def to_kwargs(self) -> dict[str, Any]: """Convert params to kwargs dict for template.choice_or_none()""" @abstractmethod def to_serializeable_dict(self) -> dict[str, Any]: """Convert params to serializable dict for storage/caching""" @classmethod @abstractmethod def from_dict(cls, data: dict[str, Any]) -> KernelTemplateParams: """Create params instance from dict"""
KernelTemplateParams
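A hypothetical concrete subclass to show the contract; `GemmParams` and its fields are illustrative, not part of Inductor:

```python
from __future__ import annotations

from dataclasses import asdict, dataclass
from typing import Any


@dataclass
class GemmParams(KernelTemplateParams):
    block_m: int
    block_n: int
    num_warps: int = 4

    def to_kwargs(self) -> dict[str, Any]:
        return asdict(self)

    def to_serializeable_dict(self) -> dict[str, Any]:
        return asdict(self)

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> GemmParams:
        return cls(**data)


params = GemmParams(block_m=64, block_n=128)
assert GemmParams.from_dict(params.to_serializeable_dict()) == params
```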
python
wandb__wandb
wandb/apis/public/files.py
{ "start": 1984, "end": 6936 }
class ____(SizedPaginator["File"]): """A lazy iterator over a collection of `File` objects. Access and manage files uploaded to W&B during a run. Handles pagination automatically when iterating through large collections of files. Example: ```python from wandb.apis.public.files import Files from wandb.apis.public.api import Api api = Api() # Example run object run = api.run("entity/project/run-id") # Create a Files object to iterate over files in the run files = Files(api.client, run) # Iterate over files for file in files: print(file.name) print(file.url) print(file.size) # Download the file file.download(root="download_directory", replace=True) ``` """ def _get_query(self): """Generate query dynamically based on server capabilities.""" with_internal_id = _server_provides_internal_id_for_project(self.client) return gql( f""" query RunFiles($project: String!, $entity: String!, $name: String!, $fileCursor: String, $fileLimit: Int = 50, $fileNames: [String] = [], $upload: Boolean = false, $pattern: String) {{ project(name: $project, entityName: $entity) {{ {"internalId" if with_internal_id else ""} run(name: $name) {{ fileCount ...RunFilesFragment }} }} }} {FILE_FRAGMENT} """ ) def __init__( self, client: RetryingClient, run: Run, names: list[str] | None = None, per_page: int = 50, upload: bool = False, pattern: str | None = None, ): """Initialize a lazy iterator over a collection of `File` objects. Files are retrieved in pages from the W&B server as needed. Args: client: The API client used to query the W&B server run: The run object that contains the files names (list, optional): A list of file names to filter the files per_page (int, optional): The number of files to fetch per page upload (bool, optional): If `True`, fetch the upload URL for each file pattern (str, optional): Pattern to match when returning files from W&B This pattern uses MySQL's LIKE syntax, so matching all files that end with .json would be "%.json". If both names and pattern are provided, a ValueError will be raised. """ if names and pattern: raise ValueError( "Querying for files by both names and pattern is not supported." " Please provide either a list of names or a pattern to match.", ) self.run = run variables = { "project": run.project, "entity": run.entity, "name": run.id, "fileNames": names or [], "upload": upload, "pattern": pattern, } super().__init__(client, variables, per_page) def _update_response(self) -> None: """Fetch and store the response data for the next page using dynamic query.""" self.last_response = self.client.execute( self._get_query(), variable_values=self.variables ) @property def _length(self): """ Returns total number of files. <!-- lazydoc-ignore: internal --> """ if not self.last_response: self._load_page() return self.last_response["project"]["run"]["fileCount"] @property def more(self): """Returns whether there are more files to fetch. <!-- lazydoc-ignore: internal --> """ if self.last_response: return self.last_response["project"]["run"]["files"]["pageInfo"][ "hasNextPage" ] else: return True @property def cursor(self): """Returns the cursor position for pagination of file results. <!-- lazydoc-ignore: internal --> """ if self.last_response: return self.last_response["project"]["run"]["files"]["edges"][-1]["cursor"] else: return None def update_variables(self): """Updates the GraphQL query variables for pagination. <!-- lazydoc-ignore: internal --> """ self.variables.update({"fileLimit": self.per_page, "fileCursor": self.cursor}) def convert_objects(self): """Converts GraphQL edges to File objects. 
<!-- lazydoc-ignore: internal --> """ return [ File(self.client, r["node"], self.run) for r in self.last_response["project"]["run"]["files"]["edges"] ] def __repr__(self): return "<Files {} ({})>".format("/".join(self.run.path), len(self))
Files
python
ray-project__ray
rllib/models/preprocessors.py
{ "start": 10612, "end": 12332 }
class ____(Preprocessor): """Preprocesses each dict value, then flattens it all into a vector. RLlib models will unpack the flattened output before _build_layers_v2(). """ @override(Preprocessor) def _init_shape(self, obs_space: gym.Space, options: dict) -> List[int]: assert isinstance(self._obs_space, gym.spaces.Dict) size = 0 self.preprocessors = [] for space in self._obs_space.spaces.values(): logger.debug("Creating sub-preprocessor for {}".format(space)) preprocessor_class = get_preprocessor(space) if preprocessor_class is not None: preprocessor = preprocessor_class(space, self._options) size += preprocessor.size else: preprocessor = None size += int(np.prod(space.shape)) self.preprocessors.append(preprocessor) return (size,) @override(Preprocessor) def transform(self, observation: TensorType) -> np.ndarray: self.check_shape(observation) array = np.zeros(self.shape, dtype=np.float32) self.write(observation, array, 0) return array @override(Preprocessor) def write(self, observation: TensorType, array: np.ndarray, offset: int) -> None: if not isinstance(observation, OrderedDict): observation = OrderedDict(sorted(observation.items())) assert len(observation) == len(self.preprocessors), ( len(observation), len(self.preprocessors), ) for o, p in zip(observation.values(), self.preprocessors): p.write(o, array, offset) offset += p.size @OldAPIStack
DictFlatteningPreprocessor
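A sketch of what the flattening buys, assuming the surrounding RLlib preprocessor machinery (where a `Discrete(3)` sub-space one-hot encodes to 3 slots) and a classic `gym` install:

```python
import gym
import numpy as np

space = gym.spaces.Dict({
    "goal": gym.spaces.Discrete(3),
    "pos": gym.spaces.Box(low=-1.0, high=1.0, shape=(2,), dtype=np.float32),
})
prep = DictFlatteningPreprocessor(space)

obs = {"goal": 1, "pos": np.array([0.5, -0.5], dtype=np.float32)}
flat = prep.transform(obs)
print(flat.shape)  # (5,): 3 one-hot slots + 2 Box dims
```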
python
altair-viz__altair
altair/expr/__init__.py
{ "start": 2097, "end": 81699 }
class ____(_ExprRef, metaclass=_ExprMeta): """ Utility providing *constants* and *classmethods* to construct expressions. `Expressions`_ can be used to write basic formulas that enable custom interactions. Alternatively, an `inline expression`_ may be defined via :class:`expr()`. Parameters ---------- expr: str A `vega expression`_ string. Returns ------- ``ExprRef`` .. _Expressions: https://altair-viz.github.io/user_guide/interactions/expressions.html .. _inline expression: https://altair-viz.github.io/user_guide/interactions/expressions.html#inline-expressions .. _vega expression: https://vega.github.io/vega/docs/expressions/ Examples -------- >>> import altair as alt >>> bind_range = alt.binding_range(min=100, max=300, name="Slider value: ") >>> param_width = alt.param(bind=bind_range, name="param_width") >>> param_color = alt.param( ... expr=alt.expr.if_(param_width < 200, "red", "black"), ... name="param_color", ... ) >>> y = alt.Y("yval").axis(titleColor=param_color) >>> y Y({ axis: {'titleColor': Parameter('param_color', VariableParameter({ expr: if((param_width < 200),'red','black'), name: 'param_color' }))}, shorthand: 'yval' }) .. _Number.isNaN: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number/isNan .. _Number.isFinite: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number/isFinite .. _Math.abs: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/abs .. _Math.acos: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/acos .. _Math.asin: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/asin .. _Math.atan: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/atan .. _Math.atan2: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/atan2 .. _Math.ceil: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/ceil .. _Math.cos: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/cos .. _Math.exp: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/exp .. _Math.floor: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/floor .. _Math.hypot: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/hypot .. _Math.log: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/log .. _Math.max: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/max .. _Math.min: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/min .. _Math.pow: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/pow .. _Math.random: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/random .. _Math.round: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/round .. _Math.sin: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/sin .. _Math.sqrt: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/sqrt .. _Math.tan: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/tan .. _normal (Gaussian) probability distribution: https://en.wikipedia.org/wiki/Normal_distribution .. 
_cumulative distribution function: https://en.wikipedia.org/wiki/Cumulative_distribution_function .. _probability density function: https://en.wikipedia.org/wiki/Probability_density_function .. _log-normal probability distribution: https://en.wikipedia.org/wiki/Log-normal_distribution .. _continuous uniform probability distribution: https://en.wikipedia.org/wiki/Continuous_uniform_distribution .. _*unit*: https://vega.github.io/vega/docs/api/time/#time-units .. _ascending from Vega Utils: https://vega.github.io/vega/docs/api/util/#ascending .. _JavaScript's String.replace: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/replace .. _Base64: https://developer.mozilla.org/en-US/docs/Glossary/Base64 .. _ASCII: https://developer.mozilla.org/en-US/docs/Glossary/ASCII .. _Window.btoa(): https://developer.mozilla.org/en-US/docs/Web/API/Window/btoa .. _Window.atob(): https://developer.mozilla.org/en-US/docs/Web/API/Window/atob .. _d3-format specifier: https://github.com/d3/d3-format/ .. _*units*: https://vega.github.io/vega/docs/api/time/#time-units .. _timeUnitSpecifier API documentation: https://vega.github.io/vega/docs/api/time/#timeUnitSpecifier .. _timeFormat: https://vega.github.io/vega/docs/expressions/#timeFormat .. _utcFormat: https://vega.github.io/vega/docs/expressions/#utcFormat .. _d3-time-format specifier: https://github.com/d3/d3-time-format/ .. _TimeMultiFormat object: https://vega.github.io/vega/docs/types/#TimeMultiFormat .. _UTC: https://en.wikipedia.org/wiki/Coordinated_Universal_Time .. _JavaScript's RegExp: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/RegExp .. _RGB: https://en.wikipedia.org/wiki/RGB_color_model .. _d3-color's rgb function: https://github.com/d3/d3-color#rgb .. _HSL: https://en.wikipedia.org/wiki/HSL_and_HSV .. _d3-color's hsl function: https://github.com/d3/d3-color#hsl .. _CIE LAB: https://en.wikipedia.org/wiki/Lab_color_space#CIELAB .. _d3-color's lab function: https://github.com/d3/d3-color#lab .. _HCL: https://en.wikipedia.org/wiki/Lab_color_space#CIELAB .. _d3-color's hcl function: https://github.com/d3/d3-color#hcl .. _W3C Web Content Accessibility Guidelines: https://www.w3.org/TR/2008/REC-WCAG20-20081211/#contrast-ratiodef .. _continuous color scheme: https://vega.github.io/vega/docs/schemes .. _geoArea: https://github.com/d3/d3-geo#geoArea .. _path.area: https://github.com/d3/d3-geo#path_area .. _geoBounds: https://github.com/d3/d3-geo#geoBounds .. _path.bounds: https://github.com/d3/d3-geo#path_bounds .. _geoCentroid: https://github.com/d3/d3-geo#geoCentroid .. _path.centroid: https://github.com/d3/d3-geo#path_centroid .. _window.screen: https://developer.mozilla.org/en-US/docs/Web/API/Window/screen """ @override def __new__(cls: type[_ExprRef], expr: str) -> _ExprRef: # type: ignore[misc] return _ExprRef(expr=expr) @classmethod def isArray(cls, value: IntoExpression, /) -> Expression: """Returns true if ``value`` is an array, false otherwise.""" return FunctionExpression("isArray", (value,)) @classmethod def isBoolean(cls, value: IntoExpression, /) -> Expression: """Returns true if ``value`` is a boolean (``true`` or ``false``), false otherwise.""" return FunctionExpression("isBoolean", (value,)) @classmethod def isDate(cls, value: IntoExpression, /) -> Expression: """ Returns true if ``value`` is a Date object, false otherwise. This method will return false for timestamp numbers or date-formatted strings; it recognizes Date objects only. 
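A small usage sketch, added for illustration and not part of the Vega docs; the ``datum`` field name ``timestamp`` is hypothetical: >>> import altair as alt >>> check = alt.expr.isDate(alt.datum.timestamp) # wraps the field access in an isDate(datum.timestamp) expression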
""" return FunctionExpression("isDate", (value,)) @classmethod def isDefined(cls, value: IntoExpression, /) -> Expression: """ Returns true if ``value`` is a defined value, false if ``value`` equals ``undefined``. This method will return true for ``null`` and ``NaN`` values. """ return FunctionExpression("isDefined", (value,)) @classmethod def isNumber(cls, value: IntoExpression, /) -> Expression: """ Returns true if ``value`` is a number, false otherwise. ``NaN`` and ``Infinity`` are considered numbers. """ return FunctionExpression("isNumber", (value,)) @classmethod def isObject(cls, value: IntoExpression, /) -> Expression: """Returns true if ``value`` is an object (including arrays and Dates), false otherwise.""" return FunctionExpression("isObject", (value,)) @classmethod def isRegExp(cls, value: IntoExpression, /) -> Expression: """Returns true if ``value`` is a RegExp (regular expression) object, false otherwise.""" return FunctionExpression("isRegExp", (value,)) @classmethod def isString(cls, value: IntoExpression, /) -> Expression: """Returns true if ``value`` is a string, false otherwise.""" return FunctionExpression("isString", (value,)) @classmethod def isValid(cls, value: IntoExpression, /) -> Expression: """Returns true if ``value`` is not ``null``, ``undefined``, or ``NaN``, false otherwise.""" return FunctionExpression("isValid", (value,)) @classmethod def toBoolean(cls, value: IntoExpression, /) -> Expression: """ Coerces the input ``value`` to a string. Null values and empty strings are mapped to ``null``. """ return FunctionExpression("toBoolean", (value,)) @classmethod def toDate(cls, value: IntoExpression, /) -> Expression: """ Coerces the input ``value`` to a Date instance. Null values and empty strings are mapped to ``null``. If an optional *parser* function is provided, it is used to perform date parsing, otherwise ``Date.parse`` is used. Be aware that ``Date.parse`` has different implementations across browsers! """ return FunctionExpression("toDate", (value,)) @classmethod def toNumber(cls, value: IntoExpression, /) -> Expression: """ Coerces the input ``value`` to a number. Null values and empty strings are mapped to ``null``. """ return FunctionExpression("toNumber", (value,)) @classmethod def toString(cls, value: IntoExpression, /) -> Expression: """ Coerces the input ``value`` to a string. Null values and empty strings are mapped to ``null``. """ return FunctionExpression("toString", (value,)) @classmethod def if_( cls, test: IntoExpression, thenValue: IntoExpression, elseValue: IntoExpression, /, ) -> Expression: """ If ``test`` is truthy, returns ``thenValue``. Otherwise, returns ``elseValue``. The *if* function is equivalent to the ternary operator ``a ? b : c``. """ return FunctionExpression("if", (test, thenValue, elseValue)) @classmethod def isNaN(cls, value: IntoExpression, /) -> Expression: """ Returns true if ``value`` is not a number. Same as JavaScript's `Number.isNaN`_. .. _Number.isNaN: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number/isNan """ return FunctionExpression("isNaN", (value,)) @classmethod def isFinite(cls, value: IntoExpression, /) -> Expression: """ Returns true if ``value`` is a finite number. Same as JavaScript's `Number.isFinite`_. .. 
_Number.isFinite: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number/isFinite """ return FunctionExpression("isFinite", (value,)) @classmethod def abs(cls, value: IntoExpression, /) -> Expression: """ Returns the absolute value of ``value``. Same as JavaScript's `Math.abs`_. .. _Math.abs: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/abs """ return FunctionExpression("abs", (value,)) @classmethod def acos(cls, value: IntoExpression, /) -> Expression: """ Trigonometric arccosine. Same as JavaScript's `Math.acos`_. .. _Math.acos: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/acos """ return FunctionExpression("acos", (value,)) @classmethod def asin(cls, value: IntoExpression, /) -> Expression: """ Trigonometric arcsine. Same as JavaScript's `Math.asin`_. .. _Math.asin: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/asin """ return FunctionExpression("asin", (value,)) @classmethod def atan(cls, value: IntoExpression, /) -> Expression: """ Trigonometric arctangent. Same as JavaScript's `Math.atan`_. .. _Math.atan: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/atan """ return FunctionExpression("atan", (value,)) @classmethod def atan2(cls, dy: IntoExpression, dx: IntoExpression, /) -> Expression: """ Returns the arctangent of *dy / dx*. Same as JavaScript's `Math.atan2`_. .. _Math.atan2: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/atan2 """ return FunctionExpression("atan2", (dy, dx)) @classmethod def ceil(cls, value: IntoExpression, /) -> Expression: """ Rounds ``value`` to the nearest integer of equal or greater value. Same as JavaScript's `Math.ceil`_. .. _Math.ceil: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/ceil """ return FunctionExpression("ceil", (value,)) @classmethod def clamp( cls, value: IntoExpression, min: IntoExpression, max: IntoExpression, / ) -> Expression: """Restricts ``value`` to be between the specified ``min`` and ``max``.""" return FunctionExpression("clamp", (value, min, max)) @classmethod def cos(cls, value: IntoExpression, /) -> Expression: """ Trigonometric cosine. Same as JavaScript's `Math.cos`_. .. _Math.cos: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/cos """ return FunctionExpression("cos", (value,)) @classmethod def exp(cls, exponent: IntoExpression, /) -> Expression: """ Returns the value of *e* raised to the provided ``exponent``. Same as JavaScript's `Math.exp`_. .. _Math.exp: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/exp """ return FunctionExpression("exp", (exponent,)) @classmethod def floor(cls, value: IntoExpression, /) -> Expression: """ Rounds ``value`` to the nearest integer of equal or lower value. Same as JavaScript's `Math.floor`_. .. _Math.floor: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/floor """ return FunctionExpression("floor", (value,)) @classmethod def hypot(cls, value: IntoExpression, /) -> Expression: """ Returns the square root of the sum of squares of its arguments. Same as JavaScript's `Math.hypot`_. .. 
_Math.hypot: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/hypot """ return FunctionExpression("hypot", (value,)) @classmethod def log(cls, value: IntoExpression, /) -> Expression: """ Returns the natural logarithm of ``value``. Same as JavaScript's `Math.log`_. .. _Math.log: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/log """ return FunctionExpression("log", (value,)) @classmethod def max( cls, value1: IntoExpression, value2: IntoExpression, *args: Any ) -> Expression: """ Returns the maximum argument value. Same as JavaScript's `Math.max`_. .. _Math.max: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/max """ return FunctionExpression("max", (value1, value2, *args)) @classmethod def min( cls, value1: IntoExpression, value2: IntoExpression, *args: Any ) -> Expression: """ Returns the minimum argument value. Same as JavaScript's `Math.min`_. .. _Math.min: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/min """ return FunctionExpression("min", (value1, value2, *args)) @classmethod def pow(cls, value: IntoExpression, exponent: IntoExpression, /) -> Expression: """ Returns ``value`` raised to the given ``exponent``. Same as JavaScript's `Math.pow`_. .. _Math.pow: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/pow """ return FunctionExpression("pow", (value, exponent)) @classmethod def random(cls) -> Expression: """ Returns a pseudo-random number in the range [0,1). Same as JavaScript's `Math.random`_. .. _Math.random: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/random """ return FunctionExpression("random", ()) @classmethod def round(cls, value: IntoExpression, /) -> Expression: """ Rounds ``value`` to the nearest integer. Same as JavaScript's `Math.round`_. .. _Math.round: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/round """ return FunctionExpression("round", (value,)) @classmethod def sin(cls, value: IntoExpression, /) -> Expression: """ Trigonometric sine. Same as JavaScript's `Math.sin`_. .. _Math.sin: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/sin """ return FunctionExpression("sin", (value,)) @classmethod def sqrt(cls, value: IntoExpression, /) -> Expression: """ Square root function. Same as JavaScript's `Math.sqrt`_. .. _Math.sqrt: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/sqrt """ return FunctionExpression("sqrt", (value,)) @classmethod def tan(cls, value: IntoExpression, /) -> Expression: """ Trigonometric tangent. Same as JavaScript's `Math.tan`_. .. _Math.tan: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math/tan """ return FunctionExpression("tan", (value,)) @classmethod def sampleNormal( cls, mean: IntoExpression = None, stdev: IntoExpression = None, / ) -> Expression: """ Returns a sample from a univariate `normal (Gaussian) probability distribution`_ with specified ``mean`` and standard deviation ``stdev``. If unspecified, the mean defaults to ``0`` and the standard deviation defaults to ``1``. .. 
_normal (Gaussian) probability distribution: https://en.wikipedia.org/wiki/Normal_distribution """ return FunctionExpression("sampleNormal", (mean, stdev)) @classmethod def cumulativeNormal( cls, value: IntoExpression, mean: IntoExpression = None, stdev: IntoExpression = None, /, ) -> Expression: """ Returns the value of the `cumulative distribution function`_ at the given input domain ``value`` for a normal distribution with specified ``mean`` and standard deviation ``stdev``. If unspecified, the mean defaults to ``0`` and the standard deviation defaults to ``1``. .. _cumulative distribution function: https://en.wikipedia.org/wiki/Cumulative_distribution_function """ return FunctionExpression("cumulativeNormal", (value, mean, stdev)) @classmethod def densityNormal( cls, value: IntoExpression, mean: IntoExpression = None, stdev: IntoExpression = None, /, ) -> Expression: """ Returns the value of the `probability density function`_ at the given input domain ``value``, for a normal distribution with specified ``mean`` and standard deviation ``stdev``. If unspecified, the mean defaults to ``0`` and the standard deviation defaults to ``1``. .. _probability density function: https://en.wikipedia.org/wiki/Probability_density_function """ return FunctionExpression("densityNormal", (value, mean, stdev)) @classmethod def quantileNormal( cls, probability: IntoExpression, mean: IntoExpression = None, stdev: IntoExpression = None, /, ) -> Expression: """ Returns the quantile value (the inverse of the `cumulative distribution function`_) for the given input ``probability``, for a normal distribution with specified ``mean`` and standard deviation ``stdev``. If unspecified, the mean defaults to ``0`` and the standard deviation defaults to ``1``. .. _cumulative distribution function: https://en.wikipedia.org/wiki/Cumulative_distribution_function """ return FunctionExpression("quantileNormal", (probability, mean, stdev)) @classmethod def sampleLogNormal( cls, mean: IntoExpression = None, stdev: IntoExpression = None, / ) -> Expression: """ Returns a sample from a univariate `log-normal probability distribution`_ with specified log ``mean`` and log standard deviation ``stdev``. If unspecified, the log mean defaults to ``0`` and the log standard deviation defaults to ``1``. .. _log-normal probability distribution: https://en.wikipedia.org/wiki/Log-normal_distribution """ return FunctionExpression("sampleLogNormal", (mean, stdev)) @classmethod def cumulativeLogNormal( cls, value: IntoExpression, mean: IntoExpression = None, stdev: IntoExpression = None, /, ) -> Expression: """ Returns the value of the `cumulative distribution function`_ at the given input domain ``value`` for a log-normal distribution with specified log ``mean`` and log standard deviation ``stdev``. If unspecified, the log mean defaults to ``0`` and the log standard deviation defaults to ``1``. .. _cumulative distribution function: https://en.wikipedia.org/wiki/Cumulative_distribution_function """ return FunctionExpression("cumulativeLogNormal", (value, mean, stdev)) @classmethod def densityLogNormal( cls, value: IntoExpression, mean: IntoExpression = None, stdev: IntoExpression = None, /, ) -> Expression: """ Returns the value of the `probability density function`_ at the given input domain ``value``, for a log-normal distribution with specified log ``mean`` and log standard deviation ``stdev``. If unspecified, the log mean defaults to ``0`` and the log standard deviation defaults to ``1``. .. 
_probability density function: https://en.wikipedia.org/wiki/Probability_density_function """ return FunctionExpression("densityLogNormal", (value, mean, stdev)) @classmethod def quantileLogNormal( cls, probability: IntoExpression, mean: IntoExpression = None, stdev: IntoExpression = None, /, ) -> Expression: """ Returns the quantile value (the inverse of the `cumulative distribution function`_) for the given input ``probability``, for a log-normal distribution with specified log ``mean`` and log standard deviation ``stdev``. If unspecified, the log mean defaults to ``0`` and the log standard deviation defaults to ``1``. .. _cumulative distribution function: https://en.wikipedia.org/wiki/Cumulative_distribution_function """ return FunctionExpression("quantileLogNormal", (probability, mean, stdev)) @classmethod def sampleUniform( cls, min: IntoExpression = None, max: IntoExpression = None, / ) -> Expression: """ Returns a sample from a univariate `continuous uniform probability distribution`_ over the interval [``min``, ``max``). If unspecified, ``min`` defaults to ``0`` and ``max`` defaults to ``1``. If only one argument is provided, it is interpreted as the ``max`` value. .. _continuous uniform probability distribution: https://en.wikipedia.org/wiki/Continuous_uniform_distribution """ return FunctionExpression("sampleUniform", (min, max)) @classmethod def cumulativeUniform( cls, value: IntoExpression, min: IntoExpression = None, max: IntoExpression = None, /, ) -> Expression: """ Returns the value of the `cumulative distribution function`_ at the given input domain ``value`` for a uniform distribution over the interval [``min``, ``max``). If unspecified, ``min`` defaults to ``0`` and ``max`` defaults to ``1``. If only one argument is provided, it is interpreted as the ``max`` value. .. _cumulative distribution function: https://en.wikipedia.org/wiki/Cumulative_distribution_function """ return FunctionExpression("cumulativeUniform", (value, min, max)) @classmethod def densityUniform( cls, value: IntoExpression, min: IntoExpression = None, max: IntoExpression = None, /, ) -> Expression: """ Returns the value of the `probability density function`_ at the given input domain ``value``, for a uniform distribution over the interval [``min``, ``max``). If unspecified, ``min`` defaults to ``0`` and ``max`` defaults to ``1``. If only one argument is provided, it is interpreted as the ``max`` value. .. _probability density function: https://en.wikipedia.org/wiki/Probability_density_function """ return FunctionExpression("densityUniform", (value, min, max)) @classmethod def quantileUniform( cls, probability: IntoExpression, min: IntoExpression = None, max: IntoExpression = None, /, ) -> Expression: """ Returns the quantile value (the inverse of the `cumulative distribution function`_) for the given input ``probability``, for a uniform distribution over the interval [``min``, ``max``). If unspecified, ``min`` defaults to ``0`` and ``max`` defaults to ``1``. If only one argument is provided, it is interpreted as the ``max`` value. .. 
_cumulative distribution function: https://en.wikipedia.org/wiki/Cumulative_distribution_function """ return FunctionExpression("quantileUniform", (probability, min, max)) @classmethod def now(cls) -> Expression: """Returns the timestamp for the current time.""" return FunctionExpression("now", ()) @classmethod def datetime( cls, year: IntoExpression, month: IntoExpression, day: IntoExpression = None, hour: IntoExpression = None, min: IntoExpression = None, sec: IntoExpression = None, millisec: IntoExpression = None, /, ) -> Expression: """ Returns a new ``Date`` instance. The ``month`` is 0-based, such that ``1`` represents February. """ return FunctionExpression( "datetime", (year, month, day, hour, min, sec, millisec) ) @classmethod def date(cls, datetime: IntoExpression, /) -> Expression: """Returns the day of the month for the given ``datetime`` value, in local time.""" return FunctionExpression("date", (datetime,)) @classmethod def day(cls, datetime: IntoExpression, /) -> Expression: """Returns the day of the week for the given ``datetime`` value, in local time.""" return FunctionExpression("day", (datetime,)) @classmethod def dayofyear(cls, datetime: IntoExpression, /) -> Expression: """Returns the one-based day of the year for the given ``datetime`` value, in local time.""" return FunctionExpression("dayofyear", (datetime,)) @classmethod def year(cls, datetime: IntoExpression, /) -> Expression: """Returns the year for the given ``datetime`` value, in local time.""" return FunctionExpression("year", (datetime,)) @classmethod def quarter(cls, datetime: IntoExpression, /) -> Expression: """Returns the quarter of the year (0-3) for the given ``datetime`` value, in local time.""" return FunctionExpression("quarter", (datetime,)) @classmethod def month(cls, datetime: IntoExpression, /) -> Expression: """Returns the (zero-based) month for the given ``datetime`` value, in local time.""" return FunctionExpression("month", (datetime,)) @classmethod def week(cls, date: IntoExpression, /) -> Expression: """ Returns the week number of the year for the given *datetime*, in local time. This function assumes Sunday-based weeks. Days before the first Sunday of the year are considered to be in week 0, the first Sunday of the year is the start of week 1, the second Sunday week 2, *etc.*. 
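An illustrative sketch (an editorial addition, not from the source), composing ``week`` with ``datetime``; the concrete date is arbitrary and ``datetime`` takes a 0-based month: >>> import altair as alt >>> wk = alt.expr.week(alt.expr.datetime(2012, 0, 15)) # week number containing Jan 15, 2012, in local time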
""" return FunctionExpression("week", (date,)) @classmethod def hours(cls, datetime: IntoExpression, /) -> Expression: """Returns the hours component for the given ``datetime`` value, in local time.""" return FunctionExpression("hours", (datetime,)) @classmethod def minutes(cls, datetime: IntoExpression, /) -> Expression: """Returns the minutes component for the given ``datetime`` value, in local time.""" return FunctionExpression("minutes", (datetime,)) @classmethod def seconds(cls, datetime: IntoExpression, /) -> Expression: """Returns the seconds component for the given ``datetime`` value, in local time.""" return FunctionExpression("seconds", (datetime,)) @classmethod def milliseconds(cls, datetime: IntoExpression, /) -> Expression: """Returns the milliseconds component for the given ``datetime`` value, in local time.""" return FunctionExpression("milliseconds", (datetime,)) @classmethod def time(cls, datetime: IntoExpression, /) -> Expression: """Returns the epoch-based timestamp for the given ``datetime`` value.""" return FunctionExpression("time", (datetime,)) @classmethod def timezoneoffset(cls, datetime: IntoExpression, /) -> Expression: """Returns the timezone offset from the local timezone to UTC for the given ``datetime`` value.""" return FunctionExpression("timezoneoffset", (datetime,)) @classmethod def timeOffset( cls, unit: IntoExpression, date: IntoExpression, step: IntoExpression = None, / ) -> Expression: """ Returns a new ``Date`` instance that offsets the given ``date`` by the specified time `*unit*`_ in the local timezone. The optional ``step`` argument indicates the number of time unit steps to offset by (default 1). .. _*unit*: https://vega.github.io/vega/docs/api/time/#time-units """ return FunctionExpression("timeOffset", (unit, date, step)) @classmethod def timeSequence( cls, unit: IntoExpression, start: IntoExpression, stop: IntoExpression, step: IntoExpression = None, /, ) -> Expression: """ Returns an array of ``Date`` instances from ``start`` (inclusive) to ``stop`` (exclusive), with each entry separated by the given time `*unit*`_ in the local timezone. The optional ``step`` argument indicates the number of time unit steps to take between each sequence entry (default 1). .. _*unit*: https://vega.github.io/vega/docs/api/time/#time-units """ return FunctionExpression("timeSequence", (unit, start, stop, step)) @classmethod def utc( cls, year: IntoExpression, month: IntoExpression, day: IntoExpression = None, hour: IntoExpression = None, min: IntoExpression = None, sec: IntoExpression = None, millisec: IntoExpression = None, /, ) -> Expression: """ Returns a timestamp for the given UTC date. The ``month`` is 0-based, such that ``1`` represents February. 
""" return FunctionExpression("utc", (year, month, day, hour, min, sec, millisec)) @classmethod def utcdate(cls, datetime: IntoExpression, /) -> Expression: """Returns the day of the month for the given ``datetime`` value, in UTC time.""" return FunctionExpression("utcdate", (datetime,)) @classmethod def utcday(cls, datetime: IntoExpression, /) -> Expression: """Returns the day of the week for the given ``datetime`` value, in UTC time.""" return FunctionExpression("utcday", (datetime,)) @classmethod def utcdayofyear(cls, datetime: IntoExpression, /) -> Expression: """Returns the one-based day of the year for the given ``datetime`` value, in UTC time.""" return FunctionExpression("utcdayofyear", (datetime,)) @classmethod def utcyear(cls, datetime: IntoExpression, /) -> Expression: """Returns the year for the given ``datetime`` value, in UTC time.""" return FunctionExpression("utcyear", (datetime,)) @classmethod def utcquarter(cls, datetime: IntoExpression, /) -> Expression: """Returns the quarter of the year (0-3) for the given ``datetime`` value, in UTC time.""" return FunctionExpression("utcquarter", (datetime,)) @classmethod def utcmonth(cls, datetime: IntoExpression, /) -> Expression: """Returns the (zero-based) month for the given ``datetime`` value, in UTC time.""" return FunctionExpression("utcmonth", (datetime,)) @classmethod def utcweek(cls, date: IntoExpression, /) -> Expression: """ Returns the week number of the year for the given *datetime*, in UTC time. This function assumes Sunday-based weeks. Days before the first Sunday of the year are considered to be in week 0, the first Sunday of the year is the start of week 1, the second Sunday week 2, *etc.*. """ return FunctionExpression("utcweek", (date,)) @classmethod def utchours(cls, datetime: IntoExpression, /) -> Expression: """Returns the hours component for the given ``datetime`` value, in UTC time.""" return FunctionExpression("utchours", (datetime,)) @classmethod def utcminutes(cls, datetime: IntoExpression, /) -> Expression: """Returns the minutes component for the given ``datetime`` value, in UTC time.""" return FunctionExpression("utcminutes", (datetime,)) @classmethod def utcseconds(cls, datetime: IntoExpression, /) -> Expression: """Returns the seconds component for the given ``datetime`` value, in UTC time.""" return FunctionExpression("utcseconds", (datetime,)) @classmethod def utcmilliseconds(cls, datetime: IntoExpression, /) -> Expression: """Returns the milliseconds component for the given ``datetime`` value, in UTC time.""" return FunctionExpression("utcmilliseconds", (datetime,)) @classmethod def utcOffset( cls, unit: IntoExpression, date: IntoExpression, step: IntoExpression = None, / ) -> Expression: """ Returns a new ``Date`` instance that offsets the given ``date`` by the specified time `*unit*`_ in UTC time. The optional ``step`` argument indicates the number of time unit steps to offset by (default 1). .. _*unit*: https://vega.github.io/vega/docs/api/time/#time-units """ return FunctionExpression("utcOffset", (unit, date, step)) @classmethod def utcSequence( cls, unit: IntoExpression, start: IntoExpression, stop: IntoExpression, step: IntoExpression = None, /, ) -> Expression: """ Returns an array of ``Date`` instances from ``start`` (inclusive) to ``stop`` (exclusive), with each entry separated by the given time `*unit*`_ in UTC time. The optional ``step`` argument indicates the number of time unit steps to take between each sequence entry (default 1). .. 
_*unit*: https://vega.github.io/vega/docs/api/time/#time-units """ return FunctionExpression("utcSequence", (unit, start, stop, step)) @classmethod def extent(cls, array: IntoExpression, /) -> Expression: """Returns a new *[min, max]* array with the minimum and maximum values of the input array, ignoring ``null``, ``undefined``, and ``NaN`` values.""" return FunctionExpression("extent", (array,)) @classmethod def clampRange( cls, range: IntoExpression, min: IntoExpression, max: IntoExpression, / ) -> Expression: """ Clamps a two-element ``range`` array in a span-preserving manner. If the span of the input ``range`` is less than *(max - min)* and an endpoint exceeds either the ``min`` or ``max`` value, the range is translated such that the span is preserved and one endpoint touches the boundary of the *[min, max]* range. If the span exceeds *(max - min)*, the range *[min, max]* is returned. """ return FunctionExpression("clampRange", (range, min, max)) @classmethod def indexof(cls, array: IntoExpression, value: IntoExpression, /) -> Expression: """Returns the first index of ``value`` in the input ``array``.""" return FunctionExpression("indexof", (array, value)) @classmethod def inrange(cls, value: IntoExpression, range: IntoExpression, /) -> Expression: """Tests whether ``value`` lies within (or is equal to either) the first and last values of the ``range`` array.""" return FunctionExpression("inrange", (value, range)) @classmethod def join( cls, array: IntoExpression, separator: IntoExpression = None, / ) -> Expression: """Returns a new string by concatenating all of the elements of the input ``array``, separated by commas or a specified ``separator`` string.""" return FunctionExpression("join", (array, separator)) @classmethod def lastindexof(cls, array: IntoExpression, value: IntoExpression, /) -> Expression: """Returns the last index of ``value`` in the input ``array``.""" return FunctionExpression("lastindexof", (array, value)) @classmethod def length(cls, array: IntoExpression, /) -> Expression: """Returns the length of the input ``array``.""" return FunctionExpression("length", (array,)) @classmethod def lerp(cls, array: IntoExpression, fraction: IntoExpression, /) -> Expression: """ Returns the linearly interpolated value between the first and last entries in the ``array`` for the provided interpolation ``fraction`` (typically between 0 and 1). For example, ``alt.expr.lerp([0, 50], 0.5)`` returns 25. """ return FunctionExpression("lerp", (array, fraction)) @classmethod def peek(cls, array: IntoExpression, /) -> Expression: """ Returns the last element in the input ``array``. Similar to the built-in ``Array.pop`` method, except that it does not remove the last element. This method is a convenient shorthand for ``array[array.length - 1]``. """ return FunctionExpression("peek", (array,)) @classmethod def pluck(cls, array: IntoExpression, field: IntoExpression, /) -> Expression: """ Retrieves the value for the specified ``field`` from a given ``array`` of objects. The input ``field`` string may include nested properties (e.g., ``foo.bar.bz``). """ return FunctionExpression("pluck", (array, field)) @classmethod def reverse(cls, array: IntoExpression, /) -> Expression: """ Returns a new array with elements in a reverse order of the input ``array``. The first array element becomes the last, and the last array element becomes the first. 
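A minimal sketch (an addition, not from the source), assuming a literal Python list is serialized into the expression: >>> import altair as alt >>> rev = alt.expr.reverse([1, 2, 3]) # expression evaluating to [3, 2, 1]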
""" return FunctionExpression("reverse", (array,)) @classmethod def sequence(cls, *args: Any) -> Expression: """ Returns an array containing an arithmetic sequence of numbers. If ``step`` is omitted, it defaults to 1. If ``start`` is omitted, it defaults to 0. The ``stop`` value is exclusive; it is not included in the result. If ``step`` is positive, the last element is the largest *start + i * step* less than ``stop``; if ``step`` is negative, the last element is the smallest *start + i * step* greater than ``stop``. If the returned array would contain an infinite number of values, an empty range is returned. The arguments are not required to be integers. """ return FunctionExpression("sequence", args) @classmethod def slice( cls, array: IntoExpression, start: IntoExpression, end: IntoExpression = None, / ) -> Expression: """ Returns a section of ``array`` between the ``start`` and ``end`` indices. If the ``end`` argument is negative, it is treated as an offset from the end of the array (*alt.expr.length(array) + end*). """ return FunctionExpression("slice", (array, start, end)) @classmethod def sort(cls, array: IntoExpression, /) -> Expression: """ Sorts the array in natural order using `ascending from Vega Utils`_. .. _ascending from Vega Utils: https://vega.github.io/vega/docs/api/util/#ascending """ return FunctionExpression("sort", (array,)) @classmethod def span(cls, array: IntoExpression, /) -> Expression: """Returns the span of ``array``: the difference between the last and first elements, or *array[array.length-1] - array[0]*.""" return FunctionExpression("span", (array,)) @classmethod def lower(cls, string: IntoExpression, /) -> Expression: """Transforms ``string`` to lower-case letters.""" return FunctionExpression("lower", (string,)) @classmethod def pad( cls, string: IntoExpression, length: IntoExpression, character: IntoExpression = None, align: IntoExpression = None, /, ) -> Expression: """ Pads a ``string`` value with repeated instances of a ``character`` up to a specified ``length``. If ``character`` is not specified, a space (' ') is used. By default, padding is added to the end of a string. An optional ``align`` parameter specifies if padding should be added to the ``'left'`` (beginning), ``'center'``, or ``'right'`` (end) of the input string. """ return FunctionExpression("pad", (string, length, character, align)) @classmethod def parseFloat(cls, string: IntoExpression, /) -> Expression: """ Parses the input ``string`` to a floating-point value. Same as JavaScript's ``parseFloat``. """ return FunctionExpression("parseFloat", (string,)) @classmethod def parseInt(cls, string: IntoExpression, /) -> Expression: """ Parses the input ``string`` to an integer value. Same as JavaScript's ``parseInt``. """ return FunctionExpression("parseInt", (string,)) @classmethod def replace( cls, string: IntoExpression, pattern: IntoExpression, replacement: IntoExpression, /, ) -> Expression: """ Returns a new string with some or all matches of ``pattern`` replaced by a ``replacement`` string. The ``pattern`` can be a string or a regular expression. If ``pattern`` is a string, only the first instance will be replaced. Same as `JavaScript's String.replace`_. .. 
_JavaScript's String.replace: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String/replace """ return FunctionExpression("replace", (string, pattern, replacement)) @classmethod def substring( cls, string: IntoExpression, start: IntoExpression, end: IntoExpression = None, /, ) -> Expression: """Returns a section of ``string`` between the ``start`` and ``end`` indices.""" return FunctionExpression("substring", (string, start, end)) @classmethod def trim(cls, string: IntoExpression, /) -> Expression: """Returns a trimmed string with preceding and trailing whitespace removed.""" return FunctionExpression("trim", (string,)) @classmethod def truncate( cls, string: IntoExpression, length: IntoExpression, align: IntoExpression = None, ellipsis: IntoExpression = None, /, ) -> Expression: """ Truncates an input ``string`` to a target ``length``. The optional ``align`` argument indicates what part of the string should be truncated: ``'left'`` (the beginning), ``'center'``, or ``'right'`` (the end). By default, the ``'right'`` end of the string is truncated. The optional ``ellipsis`` argument indicates the string to use to indicate truncated content; by default the ellipsis character ``…`` (``\u2026``) is used. """ return FunctionExpression("truncate", (string, length, align, ellipsis)) @classmethod def upper(cls, string: IntoExpression, /) -> Expression: """Transforms ``string`` to upper-case letters.""" return FunctionExpression("upper", (string,)) @classmethod def btoa(cls, string: IntoExpression, /) -> Expression: """ Creates a `Base64`_-encoded `ASCII`_ string. Same as JavaScript's `Window.btoa()`_. .. _Base64: https://developer.mozilla.org/en-US/docs/Glossary/Base64 .. _ASCII: https://developer.mozilla.org/en-US/docs/Glossary/ASCII .. _Window.btoa(): https://developer.mozilla.org/en-US/docs/Web/API/Window/btoa """ return FunctionExpression("btoa", (string,)) @classmethod def atob(cls, string: IntoExpression, /) -> Expression: """ Decodes an `ASCII`_ string that was encoded with `Base64`_. Same as JavaScript's `Window.atob()`_. .. _ASCII: https://developer.mozilla.org/en-US/docs/Glossary/ASCII .. _Base64: https://developer.mozilla.org/en-US/docs/Glossary/Base64 .. _Window.atob(): https://developer.mozilla.org/en-US/docs/Web/API/Window/atob """ return FunctionExpression("atob", (string,)) @classmethod def merge( cls, object1: IntoExpression, object2: IntoExpression = None, *args: Any ) -> Expression: """ Merges the input objects ``object1``, ``object2``, etc. into a new output object. Inputs are visited in sequential order, such that key values from later arguments can overwrite those from earlier arguments. Example: ``alt.expr.merge({a:1, b:2}, {a:3}) -> {a:3, b:2}``. """ return FunctionExpression("merge", (object1, object2, *args)) @classmethod def dayFormat(cls, day: IntoExpression, /) -> Expression: """ Formats a (0-6) *weekday* number as a full week day name, according to the current locale. For example: ``alt.expr.dayFormat(0) -> "Sunday"``. """ return FunctionExpression("dayFormat", (day,)) @classmethod def dayAbbrevFormat(cls, day: IntoExpression, /) -> Expression: """ Formats a (0-6) *weekday* number as an abbreviated week day name, according to the current locale. For example: ``alt.expr.dayAbbrevFormat(0) -> "Sun"``. """ return FunctionExpression("dayAbbrevFormat", (day,)) @classmethod def format(cls, value: IntoExpression, specifier: IntoExpression, /) -> Expression: """ Formats a numeric ``value`` as a string.
The ``specifier`` must be a valid `d3-format specifier`_ (e.g., ``alt.expr.format(value, ',.2f')``). Null values are formatted as ``"null"``. .. _d3-format specifier: https://github.com/d3/d3-format/ """ return FunctionExpression("format", (value, specifier)) @classmethod def monthFormat(cls, month: IntoExpression, /) -> Expression: """ Formats a (zero-based) ``month`` number as a full month name, according to the current locale. For example: ``alt.expr.monthFormat(0) -> "January"``. """ return FunctionExpression("monthFormat", (month,)) @classmethod def monthAbbrevFormat(cls, month: IntoExpression, /) -> Expression: """ Formats a (zero-based) ``month`` number as an abbreviated month name, according to the current locale. For example: ``alt.expr.monthAbbrevFormat(0) -> "Jan"``. """ return FunctionExpression("monthAbbrevFormat", (month,)) @classmethod def timeUnitSpecifier( cls, units: IntoExpression, specifiers: IntoExpression = None, / ) -> Expression: """ Returns a time format specifier string for the given time `*units*`_. The optional ``specifiers`` object provides a set of specifier sub-strings for customizing the format; for more, see the `timeUnitSpecifier API documentation`_. The resulting specifier string can then be used as input to the `timeFormat`_ or `utcFormat`_ functions, or as the *format* parameter of an axis or legend. For example: ``alt.expr.timeFormat(date, alt.expr.timeUnitSpecifier('year'))`` or ``alt.expr.timeFormat(date, alt.expr.timeUnitSpecifier(['hours', 'minutes']))``. .. _*units*: https://vega.github.io/vega/docs/api/time/#time-units .. _timeUnitSpecifier API documentation: https://vega.github.io/vega/docs/api/time/#timeUnitSpecifier .. _timeFormat: https://vega.github.io/vega/docs/expressions/#timeFormat .. _utcFormat: https://vega.github.io/vega/docs/expressions/#utcFormat """ return FunctionExpression("timeUnitSpecifier", (units, specifiers)) @classmethod def timeFormat( cls, value: IntoExpression, specifier: IntoExpression, / ) -> Expression: """ Formats a datetime ``value`` (either a ``Date`` object or timestamp) as a string, according to the local time. The ``specifier`` must be a valid `d3-time-format specifier`_ or `TimeMultiFormat object`_. For example: ``alt.expr.timeFormat(timestamp, '%A')``. Null values are formatted as ``"null"``. .. _d3-time-format specifier: https://github.com/d3/d3-time-format/ .. _TimeMultiFormat object: https://vega.github.io/vega/docs/types/#TimeMultiFormat """ return FunctionExpression("timeFormat", (value, specifier)) @classmethod def timeParse( cls, string: IntoExpression, specifier: IntoExpression, / ) -> Expression: """ Parses a ``string`` value to a Date object, according to the local time. The ``specifier`` must be a valid `d3-time-format specifier`_. For example: ``alt.expr.timeParse('June 30, 2015', '%B %d, %Y')``. .. _d3-time-format specifier: https://github.com/d3/d3-time-format/ """ return FunctionExpression("timeParse", (string, specifier)) @classmethod def utcFormat( cls, value: IntoExpression, specifier: IntoExpression, / ) -> Expression: """ Formats a datetime ``value`` (either a ``Date`` object or timestamp) as a string, according to `UTC`_ time. The ``specifier`` must be a valid `d3-time-format specifier`_ or `TimeMultiFormat object`_. For example: ``alt.expr.utcFormat(timestamp, '%A')``. Null values are formatted as ``"null"``. .. _UTC: https://en.wikipedia.org/wiki/Coordinated_Universal_Time .. _d3-time-format specifier: https://github.com/d3/d3-time-format/ ..
_TimeMultiFormat object: https://vega.github.io/vega/docs/types/#TimeMultiFormat """ return FunctionExpression("utcFormat", (value, specifier)) @classmethod def utcParse( cls, value: IntoExpression, specifier: IntoExpression, / ) -> Expression: """ Parses a *string* value to a Date object, according to `UTC`_ time. The ``specifier`` must be a valid `d3-time-format specifier`_. For example: ``alt.expr.utcParse('June 30, 2015', '%B %d, %Y')``. .. _UTC: https://en.wikipedia.org/wiki/Coordinated_Universal_Time .. _d3-time-format specifier: https://github.com/d3/d3-time-format/ """ return FunctionExpression("utcParse", (value, specifier)) @classmethod def regexp( cls, pattern: IntoExpression, flags: IntoExpression = None, / ) -> Expression: """ Creates a regular expression instance from an input ``pattern`` string and optional ``flags``. Same as `JavaScript's RegExp`_. .. _JavaScript's RegExp: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/RegExp """ return FunctionExpression("regexp", (pattern, flags)) @classmethod def test( cls, regexp: IntoExpression, string: IntoExpression = None, / ) -> Expression: r""" Evaluates a regular expression ``regexp`` against the input ``string``, returning ``true`` if the string matches the pattern, ``false`` otherwise. For example: ``alt.expr.test(/\\d{3}/, "32-21-9483") -> true``. """ return FunctionExpression("test", (regexp, string)) @classmethod def rgb(cls, *args: Any) -> Expression: """ Constructs a new `RGB`_ color. If ``r``, ``g`` and ``b`` are specified, these represent the channel values of the returned color; an ``opacity`` may also be specified. If a CSS Color Module Level 3 *specifier* string is specified, it is parsed and then converted to the RGB color space. Uses `d3-color's rgb function`_. .. _RGB: https://en.wikipedia.org/wiki/RGB_color_model .. _d3-color's rgb function: https://github.com/d3/d3-color#rgb """ return FunctionExpression("rgb", args) @classmethod def hsl(cls, *args: Any) -> Expression: """ Constructs a new `HSL`_ color. If ``h``, ``s`` and ``l`` are specified, these represent the channel values of the returned color; an ``opacity`` may also be specified. If a CSS Color Module Level 3 *specifier* string is specified, it is parsed and then converted to the HSL color space. Uses `d3-color's hsl function`_. .. _HSL: https://en.wikipedia.org/wiki/HSL_and_HSV .. _d3-color's hsl function: https://github.com/d3/d3-color#hsl """ return FunctionExpression("hsl", args) @classmethod def lab(cls, *args: Any) -> Expression: """ Constructs a new `CIE LAB`_ color. If ``l``, ``a`` and ``b`` are specified, these represent the channel values of the returned color; an ``opacity`` may also be specified. If a CSS Color Module Level 3 *specifier* string is specified, it is parsed and then converted to the LAB color space. Uses `d3-color's lab function`_. .. _CIE LAB: https://en.wikipedia.org/wiki/Lab_color_space#CIELAB .. _d3-color's lab function: https://github.com/d3/d3-color#lab """ return FunctionExpression("lab", args) @classmethod def hcl(cls, *args: Any) -> Expression: """ Constructs a new `HCL`_ (hue, chroma, luminance) color. If ``h``, ``c`` and ``l`` are specified, these represent the channel values of the returned color; an ``opacity`` may also be specified. If a CSS Color Module Level 3 *specifier* string is specified, it is parsed and then converted to the HCL color space. Uses `d3-color's hcl function`_. .. _HCL: https://en.wikipedia.org/wiki/Lab_color_space#CIELAB .. 
_d3-color's hcl function: https://github.com/d3/d3-color#hcl """ return FunctionExpression("hcl", args) @classmethod def luminance(cls, specifier: IntoExpression, /) -> Expression: """ Returns the luminance for the given color ``specifier`` (compatible with `d3-color's rgb function`_). The luminance is calculated according to the `W3C Web Content Accessibility Guidelines`_. .. _d3-color's rgb function: https://github.com/d3/d3-color#rgb .. _W3C Web Content Accessibility Guidelines: https://www.w3.org/TR/2008/REC-WCAG20-20081211/#relativeluminancedef """ return FunctionExpression("luminance", (specifier,)) @classmethod def contrast( cls, specifier1: IntoExpression, specifier2: IntoExpression, / ) -> Expression: """ Returns the contrast ratio between the input color specifiers as a float between 1 and 21. The contrast is calculated according to the `W3C Web Content Accessibility Guidelines`_. .. _W3C Web Content Accessibility Guidelines: https://www.w3.org/TR/2008/REC-WCAG20-20081211/#contrast-ratiodef """ return FunctionExpression("contrast", (specifier1, specifier2)) @classmethod def item(cls) -> Expression: """Returns the current scenegraph item that is the target of the event.""" return FunctionExpression("item", ()) @classmethod def group(cls, name: IntoExpression = None, /) -> Expression: """ Returns the scenegraph group mark item in which the current event has occurred. If no arguments are provided, the immediate parent group is returned. If a group name is provided, the matching ancestor group item is returned. """ return FunctionExpression("group", (name,)) @classmethod def xy(cls, item: IntoExpression = None, /) -> Expression: """ Returns the x- and y-coordinates for the current event as a two-element array. If no arguments are provided, the top-level coordinate space of the view is used. If a scenegraph ``item`` (or string group name) is provided, the coordinate space of the group item is used. """ return FunctionExpression("xy", (item,)) @classmethod def x(cls, item: IntoExpression = None, /) -> Expression: """ Returns the x coordinate for the current event. If no arguments are provided, the top-level coordinate space of the view is used. If a scenegraph ``item`` (or string group name) is provided, the coordinate space of the group item is used. """ return FunctionExpression("x", (item,)) @classmethod def y(cls, item: IntoExpression = None, /) -> Expression: """ Returns the y coordinate for the current event. If no arguments are provided, the top-level coordinate space of the view is used. If a scenegraph ``item`` (or string group name) is provided, the coordinate space of the group item is used. """ return FunctionExpression("y", (item,)) @classmethod def pinchDistance(cls, event: IntoExpression, /) -> Expression: """Returns the pixel distance between the first two touch points of a multi-touch event.""" return FunctionExpression("pinchDistance", (event,)) @classmethod def pinchAngle(cls, event: IntoExpression, /) -> Expression: """Returns the angle of the line connecting the first two touch points of a multi-touch event.""" return FunctionExpression("pinchAngle", (event,)) @classmethod def inScope(cls, item: IntoExpression, /) -> Expression: """Returns true if the given scenegraph ``item`` is a descendant of the group mark in which the event handler was defined, false otherwise.""" return FunctionExpression("inScope", (item,)) @classmethod def data(cls, name: IntoExpression, /) -> Expression: """ Returns the array of data objects for the Vega data set with the given ``name``. 
If the data set is not found, returns an empty array. """ return FunctionExpression("data", (name,)) @classmethod def indata( cls, name: IntoExpression, field: IntoExpression, value: IntoExpression, / ) -> Expression: """ Tests if the data set with a given ``name`` contains a datum with a ``field`` value that matches the input ``value``. For example: ``alt.expr.indata('table', 'category', value)``. """ return FunctionExpression("indata", (name, field, value)) @classmethod def scale( cls, name: IntoExpression, value: IntoExpression, group: IntoExpression = None, /, ) -> Expression: """ Applies the named scale transform (or projection) to the specified ``value``. The optional ``group`` argument takes a scenegraph group mark item to indicate the specific scope in which to look up the scale or projection. """ return FunctionExpression("scale", (name, value, group)) @classmethod def invert( cls, name: IntoExpression, value: IntoExpression, group: IntoExpression = None, /, ) -> Expression: """ Inverts the named scale transform (or projection) for the specified ``value``. The optional ``group`` argument takes a scenegraph group mark item to indicate the specific scope in which to look up the scale or projection. """ return FunctionExpression("invert", (name, value, group)) @classmethod def copy(cls, name: IntoExpression, group: IntoExpression = None, /) -> Expression: # type: ignore[override] """ Returns a copy (a new cloned instance) of the named scale transform or projection, or ``undefined`` if no scale or projection is found. The optional ``group`` argument takes a scenegraph group mark item to indicate the specific scope in which to look up the scale or projection. """ return FunctionExpression("copy", (name, group)) @classmethod def domain( cls, name: IntoExpression, group: IntoExpression = None, / ) -> Expression: """ Returns the scale domain array for the named scale transform, or an empty array if the scale is not found. The optional ``group`` argument takes a scenegraph group mark item to indicate the specific scope in which to look up the scale. """ return FunctionExpression("domain", (name, group)) @classmethod def range(cls, name: IntoExpression, group: IntoExpression = None, /) -> Expression: """ Returns the scale range array for the named scale transform, or an empty array if the scale is not found. The optional ``group`` argument takes a scenegraph group mark item to indicate the specific scope in which to look up the scale. """ return FunctionExpression("range", (name, group)) @classmethod def bandwidth( cls, name: IntoExpression, group: IntoExpression = None, / ) -> Expression: """ Returns the current band width for the named band scale transform, or zero if the scale is not found or is not a band scale. The optional ``group`` argument takes a scenegraph group mark item to indicate the specific scope in which to look up the scale. """ return FunctionExpression("bandwidth", (name, group)) @classmethod def bandspace( cls, count: IntoExpression, paddingInner: IntoExpression = None, paddingOuter: IntoExpression = None, /, ) -> Expression: """ Returns the number of steps needed within a band scale, based on the ``count`` of domain elements and the inner and outer padding values. While normally calculated within the scale itself, this function can be helpful for determining the size of a chart's layout.
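A short sketch added for illustration; the count and padding values are arbitrary: >>> import altair as alt >>> steps = alt.expr.bandspace(10, 0.1, 0.05) # steps for 10 domain values with inner padding 0.1 and outer padding 0.05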
""" return FunctionExpression("bandspace", (count, paddingInner, paddingOuter)) @classmethod def gradient( cls, scale: IntoExpression, p0: IntoExpression, p1: IntoExpression, count: IntoExpression = None, /, ) -> Expression: """ Returns a linear color gradient for the ``scale`` (whose range must be a `continuous color scheme`_) and starting and ending points ``p0`` and ``p1``, each an *[x, y]* array. The points ``p0`` and ``p1`` should be expressed in normalized coordinates in the domain [0, 1], relative to the bounds of the item being colored. If unspecified, ``p0`` defaults to ``[0, 0]`` and ``p1`` defaults to ``[1, 0]``, for a horizontal gradient that spans the full bounds of an item. The optional ``count`` argument indicates a desired target number of sample points to take from the color scale. .. _continuous color scheme: https://vega.github.io/vega/docs/schemes """ return FunctionExpression("gradient", (scale, p0, p1, count)) @classmethod def panLinear(cls, domain: IntoExpression, delta: IntoExpression, /) -> Expression: """ Given a linear scale ``domain`` array with numeric or datetime values, returns a new two-element domain array that is the result of panning the domain by a fractional ``delta``. The ``delta`` value represents fractional units of the scale range; for example, ``0.5`` indicates panning the scale domain to the right by half the scale range. """ return FunctionExpression("panLinear", (domain, delta)) @classmethod def panLog(cls, domain: IntoExpression, delta: IntoExpression, /) -> Expression: """ Given a log scale ``domain`` array with numeric or datetime values, returns a new two-element domain array that is the result of panning the domain by a fractional ``delta``. The ``delta`` value represents fractional units of the scale range; for example, ``0.5`` indicates panning the scale domain to the right by half the scale range. """ return FunctionExpression("panLog", (domain, delta)) @classmethod def panPow( cls, domain: IntoExpression, delta: IntoExpression, exponent: IntoExpression, / ) -> Expression: """ Given a power scale ``domain`` array with numeric or datetime values and the given ``exponent``, returns a new two-element domain array that is the result of panning the domain by a fractional ``delta``. The ``delta`` value represents fractional units of the scale range; for example, ``0.5`` indicates panning the scale domain to the right by half the scale range. """ return FunctionExpression("panPow", (domain, delta, exponent)) @classmethod def panSymlog( cls, domain: IntoExpression, delta: IntoExpression, constant: IntoExpression, / ) -> Expression: """ Given a symmetric log scale ``domain`` array with numeric or datetime values parameterized by the given ``constant``, returns a new two-element domain array that is the result of panning the domain by a fractional ``delta``. The ``delta`` value represents fractional units of the scale range; for example, ``0.5`` indicates panning the scale domain to the right by half the scale range. """ return FunctionExpression("panSymlog", (domain, delta, constant)) @classmethod def zoomLinear( cls, domain: IntoExpression, anchor: IntoExpression, scaleFactor: IntoExpression, /, ) -> Expression: """ Given a linear scale ``domain`` array with numeric or datetime values, returns a new two-element domain array that is the result of zooming the domain by a ``scaleFactor``, centered at the provided fractional ``anchor``. 
The ``anchor`` value represents the zoom position in terms of fractional units of the scale range; for example, ``0.5`` indicates a zoom centered on the mid-point of the scale range. """ return FunctionExpression("zoomLinear", (domain, anchor, scaleFactor)) @classmethod def zoomLog( cls, domain: IntoExpression, anchor: IntoExpression, scaleFactor: IntoExpression, /, ) -> Expression: """ Given a log scale ``domain`` array with numeric or datetime values, returns a new two-element domain array that is the result of zooming the domain by a ``scaleFactor``, centered at the provided fractional ``anchor``. The ``anchor`` value represents the zoom position in terms of fractional units of the scale range; for example, ``0.5`` indicates a zoom centered on the mid-point of the scale range. """ return FunctionExpression("zoomLog", (domain, anchor, scaleFactor)) @classmethod def zoomPow( cls, domain: IntoExpression, anchor: IntoExpression, scaleFactor: IntoExpression, exponent: IntoExpression, /, ) -> Expression: """ Given a power scale ``domain`` array with numeric or datetime values and the given ``exponent``, returns a new two-element domain array that is the result of zooming the domain by a ``scaleFactor``, centered at the provided fractional ``anchor``. The ``anchor`` value represents the zoom position in terms of fractional units of the scale range; for example, ``0.5`` indicates a zoom centered on the mid-point of the scale range. """ return FunctionExpression("zoomPow", (domain, anchor, scaleFactor, exponent)) @classmethod def zoomSymlog( cls, domain: IntoExpression, anchor: IntoExpression, scaleFactor: IntoExpression, constant: IntoExpression, /, ) -> Expression: """ Given a symmetric log scale ``domain`` array with numeric or datetime values parameterized by the given ``constant``, returns a new two-element domain array that is the result of zooming the domain by a ``scaleFactor``, centered at the provided fractional ``anchor``. The ``anchor`` value represents the zoom position in terms of fractional units of the scale range; for example, ``0.5`` indicates a zoom centered on the mid-point of the scale range. """ return FunctionExpression("zoomSymlog", (domain, anchor, scaleFactor, constant)) @classmethod def geoArea( cls, projection: IntoExpression, feature: IntoExpression, group: IntoExpression = None, /, ) -> Expression: """ Returns the projected planar area (typically in square pixels) of a GeoJSON ``feature`` according to the named ``projection``. If the ``projection`` argument is ``null``, computes the spherical area in steradians using unprojected longitude, latitude coordinates. The optional ``group`` argument takes a scenegraph group mark item to indicate the specific scope in which to look up the projection. Uses d3-geo's `geoArea`_ and `path.area`_ methods. .. _geoArea: https://github.com/d3/d3-geo#geoArea .. _path.area: https://github.com/d3/d3-geo#path_area """ return FunctionExpression("geoArea", (projection, feature, group)) @classmethod def geoBounds( cls, projection: IntoExpression, feature: IntoExpression, group: IntoExpression = None, /, ) -> Expression: """ Returns the projected planar bounding box (typically in pixels) for the specified GeoJSON ``feature``, according to the named ``projection``. The bounding box is represented by a two-dimensional array: [[*x₀*, *y₀*], [*x₁*, *y₁*]], where *x₀* is the minimum x-coordinate, *y₀* is the minimum y-coordinate, *x₁* is the maximum x-coordinate, and *y₁* is the maximum y-coordinate. 
If the ``projection`` argument is ``null``, computes the spherical bounding box using unprojected longitude,
        latitude coordinates.

        The optional ``group`` argument takes a scenegraph group mark item to indicate the specific scope in which to
        look up the projection.

        Uses d3-geo's `geoBounds`_ and `path.bounds`_ methods.

        .. _geoBounds: https://github.com/d3/d3-geo#geoBounds
        .. _path.bounds: https://github.com/d3/d3-geo#path_bounds
        """
        return FunctionExpression("geoBounds", (projection, feature, group))

    @classmethod
    def geoCentroid(
        cls,
        projection: IntoExpression,
        feature: IntoExpression,
        group: IntoExpression = None,
        /,
    ) -> Expression:
        """
        Returns the projected planar centroid (typically in pixels) for the specified GeoJSON ``feature``, according
        to the named ``projection``.

        If the ``projection`` argument is ``null``, computes the spherical centroid using unprojected longitude,
        latitude coordinates.

        The optional ``group`` argument takes a scenegraph group mark item to indicate the specific scope in which to
        look up the projection.

        Uses d3-geo's `geoCentroid`_ and `path.centroid`_ methods.

        .. _geoCentroid: https://github.com/d3/d3-geo#geoCentroid
        .. _path.centroid: https://github.com/d3/d3-geo#path_centroid
        """
        return FunctionExpression("geoCentroid", (projection, feature, group))

    @classmethod
    def geoScale(
        cls, projection: IntoExpression, group: IntoExpression = None, /
    ) -> Expression:
        """
        Returns the scale value for the named ``projection``.

        The optional ``group`` argument takes a scenegraph group mark item to indicate the specific scope in which to
        look up the projection.
        """
        return FunctionExpression("geoScale", (projection, group))

    @classmethod
    def treePath(
        cls, name: IntoExpression, source: IntoExpression, target: IntoExpression, /
    ) -> Expression:
        """
        For the hierarchy data set with the given ``name``, returns the shortest path through the tree from the
        ``source`` node id to the ``target`` node id.

        The path starts at the ``source`` node, ascends to the least common ancestor of the ``source`` node and the
        ``target`` node, and then descends to the ``target`` node.
        """
        return FunctionExpression("treePath", (name, source, target))

    @classmethod
    def treeAncestors(cls, name: IntoExpression, node: IntoExpression, /) -> Expression:
        """For the hierarchy data set with the given ``name``, returns the array of ancestor nodes, starting with the
        input ``node``, then followed by each parent up to the root."""
        return FunctionExpression("treeAncestors", (name, node))

    @classmethod
    def containerSize(cls) -> Expression:
        """
        Returns the current CSS box size (``[el.clientWidth, el.clientHeight]``) of the parent DOM element that
        contains the Vega view.

        If there is no container element, returns ``[undefined, undefined]``.
        """
        return FunctionExpression("containerSize", ())

    @classmethod
    def screen(cls) -> Expression:
        """
        Returns the `window.screen`_ object, or ``{}`` if Vega is not running in a browser environment.

        .. _window.screen: https://developer.mozilla.org/en-US/docs/Web/API/Window/screen
        """
        return FunctionExpression("screen", ())

    @classmethod
    def windowSize(cls) -> Expression:
        """Returns the current window size (``[window.innerWidth, window.innerHeight]``) or
        ``[undefined, undefined]`` if Vega is not running in a browser environment."""
        return FunctionExpression("windowSize", ())

    @classmethod
    def warn(
        cls, value1: IntoExpression, value2: IntoExpression = None, *args: Any
    ) -> Expression:
        """
        Logs a warning message and returns the last argument.
For the message to appear in the console, the visualization view must have the appropriate logging level set. """ return FunctionExpression("warn", (value1, value2, *args)) @classmethod def info( cls, value1: IntoExpression, value2: IntoExpression = None, *args: Any ) -> Expression: """ Logs an informative message and returns the last argument. For the message to appear in the console, the visualization view must have the appropriate logging level set. """ return FunctionExpression("info", (value1, value2, *args)) @classmethod def debug( cls, value1: IntoExpression, value2: IntoExpression = None, *args: Any ) -> Expression: """ Logs a debugging message and returns the last argument. For the message to appear in the console, the visualization view must have the appropriate logging level set. """ return FunctionExpression("debug", (value1, value2, *args)) _ExprType = expr # NOTE: Compatibility alias for previous type of `alt.expr`. # `_ExprType` was not referenced in any internal imports/tests.
expr
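A minimal usage sketch of the namespace above (illustrative, not part of the source): the classmethods compose into Vega expression strings, with literal Python values embedded directly into the generated expression. The `alt.expr` access path is an assumption about how this vendored class is exposed.

```python
import altair as alt  # assumes the class above is exposed as `alt.expr`

# Literal arguments are embedded into the generated Vega expression string.
pan = alt.expr.panLinear([0, 100], 0.5)
zoom = alt.expr.zoomLinear([0, 100], 0.5, 2.0)
print(pan)   # e.g. panLinear([0, 100], 0.5)
print(zoom)  # e.g. zoomLinear([0, 100], 0.5, 2.0)
```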
python
huggingface__transformers
src/transformers/cache_utils.py
{ "start": 52131, "end": 60193 }
class ____(Cache):
    """
    Base, abstract class for all encoder-decoder caches. Can be used to hold combinations of self-attention and
    cross-attention caches.

    See `Cache` for details on common methods that are implemented by all cache classes.

    Args:
        caches (`Iterable`):
            Usually an iterable of length 2, containing 2 `Cache` objects, the first one for self-attention, the
            second one for cross-attention. Can optionally also be an iterable of length 1, containing a
            `tuple[tuple[torch.Tensor]]` (usually used for compatibility with torch dp and ddp).

    Example:

        ```python
        >>> from transformers import AutoProcessor, AutoModelForCausalLM, DynamicCache, EncoderDecoderCache

        >>> model = AutoModelForCausalLM.from_pretrained("openai/whisper-small")
        >>> processor = AutoProcessor.from_pretrained("openai/whisper-small")

        >>> inputs = processor(audio=YOUR-AUDIO, return_tensors="pt")

        >>> # Prepare cache classes for encoder and decoder and pass it to model's forward
        >>> self_attention_cache = DynamicCache(config=model.config)
        >>> cross_attention_cache = DynamicCache(config=model.config)
        >>> past_key_values = EncoderDecoderCache(self_attention_cache, cross_attention_cache)
        >>> outputs = model(**inputs, past_key_values=past_key_values, use_cache=True)
        >>> outputs.past_key_values # access cache filled with key/values from generation
        EncoderDecoderCache()
        ```
    """

    def __init__(self, *caches) -> None:
        # For dp and ddp support, if only one argument is passed, it should be an iterable of DynamicCache ddp data
        if len(caches) == 1:
            self_attention_cache_data, cross_attention_cache_data = [], []
            for combined_cache_data in caches[0]:
                # two triples: (self_attn_k, self_attn_v, self_attn_sliding) + the cross-attention counterpart
                if len(combined_cache_data) == 6:
                    self_attention_cache_data.append(combined_cache_data[:3])
                    cross_attention_cache_data.append(combined_cache_data[3:])
                # To support old DDP-style init, we handle the case where the tuple has no sliding window tensor
                elif len(combined_cache_data) == 4:
                    # two pairs: (self_attn_k, self_attn_v) + (cross_attn_k, cross_attn_v)
                    self_attention_cache_data.append(combined_cache_data[:2])
                    cross_attention_cache_data.append(combined_cache_data[2:])
                else:
                    raise ValueError(f"Expected {len(combined_cache_data) = } to be 4 or 6.\n{combined_cache_data = }")
            self.self_attention_cache = DynamicCache(self_attention_cache_data)
            self.cross_attention_cache = DynamicCache(cross_attention_cache_data)
        # Otherwise, we should get two arguments, a self-attention cache and a cross-attention cache
        elif len(caches) == 2:
            if not isinstance(caches[0], Cache) or not isinstance(caches[1], Cache):
                raise TypeError(f"One of the two arguments is not a Cache: {type(caches[0]) = }, {type(caches[1]) = }")
            self.self_attention_cache = caches[0]
            self.cross_attention_cache = caches[1]
        # Error case
        else:
            raise ValueError(f"Expected 1 or 2 arguments, got {len(caches)}")

        self.is_updated = {}
        for layer_idx in range(len(self.cross_attention_cache)):
            self.is_updated[layer_idx] = bool(self.cross_attention_cache.get_seq_length(layer_idx) > 0)

    def __iter__(self):
        """Returns tuples of style (self_attn_k, self_attn_v, self_attn_sliding, cross_attn_k, cross_attn_v, cross_attn_sliding)"""
        for self_attention_layer, cross_attention_layer in zip(self.self_attention_cache, self.cross_attention_cache):
            yield self_attention_layer + cross_attention_layer

    def __repr__(self) -> str:
        return (
            f"{self.__class__.__name__}(self_attention_cache={self.self_attention_cache}, cross_attention_cache="
            f"{self.cross_attention_cache})"
        )

    def __len__(self):
        """
        Support for backwards-compatible
`past_key_values` length, e.g. `len(past_key_values)`. This value corresponds to the number of layers in the model. """ return len(self.self_attention_cache) def get_seq_length(self, layer_idx: int = 0) -> int: """Returns the sequence length of the cached states. A layer index can be optionally passed.""" return self.self_attention_cache.get_seq_length(layer_idx) def reset(self): self.self_attention_cache.reset() self.cross_attention_cache.reset() for layer_idx in self.is_updated: self.is_updated[layer_idx] = False def reorder_cache(self, beam_idx: torch.LongTensor): """Reorders the cache for beam search, given the selected beam indices.""" self.self_attention_cache.reorder_cache(beam_idx) self.cross_attention_cache.reorder_cache(beam_idx) def check_dynamic_cache(self, method: str): if not ( isinstance(self.self_attention_cache, DynamicCache) and isinstance(self.cross_attention_cache, DynamicCache) ): raise TypeError( f"`{method}` is only defined for dynamic cache, got {self.self_attention_cache.__str__()} for the self " f"attention cache and {self.cross_attention_cache.__str__()} for the cross attention cache." ) # TODO(gante, sanchit-gandhi): move following functionality into `.generate` def crop(self, maximum_length: int): """ Crop the past key values up to a new `maximum_length` in terms of tokens. `maximum_length` can also be negative to remove `maximum_length` tokens. This is used in assisted decoding and contrastive search (on the Hub). """ self.check_dynamic_cache(self.crop.__name__) self.self_attention_cache.crop(maximum_length) def batch_split(self, full_batch_size: int, split_size: int) -> "list[EncoderDecoderCache]": """ Split the current instance into a list of `DynamicCache` by the batch size. This will be used by `_split_model_inputs()` in `generation.utils` """ self.check_dynamic_cache(self.batch_split.__name__) self_attention_cache = self.self_attention_cache.batch_split(full_batch_size, split_size) cross_attention_cache = self.cross_attention_cache.batch_split(full_batch_size, split_size) out = [] for self_attn, cross_attn in zip(self_attention_cache, cross_attention_cache): out.append(EncoderDecoderCache(self_attn, cross_attn)) return out def batch_repeat_interleave(self, repeats: int): """Repeat the cache `repeats` times in the batch dimension. Used in contrastive search (on the Hub).""" self.check_dynamic_cache(self.batch_repeat_interleave.__name__) self.self_attention_cache.batch_repeat_interleave(repeats) self.cross_attention_cache.batch_repeat_interleave(repeats) def batch_select_indices(self, indices: torch.Tensor): """Only keep the `indices` in the batch dimension of the cache. Used in contrastive search (on the Hub).""" self.check_dynamic_cache(self.batch_select_indices.__name__) self.self_attention_cache.batch_select_indices(indices) self.cross_attention_cache.batch_select_indices(indices) def get_max_cache_shape(self) -> int: """Returns the maximum sequence length (i.e. max capacity) of the cache object""" return self.self_attention_cache.get_max_cache_shape() def get_mask_sizes(self, cache_position: torch.Tensor, layer_idx: int) -> tuple[int, int]: return self.self_attention_cache.get_mask_sizes(cache_position, layer_idx) @property def is_sliding(self): return self.self_attention_cache.is_sliding @property def is_compileable(self) -> bool: return self.self_attention_cache.is_compileable ### Deprecated classes
EncoderDecoderCache
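A hedged sketch of the two-argument constructor path above. The model name and the `config=` keyword follow the class docstring's own example and are not verified beyond it.

```python
from transformers import AutoConfig, DynamicCache, EncoderDecoderCache

config = AutoConfig.from_pretrained("openai/whisper-small")
# Pair a self-attention cache with a cross-attention cache, as in the docstring.
past_key_values = EncoderDecoderCache(DynamicCache(config=config), DynamicCache(config=config))
print(len(past_key_values))  # number of layers tracked by the self-attention cache
```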
python
huggingface__transformers
src/transformers/models/megatron_bert/modeling_megatron_bert.py
{ "start": 15590, "end": 19355 }
class ____(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([MegatronBertLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)])

        # The final layer norm. We removed the 1st LN, moved LN to each hidden layer and this one
        # is simply the final LN (Transformer's BERT has it attached to each hidden layer).
        self.ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Cache] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
        cache_position: Optional[torch.Tensor] = None,
    ) -> Union[tuple, BaseModelOutputWithPastAndCrossAttentions]:
        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        if use_cache and past_key_values is None:
            past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))

        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states,
                attention_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                past_key_values,
                output_attentions,
                cache_position,
            )

            # Because we moved the layer norm to the end of each hidden layer, the data here is
            # non-normalized. If normalized data is really needed, we must apply LN to match Transformer's BERT.
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        # Finalize the hidden states.
        hidden_states = self.ln(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    past_key_values,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )


# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->MegatronBert
MegatronBertEncoder
python
keras-team__keras
keras/src/regularizers/regularizers.py
{ "start": 7945, "end": 8769 }
class ____(Regularizer): """A regularizer that applies a L2 regularization penalty. The L2 regularization penalty is computed as: `loss = l2 * reduce_sum(square(x))` L2 may be passed to a layer as a string identifier: >>> dense = Dense(3, kernel_regularizer='l2') In this case, the default value used is `l2=0.01`. Arguments: l2: float, L2 regularization factor. """ def __init__(self, l2=0.01): l2 = 0.01 if l2 is None else l2 validate_float_arg(l2, name="l2") self.l2 = l2 def __call__(self, x): return self.l2 * ops.sum(ops.square(x)) def get_config(self): return {"l2": float(self.l2)} @keras_export( [ "keras.regularizers.OrthogonalRegularizer", "keras.regularizers.orthogonal_regularizer", ] )
L2
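A short usage sketch of the regularizer above (illustrative; assumes Keras 3 ops accept NumPy inputs, which they do across backends):

```python
import numpy as np
from keras import regularizers

reg = regularizers.L2(l2=0.01)
penalty = reg(np.ones((2, 2)))  # 0.01 * sum(x**2) = 0.01 * 4
print(float(penalty))  # 0.04
```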
python
pyqtgraph__pyqtgraph
pyqtgraph/widgets/ScatterPlotWidget.py
{ "start": 403, "end": 11483 }
class ____(QtWidgets.QSplitter): """ This is a high-level widget for exploring relationships in tabular data. Given a multi-column record array, the widget displays a scatter plot of a specific subset of the data. Includes controls for selecting the columns to plot, filtering data, and determining symbol color and shape. The widget consists of four components: 1) A list of column names from which the user may select 1 or 2 columns to plot. If one column is selected, the data for that column will be plotted in a histogram-like manner by using :func:`pseudoScatter() <pyqtgraph.pseudoScatter>`. If two columns are selected, then the scatter plot will be generated with x determined by the first column that was selected and y by the second. 2) A DataFilter that allows the user to select a subset of the data by specifying multiple selection criteria. 3) A ColorMap that allows the user to determine how points are colored by specifying multiple criteria. 4) A PlotWidget for displaying the data. """ sigScatterPlotClicked = QtCore.Signal(object, object, object) sigScatterPlotHovered = QtCore.Signal(object, object, object) def __init__(self, parent=None): QtWidgets.QSplitter.__init__(self, QtCore.Qt.Orientation.Horizontal) self.ctrlPanel = QtWidgets.QSplitter(QtCore.Qt.Orientation.Vertical) self.addWidget(self.ctrlPanel) self.fieldList = QtWidgets.QListWidget() self.fieldList.setSelectionMode(self.fieldList.SelectionMode.ExtendedSelection) self.ptree = ptree.ParameterTree(showHeader=False) self.filter = DataFilterParameter() self.colorMap = ColorMapParameter() self.params = ptree.Parameter.create(name='params', type='group', children=[self.filter, self.colorMap]) self.ptree.setParameters(self.params, showTop=False) self.plot = PlotWidget() self.ctrlPanel.addWidget(self.fieldList) self.ctrlPanel.addWidget(self.ptree) self.addWidget(self.plot) fg = fn.mkColor(getConfigOption('foreground')) fg.setAlpha(150) self.filterText = TextItem(border=getConfigOption('foreground'), color=fg) self.filterText.setPos(60,20) self.filterText.setParentItem(self.plot.plotItem) self.data = None self.indices = None self.mouseOverField = None self.scatterPlot = None self.selectionScatter = None self.selectedIndices = [] self.style = dict(pen=None, symbol='o') self._visibleXY = None # currently plotted points self._visibleData = None # currently plotted records self._visibleIndices = None self._indexMap = None self.fieldList.itemSelectionChanged.connect(self.fieldSelectionChanged) self.filter.sigFilterChanged.connect(self.filterChanged) self.colorMap.sigColorMapChanged.connect(self.updatePlot) def setFields(self, fields, mouseOverField=None): """ Set the list of field names/units to be processed. The format of *fields* is the same as used by :meth:`~pyqtgraph.widgets.ColorMapWidget.ColorMapParameter.setFields` """ self.fields = OrderedDict(fields) self.mouseOverField = mouseOverField self.fieldList.clear() for f,opts in fields: item = QtWidgets.QListWidgetItem(f) item.opts = opts item = self.fieldList.addItem(item) self.filter.setFields(fields) self.colorMap.setFields(fields) def setSelectedFields(self, *fields): self.fieldList.itemSelectionChanged.disconnect(self.fieldSelectionChanged) try: self.fieldList.clearSelection() for f in fields: i = list(self.fields.keys()).index(f) item = self.fieldList.item(i) item.setSelected(True) finally: self.fieldList.itemSelectionChanged.connect(self.fieldSelectionChanged) self.fieldSelectionChanged() def setData(self, data): """ Set the data to be processed and displayed. 
Argument must be a numpy record array.
        """
        self.data = data
        self.indices = np.arange(len(data))
        self.filtered = None
        self.filteredIndices = None
        self.updatePlot()

    def setSelectedIndices(self, inds):
        """Mark the specified indices as selected.

        Must be a sequence of integers that index into the array given in setData().
        """
        self.selectedIndices = inds
        self.updateSelected()

    def setSelectedPoints(self, points):
        """Mark the specified points as selected.

        Must be a list of points as generated by the sigScatterPlotClicked signal.
        """
        self.setSelectedIndices([pt.originalIndex for pt in points])

    def fieldSelectionChanged(self):
        sel = self.fieldList.selectedItems()
        if len(sel) > 2:
            self.fieldList.blockSignals(True)
            try:
                for item in sel[1:-1]:
                    item.setSelected(False)
            finally:
                self.fieldList.blockSignals(False)

        self.updatePlot()

    def filterChanged(self, f):
        self.filtered = None
        self.updatePlot()

        desc = self.filter.describe()
        if len(desc) == 0:
            self.filterText.setVisible(False)
        else:
            self.filterText.setText('\n'.join(desc))
            self.filterText.setVisible(True)

    def updatePlot(self):
        self.plot.clear()
        if self.data is None or len(self.data) == 0:
            return

        if self.filtered is None:
            mask = self.filter.generateMask(self.data)
            self.filtered = self.data[mask]
            self.filteredIndices = self.indices[mask]
        data = self.filtered
        if len(data) == 0:
            return

        colors = np.array([fn.mkBrush(*x) for x in self.colorMap.map(data)])

        style = self.style.copy()

        ## Look up selected columns and units
        sel = list([str(item.text()) for item in self.fieldList.selectedItems()])
        units = list([item.opts.get('units', '') for item in self.fieldList.selectedItems()])
        if len(sel) == 0:
            self.plot.setTitle('')
            return

        if len(sel) == 1:
            self.plot.setLabels(left=('N', ''), bottom=(sel[0], units[0]), title='')
            if len(data) == 0:
                return
            #x = data[sel[0]]
            #y = None
            xy = [data[sel[0]], None]
        elif len(sel) == 2:
            self.plot.setLabels(left=(sel[1],units[1]), bottom=(sel[0],units[0]))
            if len(data) == 0:
                return

            xy = [data[sel[0]], data[sel[1]]]
            #xydata = []
            #for ax in [0,1]:
                #d = data[sel[ax]]
                ### scatter categorical values just a bit so they show up better in the scatter plot.
##if sel[ax] in ['MorphologyBSMean', 'MorphologyTDMean', 'FIType']: ##d += np.random.normal(size=len(cells), scale=0.1) #xydata.append(d) #x,y = xydata ## convert enum-type fields to float, set axis labels enum = [False, False] for i in [0,1]: axis = self.plot.getAxis(['bottom', 'left'][i]) if xy[i] is not None and (self.fields[sel[i]].get('mode', None) == 'enum' or xy[i].dtype.kind in ('S', 'O')): vals = self.fields[sel[i]].get('values', list(set(xy[i]))) xy[i] = np.array([vals.index(x) if x in vals else len(vals) for x in xy[i]], dtype=float) axis.setTicks([list(enumerate(vals))]) enum[i] = True else: axis.setTicks(None) # reset to automatic ticking ## mask out any nan values mask = np.ones(len(xy[0]), dtype=bool) if xy[0].dtype.kind == 'f': mask &= np.isfinite(xy[0]) if xy[1] is not None and xy[1].dtype.kind == 'f': mask &= np.isfinite(xy[1]) xy[0] = xy[0][mask] style['symbolBrush'] = colors[mask] data = data[mask] indices = self.filteredIndices[mask] ## Scatter y-values for a histogram-like appearance if xy[1] is None: ## column scatter plot xy[1] = fn.pseudoScatter(xy[0]) else: ## beeswarm plots xy[1] = xy[1][mask] for ax in [0,1]: if not enum[ax]: continue imax = int(xy[ax].max()) if len(xy[ax]) > 0 else 0 for i in range(imax+1): keymask = xy[ax] == i scatter = fn.pseudoScatter(xy[1-ax][keymask], bidir=True) if len(scatter) == 0: continue smax = np.abs(scatter).max() if smax != 0: scatter *= 0.2 / smax xy[ax][keymask] += scatter if self.scatterPlot is not None: try: self.scatterPlot.sigPointsClicked.disconnect(self.plotClicked) except: pass self._visibleXY = xy self._visibleData = data self._visibleIndices = indices self._indexMap = None self.scatterPlot = self.plot.plot(xy[0], xy[1], data=data, **style) self.scatterPlot.sigPointsClicked.connect(self.plotClicked) self.scatterPlot.sigPointsHovered.connect(self.plotHovered) self.updateSelected() def updateSelected(self): if self._visibleXY is None: return # map from global index to visible index indMap = self._getIndexMap() inds = [indMap[i] for i in self.selectedIndices if i in indMap] x,y = self._visibleXY[0][inds], self._visibleXY[1][inds] if self.selectionScatter is not None: self.plot.plotItem.removeItem(self.selectionScatter) if len(x) == 0: return self.selectionScatter = self.plot.plot(x, y, pen=None, symbol='s', symbolSize=12, symbolBrush=None, symbolPen='y') def _getIndexMap(self): # mapping from original data index to visible point index if self._indexMap is None: self._indexMap = {j:i for i,j in enumerate(self._visibleIndices)} return self._indexMap def plotClicked(self, plot, points, ev): # Tag each point with its index into the original dataset for pt in points: pt.originalIndex = self._visibleIndices[pt.index()] self.sigScatterPlotClicked.emit(self, points, ev) def plotHovered(self, plot, points, ev): self.sigScatterPlotHovered.emit(self, points, ev)
ScatterPlotWidget
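A minimal wiring sketch for the widget above; the field options follow the `setFields` format described in the docstring, and the data values are illustrative.

```python
import numpy as np
import pyqtgraph as pg

app = pg.mkQApp()
data = np.array(
    [(1.0, 2.0), (3.0, 4.0), (5.0, 1.0)],
    dtype=[("x", float), ("y", float)],
)
w = pg.ScatterPlotWidget()
w.setFields([("x", {"units": "m"}), ("y", {"units": "m"})])
w.setData(data)
w.show()
pg.exec()  # start the Qt event loop (pyqtgraph >= 0.12 helper)
```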
python
h5py__h5py
examples/swmr_inotify_example.py
{ "start": 924, "end": 2673 }
class ____(pyinotify.ProcessEvent): def monitor_dataset(self, filename, datasetname): logging.info("Opening file %s", filename) self.f = h5py.File(filename, 'r', libver='latest', swmr=True) logging.debug("Looking up dataset %s"%datasetname) self.dset = self.f[datasetname] self.get_dset_shape() def get_dset_shape(self): logging.debug("Refreshing dataset") self.dset.refresh() logging.debug("Getting shape") shape = self.dset.shape logging.info("Read data shape: %s"%str(shape)) return shape def read_dataset(self, latest): logging.info("Reading out dataset [%d]"%latest) self.dset[latest:] def process_IN_MODIFY(self, event): logging.debug("File modified!") shape = self.get_dset_shape() self.read_dataset(shape[0]) def process_IN_CLOSE_WRITE(self, event): logging.info("File writer closed file") self.get_dset_shape() logging.debug("Good bye!") sys.exit(0) if __name__ == "__main__": logging.basicConfig(format='%(asctime)s %(levelname)s\t%(message)s',level=logging.INFO) file_name = "swmr.h5" if len(sys.argv) > 1: file_name = sys.argv[1] dataset_name = "data" if len(sys.argv) > 2: dataset_name = sys.argv[2] wm = pyinotify.WatchManager() # Watch Manager mask = pyinotify.IN_MODIFY | pyinotify.IN_CLOSE_WRITE evh = EventHandler() evh.monitor_dataset( file_name, dataset_name ) notifier = pyinotify.AsyncNotifier(wm, evh) wdd = wm.add_watch(file_name, mask, rec=False) # Sit in this loop() until the file writer closes the file # or the user hits ctrl-c asyncore.loop()
EventHandler
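For context, a hedged sketch of the matching writer side: the file must be created with `libver='latest'` and switched to SWMR mode before the watcher above can follow appends.

```python
import h5py
import numpy as np

with h5py.File("swmr.h5", "w", libver="latest") as f:
    dset = f.create_dataset("data", shape=(0,), maxshape=(None,), dtype="f8")
    f.swmr_mode = True  # readers may now open the file with swmr=True
    for i in range(5):
        dset.resize((i + 1,))
        dset[i] = np.sin(i)
        dset.flush()  # make the new row visible to SWMR readers
```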
python
dagster-io__dagster
examples/airlift-migration-tutorial/tutorial_example/airflow_dags/dags.py
{ "start": 1652, "end": 4097 }
class ____(BaseOperator): def __init__( self, table_name: str, csv_path: Path, duckdb_path: Path, duckdb_database_name: str, *args, duckdb_schema: Optional[str] = None, **kwargs, ): self._table_name = table_name self._csv_path = csv_path self._duckdb_path = duckdb_path self._duckdb_schema = duckdb_schema self._duckdb_database_name = duckdb_database_name super().__init__(*args, **kwargs) def execute(self, context) -> None: export_duckdb_to_csv( ExportDuckDbToCsvArgs( table_name=self._table_name, csv_path=self._csv_path, duckdb_path=self._duckdb_path, duckdb_database_name=self._duckdb_database_name, ) ) default_args = { "owner": "airflow", "depends_on_past": False, "start_date": get_current_datetime_midnight(), "retries": 1, "retry_delay": timedelta(minutes=5), } DBT_DIR = os.getenv("TUTORIAL_DBT_PROJECT_DIR") # Create the DAG with the specified schedule interval dag = DAG( "rebuild_customers_list", default_args=default_args, schedule="@daily", is_paused_upon_creation=False, ) load_raw_customers = LoadCSVToDuckDB( task_id="load_raw_customers", dag=dag, table_name="raw_customers", csv_path=Path(__file__).parent / "raw_customers.csv", duckdb_path=Path(os.environ["AIRFLOW_HOME"]) / "jaffle_shop.duckdb", column_names=[ "id", "first_name", "last_name", ], duckdb_schema="raw_data", duckdb_database_name="jaffle_shop", ) args = f"--project-dir {DBT_DIR} --profiles-dir {DBT_DIR}" run_dbt_model = BashOperator(task_id="build_dbt_models", bash_command=f"dbt build {args}", dag=dag) export_customers = ExportDuckDBToCSV( task_id="export_customers", dag=dag, duckdb_path=Path(os.environ["AIRFLOW_HOME"]) / "jaffle_shop.duckdb", duckdb_database_name="jaffle_shop", table_name="customers", csv_path=Path(os.environ["TUTORIAL_EXAMPLE_DIR"]) / "customers.csv", ) load_raw_customers >> run_dbt_model >> export_customers # type: ignore # Set this to True to begin the proxying process PROXYING = False if PROXYING: proxying_to_dagster( global_vars=globals(), proxied_state=load_proxied_state_from_yaml(Path(__file__).parent / "proxied_state"), )
ExportDuckDBToCSV
python
huggingface__transformers
src/transformers/models/efficientnet/modeling_efficientnet.py
{ "start": 6192, "end": 7570 }
class ____(nn.Module):
    r"""
    This corresponds to the Squeeze and Excitation phase of each block in the original implementation.
    """

    def __init__(self, config: EfficientNetConfig, in_dim: int, expand_dim: int, expand: bool = False):
        super().__init__()
        self.dim = expand_dim if expand else in_dim
        self.dim_se = max(1, int(in_dim * config.squeeze_expansion_ratio))

        self.squeeze = nn.AdaptiveAvgPool2d(output_size=1)
        self.reduce = nn.Conv2d(
            in_channels=self.dim,
            out_channels=self.dim_se,
            kernel_size=1,
            padding="same",
        )
        self.expand = nn.Conv2d(
            in_channels=self.dim_se,
            out_channels=self.dim,
            kernel_size=1,
            padding="same",
        )
        self.act_reduce = ACT2FN[config.hidden_act]
        self.act_expand = nn.Sigmoid()

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        inputs = hidden_states
        hidden_states = self.squeeze(hidden_states)
        hidden_states = self.reduce(hidden_states)
        hidden_states = self.act_reduce(hidden_states)

        hidden_states = self.expand(hidden_states)
        hidden_states = self.act_expand(hidden_states)
        hidden_states = torch.mul(inputs, hidden_states)

        return hidden_states
EfficientNetSqueezeExciteLayer
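A shape sanity-check sketch (illustrative; the import path and config defaults are assumptions): the layer is shape-preserving, squeezing to 1×1, gating channel-wise, and multiplying back onto the input.

```python
import torch
from transformers import EfficientNetConfig
from transformers.models.efficientnet.modeling_efficientnet import (
    EfficientNetSqueezeExciteLayer,
)

config = EfficientNetConfig()
layer = EfficientNetSqueezeExciteLayer(config, in_dim=32, expand_dim=64, expand=True)
x = torch.randn(1, 64, 8, 8)  # channel count must match expand_dim when expand=True
print(layer(x).shape)  # torch.Size([1, 64, 8, 8])
```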
python
wandb__wandb
wandb/vendor/graphql-core-1.1/wandb_graphql/language/parser.py
{ "start": 827, "end": 1125 }
class ____(object): __slots__ = 'lexer', 'source', 'options', 'prev_end', 'token' def __init__(self, source, options): self.lexer = Lexer(source) self.source = source self.options = options self.prev_end = 0 self.token = self.lexer.next_token()
Parser
python
numba__numba
numba/tests/test_mixed_tuple_unroller.py
{ "start": 2313, "end": 2597 }
class ____(FunctionPass): _name = "reset_the_type_information" def __init__(self): FunctionPass.__init__(self) def run_pass(self, state): state.typemap = None state.return_type = None state.calltypes = None return True
ResetTypeInfo
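For orientation, a hedged sketch of how such a pass is normally declared and registered in numba; the decorator arguments mirror numba's public `register_pass` API, and the pass body is a hypothetical no-op.

```python
from numba.core.compiler_machinery import FunctionPass, register_pass

@register_pass(mutates_CFG=False, analysis_only=False)
class NoOpPass(FunctionPass):  # hypothetical example pass
    _name = "no_op_pass"

    def __init__(self):
        FunctionPass.__init__(self)

    def run_pass(self, state):
        # Return False to report that the IR was left unchanged.
        return False
```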
python
getsentry__sentry
src/sentry/relocation/services/relocation_export/impl.py
{ "start": 1285, "end": 5694 }
class ____(RegionRelocationExportService):
    def request_new_export(
        self,
        *,
        relocation_uuid: str,
        requesting_region_name: str,
        replying_region_name: str,
        org_slug: str,
        encrypt_with_public_key: bytes,
    ) -> None:
        logger_data = {
            "uuid": relocation_uuid,
            "requesting_region_name": requesting_region_name,
            "replying_region_name": replying_region_name,
            "org_slug": org_slug,
            "encrypted_bytes_size": len(encrypt_with_public_key),
        }
        logger.info("SaaS -> SaaS request received in exporting region", extra=logger_data)

        # This task will do the actual work of performing the export and saving it to this region's
        # "relocation" GCS bucket. It is annotated with the appropriate retry, back-off, etc. logic
        # for robustness' sake. The last action performed by this task is to call an instance of
        # `ControlRelocationExportService.reply_with_export` via a manually-scheduled
        # `RegionOutbox`, which will handle the task of asynchronously delivering the encrypted,
        # newly-exported bytes.
        fulfill_cross_region_export_request.apply_async(
            args=[
                relocation_uuid,
                requesting_region_name,
                replying_region_name,
                org_slug,
                base64.b64encode(encrypt_with_public_key).decode("utf8"),
                int(round(datetime.now(tz=UTC).timestamp())),
            ]
        )
        logger.info("SaaS -> SaaS exporting task scheduled", extra=logger_data)

    def reply_with_export(
        self,
        *,
        relocation_uuid: str,
        requesting_region_name: str,
        replying_region_name: str,
        org_slug: str,
        encrypted_bytes: list[int],
        # TODO(azaslavsky): finish transfer from `encrypted_contents` -> `encrypted_bytes`.
        encrypted_contents: bytes | None = None,
    ) -> None:
        with atomic_transaction(
            using=(
                router.db_for_write(Relocation),
                router.db_for_write(RelocationFile),
                router.db_for_write(File),
            )
        ):
            logger_data = {
                "uuid": relocation_uuid,
                "requesting_region_name": requesting_region_name,
                "replying_region_name": replying_region_name,
                "org_slug": org_slug,
                # TODO(azaslavsky): finish transfer from `encrypted_contents` -> `encrypted_bytes`.
                "encrypted_bytes_size": len(encrypted_bytes or []),
            }
            logger.info("SaaS -> SaaS reply received in triggering region", extra=logger_data)

            try:
                relocation: Relocation = Relocation.objects.get(uuid=relocation_uuid)
            except Relocation.DoesNotExist as e:
                logger.exception("Could not locate Relocation model by UUID: %s", relocation_uuid)
                capture_exception(e)
                return

            # TODO(azaslavsky): finish transfer from `encrypted_contents` -> `encrypted_bytes`.
            fp = BytesIO(bytes(encrypted_bytes or []))
            file = File.objects.create(name="raw-relocation-data.tar", type=RELOCATION_FILE_TYPE)
            file.putfile(fp, blob_size=RELOCATION_BLOB_SIZE, logger=logger)
            logger.info("SaaS -> SaaS relocation underlying File created", extra=logger_data)

            # This write ensures that the entire chain triggered by `uploading_start` remains
            # idempotent, since only one (relocation_uuid, relocation_file_kind) pairing can exist
            # in that database's table at a time. If we try to write a second, it will fail due to
            # that unique constraint.
            try:
                RelocationFile.objects.create(
                    relocation=relocation,
                    file=file,
                    kind=RelocationFile.Kind.RAW_USER_DATA.value,
                )
            except IntegrityError:
                # We already have the file, we can proceed.
                pass

            logger.info("SaaS -> SaaS relocation RelocationFile saved", extra=logger_data)

            uploading_complete.apply_async(args=[relocation.uuid])
            logger.info("SaaS -> SaaS relocation next task scheduled", extra=logger_data)
DBBackedRelocationExportService
python
realpython__materials
arcade-platformer/arcade_platformer/13_pause_view.py
{ "start": 3289, "end": 4717 }
class ____(arcade.View): """Show instructions to the player""" def __init__(self) -> None: """Create instructions screen""" super().__init__() # Find the instructions image in the image folder instructions_image_path = ( ASSETS_PATH / "images" / "instructions_image.png" ) # Load our title image self.instructions_image = arcade.load_texture(instructions_image_path) def on_draw(self) -> None: # Start the rendering loop arcade.start_render() # Draw a rectangle filled with the instructions image arcade.draw_texture_rectangle( center_x=SCREEN_WIDTH / 2, center_y=SCREEN_HEIGHT / 2, width=SCREEN_WIDTH, height=SCREEN_HEIGHT, texture=self.instructions_image, ) def on_key_press(self, key: int, modifiers: int) -> None: """Start the game when the user presses Enter Arguments: key -- Which key was pressed modifiers -- What modifiers were active """ if key == arcade.key.RETURN: game_view = PlatformerView() game_view.setup() self.window.show_view(game_view) elif key == arcade.key.ESCAPE: title_view = TitleView() self.window.show_view(title_view) # Pause view, used when the player pauses the game
InstructionsView
python
pennersr__django-allauth
allauth/socialaccount/providers/shopify/views.py
{ "start": 1965, "end": 3592 }
class ____(OAuth2LoginView):
    def dispatch(self, request, *args, **kwargs):
        is_embedded = (
            getattr(settings, "SOCIALACCOUNT_PROVIDERS", {})
            .get("shopify", {})
            .get("IS_EMBEDDED", False)
        )
        if is_embedded:
            # TODO: This bypasses LOGIN_ON_GET, but:
            #
            # The Embedded App SDK (EASDK) and backwards compatibility layer
            # are being removed from Shopify on January 1, 2022.
            #
            # So this needs to be dropped/revisited anyway.
            response = super().dispatch(request, *args, **kwargs)
            """
            Shopify embedded apps (that run within an iFrame) require a JS
            (not server) redirect for starting the oauth2 process.

            See Also:
            https://help.shopify.com/api/sdks/embedded-app-sdk/getting-started#oauth
            """
            js = "".join(
                (
                    "<!DOCTYPE html><html><head>"
                    '<script type="text/javascript">',
                    'window.top.location.href = "{url}";'.format(url=response.url),
                    "</script></head><body></body></html>",
                )
            )
            response = HttpResponse(content=js)
            # Because this view will be within shopify's iframe
            response.xframe_options_exempt = True
            return response
        return super().dispatch(request, *args, **kwargs)


oauth2_login = ShopifyOAuth2LoginView.adapter_view(ShopifyOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(ShopifyOAuth2Adapter)
ShopifyOAuth2LoginView
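The settings toggle checked at the top of `dispatch` can be sketched as follows (illustrative):

```python
# settings.py — enables the JS-based top-frame redirect for embedded apps
SOCIALACCOUNT_PROVIDERS = {
    "shopify": {
        "IS_EMBEDDED": True,
    }
}
```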
python
django__django
django/contrib/admin/templatetags/admin_list.py
{ "start": 11648, "end": 19156 }
class ____(list): """ Wrapper class used to return items in a list_editable changelist, annotated with the form object for error reporting purposes. Needed to maintain backwards compatibility with existing admin templates. """ def __init__(self, form, *items): self.form = form super().__init__(*items) def results(cl): if cl.formset: for res, form in zip(cl.result_list, cl.formset.forms): yield ResultList(form, items_for_result(cl, res, form)) else: for res in cl.result_list: yield ResultList(None, items_for_result(cl, res, None)) def result_hidden_fields(cl): if cl.formset: for res, form in zip(cl.result_list, cl.formset.forms): if form[cl.model._meta.pk.name].is_hidden: yield mark_safe(form[cl.model._meta.pk.name]) def result_list(cl): """ Display the headers and data list together. """ headers = list(result_headers(cl)) num_sorted_fields = 0 for h in headers: if h["sortable"] and h["sorted"]: num_sorted_fields += 1 return { "cl": cl, "result_hidden_fields": list(result_hidden_fields(cl)), "result_headers": headers, "num_sorted_fields": num_sorted_fields, "results": list(results(cl)), } @register.tag(name="result_list") def result_list_tag(parser, token): return InclusionAdminNode( parser, token, func=result_list, template_name="change_list_results.html", takes_context=False, ) def date_hierarchy(cl): """ Display the date hierarchy for date drill-down functionality. """ if cl.date_hierarchy: field_name = cl.date_hierarchy field = get_fields_from_path(cl.model, field_name)[-1] if isinstance(field, models.DateTimeField): dates_or_datetimes = "datetimes" else: dates_or_datetimes = "dates" year_field = "%s__year" % field_name month_field = "%s__month" % field_name day_field = "%s__day" % field_name field_generic = "%s__" % field_name year_lookup = cl.params.get(year_field) month_lookup = cl.params.get(month_field) day_lookup = cl.params.get(day_field) def link(filters): return cl.get_query_string(filters, [field_generic]) if not (year_lookup or month_lookup or day_lookup): # select appropriate start level date_range = cl.queryset.aggregate( first=models.Min(field_name), last=models.Max(field_name) ) if date_range["first"] and date_range["last"]: if dates_or_datetimes == "datetimes": date_range = { k: timezone.localtime(v) if timezone.is_aware(v) else v for k, v in date_range.items() } if date_range["first"].year == date_range["last"].year: year_lookup = date_range["first"].year if date_range["first"].month == date_range["last"].month: month_lookup = date_range["first"].month if year_lookup and month_lookup and day_lookup: day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup)) return { "show": True, "back": { "link": link({year_field: year_lookup, month_field: month_lookup}), "title": capfirst(formats.date_format(day, "YEAR_MONTH_FORMAT")), }, "choices": [ {"title": capfirst(formats.date_format(day, "MONTH_DAY_FORMAT"))} ], } elif year_lookup and month_lookup: days = getattr(cl.queryset, dates_or_datetimes)(field_name, "day") return { "show": True, "back": { "link": link({year_field: year_lookup}), "title": str(year_lookup), }, "choices": [ { "link": link( { year_field: year_lookup, month_field: month_lookup, day_field: day.day, } ), "title": capfirst(formats.date_format(day, "MONTH_DAY_FORMAT")), } for day in days ], } elif year_lookup: months = getattr(cl.queryset, dates_or_datetimes)(field_name, "month") return { "show": True, "back": {"link": link({}), "title": _("All dates")}, "choices": [ { "link": link( {year_field: year_lookup, month_field: month.month} ), "title": 
capfirst( formats.date_format(month, "YEAR_MONTH_FORMAT") ), } for month in months ], } else: years = getattr(cl.queryset, dates_or_datetimes)(field_name, "year") return { "show": True, "back": None, "choices": [ { "link": link({year_field: str(year.year)}), "title": str(year.year), } for year in years ], } @register.tag(name="date_hierarchy") def date_hierarchy_tag(parser, token): return InclusionAdminNode( parser, token, func=date_hierarchy, template_name="date_hierarchy.html", takes_context=False, ) def search_form(cl): """ Display a search form for searching the list. """ return { "cl": cl, "show_result_count": cl.result_count != cl.full_result_count, "search_var": SEARCH_VAR, "is_popup_var": IS_POPUP_VAR, "is_facets_var": IS_FACETS_VAR, } @register.tag(name="search_form") def search_form_tag(parser, token): return InclusionAdminNode( parser, token, func=search_form, template_name="search_form.html", takes_context=False, ) @register.simple_tag def admin_list_filter(cl, spec): tpl = get_template(spec.template) return tpl.render( { "title": spec.title, "choices": list(spec.choices(cl)), "spec": spec, } ) def admin_actions(context): """ Track the number of times the action field has been rendered on the page, so we know which value to use. """ context["action_index"] = context.get("action_index", -1) + 1 return context @register.tag(name="admin_actions") def admin_actions_tag(parser, token): return InclusionAdminNode( parser, token, func=admin_actions, template_name="actions.html" ) @register.tag(name="change_list_object_tools") def change_list_object_tools_tag(parser, token): """Display the row of change list object tools.""" return InclusionAdminNode( parser, token, func=lambda context: context, template_name="change_list_object_tools.html", )
ResultList
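The `date_hierarchy` drill-down above is driven from a `ModelAdmin` option; a minimal illustrative sketch (the model name is hypothetical):

```python
from django.contrib import admin

from .models import Article  # hypothetical model with a `pub_date` DateTimeField

@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
    date_hierarchy = "pub_date"  # renders the year -> month -> day drill-down
```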
python
lepture__authlib
authlib/oauth2/rfc6749/grants/authorization_code.py
{ "start": 529, "end": 15604 }
class ____(BaseGrant, AuthorizationEndpointMixin, TokenEndpointMixin): """The authorization code grant type is used to obtain both access tokens and refresh tokens and is optimized for confidential clients. Since this is a redirection-based flow, the client must be capable of interacting with the resource owner's user-agent (typically a web browser) and capable of receiving incoming requests (via redirection) from the authorization server:: +----------+ | Resource | | Owner | | | +----------+ ^ | (B) +----|-----+ Client Identifier +---------------+ | -+----(A)-- & Redirection URI ---->| | | User- | | Authorization | | Agent -+----(B)-- User authenticates --->| Server | | | | | | -+----(C)-- Authorization Code ---<| | +-|----|---+ +---------------+ | | ^ v (A) (C) | | | | | | ^ v | | +---------+ | | | |>---(D)-- Authorization Code ---------' | | Client | & Redirection URI | | | | | |<---(E)----- Access Token -------------------' +---------+ (w/ Optional Refresh Token) """ #: Allowed client auth methods for token endpoint TOKEN_ENDPOINT_AUTH_METHODS = ["client_secret_basic", "client_secret_post"] #: Generated "code" length AUTHORIZATION_CODE_LENGTH = 48 RESPONSE_TYPES = {"code"} GRANT_TYPE = "authorization_code" def validate_authorization_request(self): """The client constructs the request URI by adding the following parameters to the query component of the authorization endpoint URI using the "application/x-www-form-urlencoded" format. Per `Section 4.1.1`_. response_type REQUIRED. Value MUST be set to "code". client_id REQUIRED. The client identifier as described in Section 2.2. redirect_uri OPTIONAL. As described in Section 3.1.2. scope OPTIONAL. The scope of the access request as described by Section 3.3. state RECOMMENDED. An opaque value used by the client to maintain state between the request and callback. The authorization server includes this value when redirecting the user-agent back to the client. The parameter SHOULD be used for preventing cross-site request forgery as described in Section 10.12. The client directs the resource owner to the constructed URI using an HTTP redirection response, or by other means available to it via the user-agent. For example, the client directs the user-agent to make the following HTTP request using TLS (with extra line breaks for display purposes only): .. code-block:: http GET /authorize?response_type=code&client_id=s6BhdRkqt3&state=xyz &redirect_uri=https%3A%2F%2Fclient%2Eexample%2Ecom%2Fcb HTTP/1.1 Host: server.example.com The authorization server validates the request to ensure that all required parameters are present and valid. If the request is valid, the authorization server authenticates the resource owner and obtains an authorization decision (by asking the resource owner or by establishing approval via other means). .. _`Section 4.1.1`: https://tools.ietf.org/html/rfc6749#section-4.1.1 """ return validate_code_authorization_request(self) def create_authorization_response(self, redirect_uri: str, grant_user): """If the resource owner grants the access request, the authorization server issues an authorization code and delivers it to the client by adding the following parameters to the query component of the redirection URI using the "application/x-www-form-urlencoded" format. Per `Section 4.1.2`_. code REQUIRED. The authorization code generated by the authorization server. The authorization code MUST expire shortly after it is issued to mitigate the risk of leaks. A maximum authorization code lifetime of 10 minutes is RECOMMENDED. 
The client MUST NOT use the authorization code more than once. If an authorization code is used more than once, the authorization server MUST deny the request and SHOULD revoke (when possible) all tokens previously issued based on that authorization code. The authorization code is bound to the client identifier and redirection URI. state REQUIRED if the "state" parameter was present in the client authorization request. The exact value received from the client. For example, the authorization server redirects the user-agent by sending the following HTTP response. .. code-block:: http HTTP/1.1 302 Found Location: https://client.example.com/cb?code=SplxlOBeZQQYbYS6WxSbIA &state=xyz .. _`Section 4.1.2`: https://tools.ietf.org/html/rfc6749#section-4.1.2 :param redirect_uri: Redirect to the given URI for the authorization :param grant_user: if resource owner granted the request, pass this resource owner, otherwise pass None. :returns: (status_code, body, headers) """ if not grant_user: raise AccessDeniedError(redirect_uri=redirect_uri) self.request.user = grant_user code = self.generate_authorization_code() self.save_authorization_code(code, self.request) params = [("code", code)] if self.request.payload.state: params.append(("state", self.request.payload.state)) uri = add_params_to_uri(redirect_uri, params) headers = [("Location", uri)] return 302, "", headers @hooked def validate_token_request(self): """The client makes a request to the token endpoint by sending the following parameters using the "application/x-www-form-urlencoded" format per `Section 4.1.3`_: grant_type REQUIRED. Value MUST be set to "authorization_code". code REQUIRED. The authorization code received from the authorization server. redirect_uri REQUIRED, if the "redirect_uri" parameter was included in the authorization request as described in Section 4.1.1, and their values MUST be identical. client_id REQUIRED, if the client is not authenticating with the authorization server as described in Section 3.2.1. If the client type is confidential or the client was issued client credentials (or assigned other authentication requirements), the client MUST authenticate with the authorization server as described in Section 3.2.1. For example, the client makes the following HTTP request using TLS: .. code-block:: http POST /token HTTP/1.1 Host: server.example.com Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW Content-Type: application/x-www-form-urlencoded grant_type=authorization_code&code=SplxlOBeZQQYbYS6WxSbIA &redirect_uri=https%3A%2F%2Fclient%2Eexample%2Ecom%2Fcb .. 
_`Section 4.1.3`: https://tools.ietf.org/html/rfc6749#section-4.1.3 """ # ignore validate for grant_type, since it is validated by # check_token_endpoint # authenticate the client if client authentication is included client = self.authenticate_token_endpoint_client() log.debug("Validate token request of %r", client) if not client.check_grant_type(self.GRANT_TYPE): raise UnauthorizedClientError( f"The client is not authorized to use 'grant_type={self.GRANT_TYPE}'" ) code = self.request.form.get("code") if code is None: raise InvalidRequestError("Missing 'code' in request.") # ensure that the authorization code was issued to the authenticated # confidential client, or if the client is public, ensure that the # code was issued to "client_id" in the request authorization_code = self.query_authorization_code(code, client) if not authorization_code: raise InvalidGrantError("Invalid 'code' in request.") # validate redirect_uri parameter log.debug("Validate token redirect_uri of %r", client) redirect_uri = self.request.payload.redirect_uri original_redirect_uri = authorization_code.get_redirect_uri() if original_redirect_uri and redirect_uri != original_redirect_uri: raise InvalidGrantError("Invalid 'redirect_uri' in request.") # save for create_token_response self.request.client = client self.request.authorization_code = authorization_code @hooked def create_token_response(self): """If the access token request is valid and authorized, the authorization server issues an access token and optional refresh token as described in Section 5.1. If the request client authentication failed or is invalid, the authorization server returns an error response as described in Section 5.2. Per `Section 4.1.4`_. An example successful response: .. code-block:: http HTTP/1.1 200 OK Content-Type: application/json Cache-Control: no-store Pragma: no-cache { "access_token":"2YotnFZFEjr1zCsicMWpAA", "token_type":"example", "expires_in":3600, "refresh_token":"tGzv3JOkF0XG5Qx2TlKWIA", "example_parameter":"example_value" } :returns: (status_code, body, headers) .. _`Section 4.1.4`: https://tools.ietf.org/html/rfc6749#section-4.1.4 """ client = self.request.client authorization_code = self.request.authorization_code user = self.authenticate_user(authorization_code) if not user: raise InvalidGrantError("There is no 'user' for this code.") self.request.user = user scope = authorization_code.get_scope() token = self.generate_token( user=user, scope=scope, include_refresh_token=client.check_grant_type("refresh_token"), ) log.debug("Issue token %r to %r", token, client) self.save_token(token) self.delete_authorization_code(authorization_code) return 200, token, self.TOKEN_RESPONSE_HEADER def generate_authorization_code(self): """ "The method to generate "code" value for authorization code data. Developers may rewrite this method, or customize the code length with:: class MyAuthorizationCodeGrant(AuthorizationCodeGrant): AUTHORIZATION_CODE_LENGTH = 32 # default is 48 """ return generate_token(self.AUTHORIZATION_CODE_LENGTH) def save_authorization_code(self, code, request): """Save authorization_code for later use. Developers MUST implement it in subclass. 
Here is an example:: def save_authorization_code(self, code, request): client = request.client item = AuthorizationCode( code=code, client_id=client.client_id, redirect_uri=request.payload.redirect_uri, scope=request.payload.scope, user_id=request.user.id, ) item.save() """ raise NotImplementedError() def query_authorization_code(self, code, client): # pragma: no cover """Get authorization_code from previously savings. Developers MUST implement it in subclass:: def query_authorization_code(self, code, client): return Authorization.get(code=code, client_id=client.client_id) :param code: a string represent the code. :param client: client related to this code. :return: authorization_code object """ raise NotImplementedError() def delete_authorization_code(self, authorization_code): """Delete authorization code from database or cache. Developers MUST implement it in subclass, e.g.:: def delete_authorization_code(self, authorization_code): authorization_code.delete() :param authorization_code: the instance of authorization_code """ raise NotImplementedError() def authenticate_user(self, authorization_code): """Authenticate the user related to this authorization_code. Developers MUST implement this method in subclass, e.g.:: def authenticate_user(self, authorization_code): return User.get(authorization_code.user_id) :param authorization_code: AuthorizationCode object :return: user """ raise NotImplementedError() def validate_code_authorization_request(grant): request = grant.request client_id = request.payload.client_id log.debug("Validate authorization request of %r", client_id) if client_id is None: raise InvalidClientError( description="Missing 'client_id' parameter.", ) client = grant.server.query_client(client_id) if not client: raise InvalidClientError( description="The client does not exist on this server.", ) redirect_uri = grant.validate_authorization_redirect_uri(request, client) response_type = request.payload.response_type if not client.check_response_type(response_type): raise UnauthorizedClientError( f"The client is not authorized to use 'response_type={response_type}'", redirect_uri=redirect_uri, ) grant.request.client = client @hooked def validate_authorization_request_payload(grant, redirect_uri): grant.validate_requested_scope() try: validate_authorization_request_payload(grant, redirect_uri) except OAuth2Error as error: error.redirect_uri = redirect_uri raise error return redirect_uri
AuthorizationCodeGrant
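A minimal in-memory sketch of the subclass contract spelled out in the docstrings above. A real implementation must return an object exposing `get_redirect_uri()` and `get_scope()`; the plain dict here is purely illustrative.

```python
_codes: dict = {}  # illustrative in-memory store

class InMemoryAuthorizationCodeGrant(AuthorizationCodeGrant):  # hypothetical
    def save_authorization_code(self, code, request):
        _codes[code] = {
            "code": code,
            "client_id": request.client.client_id,
            "redirect_uri": request.payload.redirect_uri,
            "scope": request.payload.scope,
            "user_id": request.user.id,
        }

    def query_authorization_code(self, code, client):
        record = _codes.get(code)
        if record and record["client_id"] == client.client_id:
            return record  # a real record object, not a dict, in practice

    def delete_authorization_code(self, authorization_code):
        _codes.pop(authorization_code["code"], None)
```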
python
doocs__leetcode
solution/0400-0499/0497.Random Point in Non-overlapping Rectangles/Solution.py
{ "start": 0, "end": 579 }
class ____: def __init__(self, rects: List[List[int]]): self.rects = rects self.s = [0] * len(rects) for i, (x1, y1, x2, y2) in enumerate(rects): self.s[i] = self.s[i - 1] + (x2 - x1 + 1) * (y2 - y1 + 1) def pick(self) -> List[int]: v = random.randint(1, self.s[-1]) idx = bisect_left(self.s, v) x1, y1, x2, y2 = self.rects[idx] return [random.randint(x1, x2), random.randint(y1, y2)] # Your Solution object will be instantiated and called as such: # obj = Solution(rects) # param_1 = obj.pick()
Solution
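How the sampler above works, in brief: `s` holds prefix sums of integer-point counts per rectangle, so drawing `v` uniformly from `1..s[-1]` and bisecting selects each rectangle with probability proportional to its lattice-point count, then a point is drawn uniformly inside it. A usage sketch (the `List`/`random`/`bisect_left` names are LeetCode's implicit prelude, made explicit here):

```python
import random
from bisect import bisect_left
from typing import List

obj = Solution([[-2, -2, 1, 1], [2, 2, 4, 6]])
print(obj.pick())  # a uniformly random integer point covered by either rectangle
```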
python
getsentry__sentry
fixtures/page_objects/global_selection.py
{ "start": 29, "end": 1703 }
class ____(BasePage): def get_selected_project_slug(self): return self.browser.element('[data-test-id="page-filter-project-selector"]').text def get_selected_environment(self): return self.browser.element('[data-test-id="page-filter-environment-selector"]').text def get_selected_date(self): return self.browser.element('[data-test-id="page-filter-timerange-selector"]').text def go_back_to_issues(self): self.browser.click('[data-test-id="breadcrumb-link"]') def open_project_selector(self): self.browser.click('[data-test-id="page-filter-project-selector"]') def select_project_by_slug(self, slug): project_item_selector = f'//*[@data-test-id="menu-list-item-label" and text()="{slug}"]' self.open_project_selector() self.browser.wait_until(xpath=project_item_selector) self.browser.click(xpath=project_item_selector) def open_environment_selector(self): self.browser.click('[data-test-id="page-filter-environment-selector"]') def select_environment(self, environment): environment_path = f'//*[@data-test-id="menu-list-item-label" and text()="{environment}"]' self.open_environment_selector() self.browser.wait_until(xpath=environment_path) self.browser.click(xpath=environment_path) def open_date_selector(self): self.browser.click('[data-test-id="page-filter-timerange-selector"]') def select_date(self, date): date_path = f'//*[text()="{date}"]' self.open_date_selector() self.browser.wait_until(xpath=date_path) self.browser.click(xpath=date_path)
GlobalSelectionPage
python
langchain-ai__langchain
libs/langchain/langchain_classic/retrievers/multi_query.py
{ "start": 1667, "end": 7770 }
class ____(BaseRetriever): """Given a query, use an LLM to write a set of queries. Retrieve docs for each query. Return the unique union of all retrieved docs. """ retriever: BaseRetriever llm_chain: Runnable verbose: bool = True parser_key: str = "lines" """DEPRECATED. parser_key is no longer used and should not be specified.""" include_original: bool = False """Whether to include the original query in the list of generated queries.""" @classmethod def from_llm( cls, retriever: BaseRetriever, llm: BaseLanguageModel, prompt: BasePromptTemplate = DEFAULT_QUERY_PROMPT, parser_key: str | None = None, # noqa: ARG003 include_original: bool = False, # noqa: FBT001,FBT002 ) -> "MultiQueryRetriever": """Initialize from llm using default template. Args: retriever: retriever to query documents from llm: llm for query generation using DEFAULT_QUERY_PROMPT prompt: The prompt which aims to generate several different versions of the given user query parser_key: DEPRECATED. `parser_key` is no longer used and should not be specified. include_original: Whether to include the original query in the list of generated queries. Returns: MultiQueryRetriever """ output_parser = LineListOutputParser() llm_chain = prompt | llm | output_parser return cls( retriever=retriever, llm_chain=llm_chain, include_original=include_original, ) async def _aget_relevant_documents( self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun, ) -> list[Document]: """Get relevant documents given a user query. Args: query: user query run_manager: the callback handler to use. Returns: Unique union of relevant documents from all generated queries """ queries = await self.agenerate_queries(query, run_manager) if self.include_original: queries.append(query) documents = await self.aretrieve_documents(queries, run_manager) return self.unique_union(documents) async def agenerate_queries( self, question: str, run_manager: AsyncCallbackManagerForRetrieverRun, ) -> list[str]: """Generate queries based upon user input. Args: question: user query run_manager: the callback handler to use. Returns: List of LLM generated queries that are similar to the user input """ response = await self.llm_chain.ainvoke( {"question": question}, config={"callbacks": run_manager.get_child()}, ) lines = response["text"] if isinstance(self.llm_chain, LLMChain) else response if self.verbose: logger.info("Generated queries: %s", lines) return lines async def aretrieve_documents( self, queries: list[str], run_manager: AsyncCallbackManagerForRetrieverRun, ) -> list[Document]: """Run all LLM generated queries. Args: queries: query list run_manager: the callback handler to use Returns: List of retrieved Documents """ document_lists = await asyncio.gather( *( self.retriever.ainvoke( query, config={"callbacks": run_manager.get_child()}, ) for query in queries ), ) return [doc for docs in document_lists for doc in docs] def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun, ) -> list[Document]: """Get relevant documents given a user query. Args: query: user query run_manager: the callback handler to use. Returns: Unique union of relevant documents from all generated queries """ queries = self.generate_queries(query, run_manager) if self.include_original: queries.append(query) documents = self.retrieve_documents(queries, run_manager) return self.unique_union(documents) def generate_queries( self, question: str, run_manager: CallbackManagerForRetrieverRun, ) -> list[str]: """Generate queries based upon user input. 
Args: question: user query run_manager: run manager for callbacks Returns: List of LLM generated queries that are similar to the user input """ response = self.llm_chain.invoke( {"question": question}, config={"callbacks": run_manager.get_child()}, ) lines = response["text"] if isinstance(self.llm_chain, LLMChain) else response if self.verbose: logger.info("Generated queries: %s", lines) return lines def retrieve_documents( self, queries: list[str], run_manager: CallbackManagerForRetrieverRun, ) -> list[Document]: """Run all LLM generated queries. Args: queries: query list run_manager: run manager for callbacks Returns: List of retrieved Documents """ documents = [] for query in queries: docs = self.retriever.invoke( query, config={"callbacks": run_manager.get_child()}, ) documents.extend(docs) return documents def unique_union(self, documents: list[Document]) -> list[Document]: """Get unique Documents. Args: documents: List of retrieved Documents Returns: List of unique retrieved Documents """ return _unique_documents(documents)
MultiQueryRetriever
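A short usage sketch for this sample: the import paths below (`langchain_community`, `langchain_openai`) and the FAISS store are version-dependent assumptions, not part of the snippet, and any `BaseRetriever`/`BaseLanguageModel` pair would work.

from langchain_community.vectorstores import FAISS
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

# Hypothetical wiring; the vector store contents are placeholders.
vectorstore = FAISS.from_texts(
    ["harrison worked at kensho"], embedding=OpenAIEmbeddings()
)
retriever = MultiQueryRetriever.from_llm(
    retriever=vectorstore.as_retriever(),
    llm=ChatOpenAI(temperature=0),
    include_original=True,  # also run the user's original query
)
docs = retriever.invoke("Where did Harrison work?")  # unique union of all hits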
python
ansible__ansible
test/units/module_utils/facts/test_collector.py
{ "start": 1972, "end": 4610 }
class ____(unittest.TestCase): def _assert_equal_detail(self, obj1, obj2): msg = 'objects are not equal\n%s\n\n!=\n\n%s' % (pprint.pformat(obj1), pprint.pformat(obj2)) return self.assertEqual(obj1, obj2, msg) def test(self): collector_names = ['distribution', 'all_ipv4_addresses', 'local', 'pkg_mgr'] all_fact_subsets = self._all_fact_subsets() res = collector.select_collector_classes(collector_names, all_fact_subsets) expected = [default_collectors.DistributionFactCollector, default_collectors.PkgMgrFactCollector] self._assert_equal_detail(res, expected) def test_default_collectors(self): platform_info = {'system': 'Generic'} compat_platforms = [platform_info] collectors_for_platform = collector.find_collectors_for_platform(default_collectors.collectors, compat_platforms) all_fact_subsets, aliases_map = collector.build_fact_id_to_collector_map(collectors_for_platform) all_valid_subsets = frozenset(all_fact_subsets.keys()) collector_names = collector.get_collector_names(valid_subsets=all_valid_subsets, aliases_map=aliases_map, platform_info=platform_info) complete_collector_names = collector._solve_deps(collector_names, all_fact_subsets) dep_map = collector.build_dep_data(complete_collector_names, all_fact_subsets) ordered_deps = collector.tsort(dep_map) ordered_collector_names = [x[0] for x in ordered_deps] res = collector.select_collector_classes(ordered_collector_names, all_fact_subsets) assert res.index(default_collectors.ServiceMgrFactCollector) > res.index(default_collectors.DistributionFactCollector) assert res.index(default_collectors.ServiceMgrFactCollector) > res.index(default_collectors.PlatformFactCollector) def _all_fact_subsets(self, data=None): all_fact_subsets = defaultdict(list) _data = {'pkg_mgr': [default_collectors.PkgMgrFactCollector], 'distribution': [default_collectors.DistributionFactCollector], 'network': [default_collectors.LinuxNetworkCollector]} data = data or _data for key, value in data.items(): all_fact_subsets[key] = value return all_fact_subsets
TestSelectCollectorNames
python
tensorflow__tensorflow
tensorflow/python/types/trace.py
{ "start": 8579, "end": 9013 }
class ____(metaclass=abc.ABCMeta): """Contains information scoped to the tracing of multiple objects. `TracingContext` is a container class for flags and variables that have any kind of influence on the tracing behaviour of the class implementing the __tf_tracing_type__. This context will be shared across all __tf_tracing_type__ calls while constructing the TraceType for a particular set of objects. """
TracingContext
python
redis__redis-py
redis/asyncio/multidb/failure_detector.py
{ "start": 98, "end": 618 }
class ____(ABC):
    @abstractmethod
    async def register_failure(self, exception: Exception, cmd: tuple) -> None:
        """Register a failure that occurred during command execution."""
        pass

    @abstractmethod
    async def register_command_execution(self, cmd: tuple) -> None:
        """Register a command execution."""
        pass

    @abstractmethod
    def set_command_executor(self, command_executor) -> None:
        """Set the command executor for this failure detector."""
        pass
AsyncFailureDetector
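A minimal concrete subclass of the ABC above, sketched for illustration: the class name and failure-counting strategy are invented here, and a production detector would add thresholds or sliding windows before marking a database unhealthy.

from collections import Counter


class CountingFailureDetector(AsyncFailureDetector):  # hypothetical subclass
    def __init__(self) -> None:
        self._failures: Counter = Counter()
        self._executor = None

    async def register_failure(self, exception: Exception, cmd: tuple) -> None:
        # Key failures by command name (e.g. "GET") for later inspection.
        self._failures[cmd[0] if cmd else "<unknown>"] += 1

    async def register_command_execution(self, cmd: tuple) -> None:
        pass  # this toy detector tracks failures only

    def set_command_executor(self, command_executor) -> None:
        self._executor = command_executor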
python
pytorch__pytorch
torch/_export/db/examples/list_unpack.py
{ "start": 42, "end": 568 }
class ____(torch.nn.Module): """ Lists are treated as static construct, therefore unpacking should be erased after tracing. """ def forward(self, args: list[torch.Tensor]): """ Lists are treated as static construct, therefore unpacking should be erased after tracing. """ x, *y = args return x + y[0] example_args = ([torch.randn(3, 2), torch.tensor(4), torch.tensor(5)],) tags = {"python.control-flow", "python.data-structure"} model = ListUnpack()
ListUnpack
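Since this file belongs to the torch.export example database, a plausible way to exercise it is via `torch.export.export`; exactly how the DB harness invokes the model is an assumption here.

import torch

out = model(*example_args)  # eager sanity check: returns x + y[0]
ep = torch.export.export(model, example_args)
print(ep.graph)  # the list unpack is erased; only tensor ops remain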
python
wandb__wandb
wandb/vendor/watchdog_0_9_0/wandb_watchdog/events.py
{ "start": 16607, "end": 18623 }
class ____(LoggingEventHandler):
    """
    For backwards-compatibility. Please use :class:`LoggingEventHandler`
    instead.
    """


def generate_sub_moved_events(src_dir_path, dest_dir_path):
    """Generates an event list of :class:`DirMovedEvent` and
    :class:`FileMovedEvent` objects for all the files and directories within
    the given moved directory that were moved along with the directory.

    :param src_dir_path:
        The source path of the moved directory.
    :param dest_dir_path:
        The destination path of the moved directory.
    :returns:
        An iterable of file system events of type :class:`DirMovedEvent` and
        :class:`FileMovedEvent`.
    """
    for root, directories, filenames in os.walk(dest_dir_path):
        for directory in directories:
            full_path = os.path.join(root, directory)
            renamed_path = full_path.replace(dest_dir_path, src_dir_path) if src_dir_path else None
            yield DirMovedEvent(renamed_path, full_path)
        for filename in filenames:
            full_path = os.path.join(root, filename)
            renamed_path = full_path.replace(dest_dir_path, src_dir_path) if src_dir_path else None
            yield FileMovedEvent(renamed_path, full_path)


def generate_sub_created_events(src_dir_path):
    """Generates an event list of :class:`DirCreatedEvent` and
    :class:`FileCreatedEvent` objects for all the files and directories within
    the given created directory.

    :param src_dir_path:
        The source path of the created directory.
    :returns:
        An iterable of file system events of type :class:`DirCreatedEvent` and
        :class:`FileCreatedEvent`.
    """
    for root, directories, filenames in os.walk(src_dir_path):
        for directory in directories:
            yield DirCreatedEvent(os.path.join(root, directory))
        for filename in filenames:
            yield FileCreatedEvent(os.path.join(root, filename))
LoggingFileSystemEventHandler
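A hypothetical driver for the helper above: after observing that a watched directory was renamed, an emitter can synthesize per-child move events like this (the paths are illustrative).

for event in generate_sub_moved_events("/watched/old", "/watched/new"):
    # One DirMovedEvent or FileMovedEvent per child, src/dest paths paired.
    print(event.src_path, "->", event.dest_path)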
python
encode__django-rest-framework
tests/test_fields.py
{ "start": 61319, "end": 62040 }
class ____(FieldValues): """ Values for `DurationField` with a custom output format. """ valid_inputs = { '13': datetime.timedelta(seconds=13), 'P3DT08H32M01.000123S': datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123), 'PT8H1M': datetime.timedelta(hours=8, minutes=1), '-P999999999D': datetime.timedelta(days=-999999999), 'P999999999D': datetime.timedelta(days=999999999) } invalid_inputs = {} outputs = { datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123): 'P3DT08H32M01.000123S' } field = serializers.DurationField(format='iso-8601') # Choice types...
TestISOOutputFormatDurationField
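The behaviour under test, restated as a direct round-trip through DRF's `DurationField` (a sketch assuming a configured Django settings module):

import datetime

from rest_framework import serializers

field = serializers.DurationField(format='iso-8601')
assert field.to_internal_value('PT8H1M') == datetime.timedelta(hours=8, minutes=1)
assert field.to_representation(
    datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123)
) == 'P3DT08H32M01.000123S'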
python
apache__airflow
providers/google/tests/unit/google/cloud/sensors/test_gcs.py
{ "start": 14827, "end": 16546 }
class ____: OPERATOR = GCSObjectsWithPrefixExistenceSensor( task_id="gcs-obj-prefix", bucket=TEST_BUCKET, prefix=TEST_OBJECT, google_cloud_conn_id=TEST_GCP_CONN_ID, deferrable=True, ) @mock.patch("airflow.providers.google.cloud.sensors.gcs.GCSHook") def test_gcs_object_with_prefix_existence_async_sensor(self, mock_hook): """ Asserts that a task is deferred and a GCSPrefixBlobTrigger will be fired when the GCSObjectsWithPrefixExistenceAsyncSensor is executed. """ mock_hook.return_value.list.return_value = False with pytest.raises(TaskDeferred) as exc: self.OPERATOR.execute(mock.MagicMock()) assert isinstance(exc.value.trigger, GCSPrefixBlobTrigger), "Trigger is not a GCSPrefixBlobTrigger" def test_gcs_object_with_prefix_existence_async_sensor_execute_failure( self, ): """Tests that an AirflowException is raised in case of error event""" with pytest.raises(AirflowException): self.OPERATOR.execute_complete( context={}, event={"status": "error", "message": "test failure message"} ) def test_gcs_object_with_prefix_existence_async_sensor_execute_complete(self): """Asserts that logging occurs as expected""" with mock.patch.object(self.OPERATOR.log, "info") as mock_log_info: self.OPERATOR.execute_complete( context={}, event={"status": "success", "message": "Job completed", "matches": [TEST_OBJECT]}, ) mock_log_info.assert_called_with("Resuming from trigger and checking status")
TestGCSObjectsWithPrefixExistenceAsyncSensor
python
automl__auto-sklearn
autosklearn/pipeline/components/feature_preprocessing/nystroem_sampler.py
{ "start": 551, "end": 4578 }
class ____(AutoSklearnPreprocessingAlgorithm): def __init__( self, kernel, n_components, gamma=1.0, degree=3, coef0=1, random_state=None ): self.kernel = kernel self.n_components = n_components self.gamma = gamma self.degree = degree self.coef0 = coef0 self.random_state = random_state def fit(self, X, Y=None): import scipy.sparse import sklearn.kernel_approximation self.n_components = int(self.n_components) self.gamma = float(self.gamma) self.degree = int(self.degree) self.coef0 = float(self.coef0) self.preprocessor = sklearn.kernel_approximation.Nystroem( kernel=self.kernel, n_components=self.n_components, gamma=self.gamma, degree=self.degree, coef0=self.coef0, random_state=self.random_state, ) # Because the pipeline guarantees that each feature is positive, # clip all values below zero to zero if self.kernel == "chi2": if scipy.sparse.issparse(X): X.data[X.data < 0] = 0.0 else: X[X < 0] = 0.0 self.preprocessor.fit(X) return self def transform(self, X): import scipy.sparse # Because the pipeline guarantees that each feature is positive, # clip all values below zero to zero if self.kernel == "chi2": if scipy.sparse.issparse(X): X.data[X.data < 0] = 0.0 else: X[X < 0] = 0.0 if self.preprocessor is None: raise NotImplementedError() return self.preprocessor.transform(X) @staticmethod def get_properties(dataset_properties=None): data_type = UNSIGNED_DATA if dataset_properties is not None: signed = dataset_properties.get("signed") if signed is not None: data_type = SIGNED_DATA if signed is True else UNSIGNED_DATA return { "shortname": "Nystroem", "name": "Nystroem kernel approximation", "handles_regression": True, "handles_classification": True, "handles_multiclass": True, "handles_multilabel": True, "handles_multioutput": True, "is_deterministic": True, "input": (SPARSE, DENSE, data_type), "output": (INPUT, UNSIGNED_DATA), } @staticmethod def get_hyperparameter_search_space( feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None ): if dataset_properties is not None and ( dataset_properties.get("sparse") is True or dataset_properties.get("signed") is False ): allow_chi2 = False else: allow_chi2 = True possible_kernels = ["poly", "rbf", "sigmoid", "cosine"] if allow_chi2: possible_kernels.append("chi2") kernel = CategoricalHyperparameter("kernel", possible_kernels, "rbf") n_components = UniformIntegerHyperparameter( "n_components", 50, 10000, default_value=100, log=True ) gamma = UniformFloatHyperparameter( "gamma", 3.0517578125e-05, 8, log=True, default_value=0.1 ) degree = UniformIntegerHyperparameter("degree", 2, 5, 3) coef0 = UniformFloatHyperparameter("coef0", -1, 1, default_value=0) cs = ConfigurationSpace() cs.add_hyperparameters([kernel, degree, gamma, coef0, n_components]) degree_depends_on_poly = EqualsCondition(degree, kernel, "poly") coef0_condition = InCondition(coef0, kernel, ["poly", "sigmoid"]) gamma_kernels = ["poly", "rbf", "sigmoid"] if allow_chi2: gamma_kernels.append("chi2") gamma_condition = InCondition(gamma, kernel, gamma_kernels) cs.add_conditions([degree_depends_on_poly, coef0_condition, gamma_condition]) return cs
Nystroem
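For context, the scikit-learn transformer this component wraps can be used directly; the data shape and hyperparameters below are illustrative only.

import numpy as np
from sklearn.kernel_approximation import Nystroem as SklearnNystroem

rng = np.random.RandomState(0)
X = np.abs(rng.randn(100, 20))  # chi2 requires non-negative features
features = SklearnNystroem(kernel="chi2", n_components=50, gamma=0.1).fit_transform(X)
print(features.shape)  # (100, 50)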
python
numpy__numpy
benchmarks/benchmarks/bench_ufunc.py
{ "start": 7840, "end": 8348 }
class ____(Benchmark): param_names = ['margs', 'msize'] params = [[0, (0, 0), (-1, 0), [0, -1]], ['small', 'big']] def setup(self, margs, msize): self.xs = np.random.uniform(-1, 1, 6).reshape(2, 3) self.xl = np.random.uniform(-1, 1, 50 * 50).reshape(50, 50) def time_methods_getitem(self, margs, msize): if msize == 'small': mdat = self.xs elif msize == 'big': mdat = self.xl mdat.__getitem__(margs)
NDArrayGetItem
python
django__django
tests/postgres_tests/test_search.py
{ "start": 5242, "end": 6377 }
class ____(GrailTestData, PostgreSQLTestCase): def test_existing_vector(self): Line.objects.update(dialogue_search_vector=SearchVector("dialogue")) searched = Line.objects.filter( dialogue_search_vector=SearchQuery("Robin killed") ) self.assertSequenceEqual(searched, [self.verse0]) def test_existing_vector_config_explicit(self): Line.objects.update(dialogue_search_vector=SearchVector("dialogue")) searched = Line.objects.filter( dialogue_search_vector=SearchQuery("cadeaux", config="french") ) self.assertSequenceEqual(searched, [self.french]) def test_single_coalesce_expression(self): searched = Line.objects.annotate(search=SearchVector("dialogue")).filter( search="cadeaux" ) self.assertNotIn("COALESCE(COALESCE", str(searched.query)) def test_values_with_percent(self): searched = Line.objects.annotate( search=SearchVector(Value("This week everything is 10% off")) ).filter(search="10 % off") self.assertEqual(len(searched), 9)
SearchVectorFieldTest
python
django__django
tests/admin_autodiscover/tests.py
{ "start": 74, "end": 742 }
class ____(SimpleTestCase): """ Test for bug #8245 - don't raise an AlreadyRegistered exception when using autodiscover() and an admin.py module contains an error. """ def test_double_call_autodiscover(self): # The first time autodiscover is called, we should get our real error. with self.assertRaisesMessage(Exception, "Bad admin module"): admin.autodiscover() # Calling autodiscover again should raise the very same error it did # the first time, not an AlreadyRegistered error. with self.assertRaisesMessage(Exception, "Bad admin module"): admin.autodiscover()
AdminAutoDiscoverTests
python
huggingface__transformers
src/transformers/models/mvp/modeling_mvp.py
{ "start": 39430, "end": 46491 }
class ____(MvpPreTrainedModel): _keys_to_ignore_on_load_unexpected = ["final_logits_bias"] _tied_weights_keys = { "encoder.embed_tokens.weight": "shared.weight", "decoder.embed_tokens.weight": "shared.weight", } def __init__(self, config: MvpConfig): super().__init__(config) padding_idx, vocab_size = config.pad_token_id, config.vocab_size self.use_prompt = config.use_prompt self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx) self.encoder = MvpEncoder(config, config.use_prompt) self.decoder = MvpDecoder(config, config.use_prompt) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared def set_lightweight_tuning(self): assert self.use_prompt, "If you want to use lightweight tuning, make sure that `use_prompt=True`." self.requires_grad_(False) self.encoder.self_attn_prompt.requires_grad_(True) self.decoder.self_attn_prompt.requires_grad_(True) self.decoder.cross_attn_prompt.requires_grad_(True) @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, encoder_outputs: Optional[list[torch.FloatTensor]] = None, past_key_values: Optional[Cache] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, cache_position: Optional[torch.Tensor] = None, ) -> Union[tuple, Seq2SeqModelOutput]: r""" decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are decoder input IDs?](../glossary#decoder-input-ids) Mvp uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). For translation and summarization training, `decoder_input_ids` should be provided. If no `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right for denoising pre-training following the paper. decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`modeling_mvp._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the default strategy. """ # different to other models, Mvp automatically creates decoder_input_ids from # input_ids if no decoder_input_ids are provided if decoder_input_ids is None and decoder_inputs_embeds is None: if input_ids is None: raise ValueError( "If no `decoder_input_ids` or `decoder_inputs_embeds` are " "passed, `input_ids` cannot be `None`. Please pass either " "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`." 
) decoder_input_ids = shift_tokens_right( input_ids, self.config.pad_token_id, self.config.decoder_start_token_id ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, cache_position=cache_position, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @auto_docstring( custom_intro=""" The MVP Model with a language modeling head. Can be used for various text generation tasks. """ )
MvpModel
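A usage sketch for the model above; the `RUCAIBox/mvp` checkpoint name follows the published MVP weights and is assumed to be available.

import torch
from transformers import AutoTokenizer, MvpModel

tokenizer = AutoTokenizer.from_pretrained("RUCAIBox/mvp")
model = MvpModel.from_pretrained("RUCAIBox/mvp")

inputs = tokenizer("Summarize: MVP is a multi-task pre-trained model.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)  # decoder_input_ids derived by shifting input_ids
print(outputs.last_hidden_state.shape)  # (batch, target_len, d_model)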
python
pypa__pip
src/pip/_internal/models/candidate.py
{ "start": 220, "end": 753 }
class ____: """Represents a potential "candidate" for installation.""" __slots__ = ["name", "version", "link"] name: str version: Version link: Link def __init__(self, name: str, version: str, link: Link) -> None: object.__setattr__(self, "name", name) object.__setattr__(self, "version", parse_version(version)) object.__setattr__(self, "link", link) def __str__(self) -> str: return f"{self.name!r} candidate (version {self.version} at {self.link})"
InstallationCandidate
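An illustrative construction; the `Link(url)` call assumes pip's internal constructor signature, and the URLs are placeholders. Because `version` is parsed with `parse_version`, candidates order semantically rather than lexically.

from pip._internal.models.link import Link

c1 = InstallationCandidate(
    "requests", "2.31.0",
    Link("https://example.com/requests-2.31.0-py3-none-any.whl"),
)
c2 = InstallationCandidate(
    "requests", "2.32.0",
    Link("https://example.com/requests-2.32.0-py3-none-any.whl"),
)
best = max((c1, c2), key=lambda c: c.version)
print(best)  # 'requests' candidate (version 2.32.0 at ...)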
python
pypa__pipenv
pipenv/exceptions.py
{ "start": 8628, "end": 9231 }
class ____(VirtualenvException):
    def __init__(self, message=None, **kwargs):
        if not message:
            message = "Failed to create virtual environment."
        self.message = message
        extra = kwargs.pop("extra", None)
        if extra is not None and isinstance(extra, str):
            extra = unstyle(f"{extra}")
            if "KeyboardInterrupt" in extra:
                extra = "[bold red]Virtualenv creation interrupted by user[/bold red]"
            self.extra = extra = [extra]
        VirtualenvException.__init__(self, message, extra=extra)
VirtualenvCreationException
python
airbytehq__airbyte
airbyte-integrations/connectors/source-github/source_github/github_schema.py
{ "start": 1472787, "end": 1474726 }
class ____(sgqlc.types.Type, Node, UniformResourceLocatable): """Represents a 'review_dismissed' event on a given issue or pull request. """ __schema__ = github_schema __field_names__ = ( "actor", "created_at", "database_id", "dismissal_message", "dismissal_message_html", "previous_review_state", "pull_request", "pull_request_commit", "review", ) actor = sgqlc.types.Field(Actor, graphql_name="actor") """Identifies the actor who performed the event.""" created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt") """Identifies the date and time when the object was created.""" database_id = sgqlc.types.Field(Int, graphql_name="databaseId") """Identifies the primary key from the database.""" dismissal_message = sgqlc.types.Field(String, graphql_name="dismissalMessage") """Identifies the optional message associated with the 'review_dismissed' event. """ dismissal_message_html = sgqlc.types.Field(String, graphql_name="dismissalMessageHTML") """Identifies the optional message associated with the event, rendered to HTML. """ previous_review_state = sgqlc.types.Field(sgqlc.types.non_null(PullRequestReviewState), graphql_name="previousReviewState") """Identifies the previous state of the review with the 'review_dismissed' event. """ pull_request = sgqlc.types.Field(sgqlc.types.non_null(PullRequest), graphql_name="pullRequest") """PullRequest referenced by event.""" pull_request_commit = sgqlc.types.Field(PullRequestCommit, graphql_name="pullRequestCommit") """Identifies the commit which caused the review to become stale.""" review = sgqlc.types.Field(PullRequestReview, graphql_name="review") """Identifies the review associated with the 'review_dismissed' event. """
ReviewDismissedEvent
python
walkccc__LeetCode
solutions/2841. Maximum Sum of Almost Unique Subarray/2841.py
{ "start": 0, "end": 462 }
import collections


class ____:
  def maxSum(self, nums: list[int], m: int, k: int) -> int:
    ans = 0
    summ = 0
    count = collections.Counter()

    for i, num in enumerate(nums):
      summ += num
      count[num] += 1
      if i >= k:
        numToRemove = nums[i - k]
        summ -= numToRemove
        count[numToRemove] -= 1
        if count[numToRemove] == 0:
          del count[numToRemove]
      # Only score windows that are exactly k long and have >= m distinct values.
      if i >= k - 1 and len(count) >= m:
        ans = max(ans, summ)

    return ans
Solution
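A quick worked check against the problem's first sample, using the target class name; with the window-length guard in place, only length-k windows with at least m distinct values are scored.

nums, m, k = [2, 6, 7, 3, 1, 7], 3, 4
# Windows of length 4: [2,6,7,3] -> 18 (4 distinct), [6,7,3,1] -> 17,
# [7,3,1,7] -> 18 (3 distinct); the answer is 18.
print(Solution().maxSum(nums, m, k))  # 18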
python
django-extensions__django-extensions
tests/management/commands/test_print_user_for_session.py
{ "start": 271, "end": 645 }
class ____(TestCase):
    """Test that the print_user_for_session command raises exceptions."""

    def test_should_raise_CommandError_if_session_key_contains_exclamation_mark(self):
        with self.assertRaisesRegex(CommandError, "malformed session key"):
            call_command("print_user_for_session", "l6hxnwblpvrfu8bohelmqjj4soyo2r!?")
PrintUserForSessionExceptionsTests
python
charliermarsh__ruff
crates/ruff_linter/resources/test/fixtures/pyflakes/F821_17.py
{ "start": 2221, "end": 2529 }
class ____[T: (DoesNotExist1, DoesNotExist2)](list[T]): ... # F821: Undefined name `DoesNotExist1`, Undefined name `DoesNotExist2` # Same in defaults type Foo[T = DoesNotExist] = T # F821: Undefined name `DoesNotExist` def foo[T = DoesNotExist](t: T) -> T: return t # F821: Undefined name `DoesNotExist`
Foo
python
airbytehq__airbyte
airbyte-integrations/connectors/source-iterable/source_iterable/streams.py
{ "start": 15737, "end": 15826 }
class ____(IterableExportStreamAdjustableRange): data_field = "emailBounce"
EmailBounce
python
pytorch__pytorch
torchgen/_autoheuristic/train_decision.py
{ "start": 23129, "end": 23616 }
class ____: # If the model predicted the wrong choice, this is the maximum speedup of the best choice over the predicted choice max_speedup: float # For all wrong predictions, this is the geometric mean of the speedups of the best choices over the predicted choices gmean_speedup: float def to_map(self): return { "wrong_max_speedup": self.max_speedup, "wrong_gmean_speedup": self.gmean_speedup, } @dataclass
WrongSpeedupMetrics
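A sketch of how these fields are presumably populated, assuming the class is `@dataclass`-decorated (its bare field annotations and the trailing decorator in this span suggest the file's style); `statistics.geometric_mean` stands in for whatever aggregation the trainer actually uses.

from statistics import geometric_mean

# Speedups of the best choice over the model's wrong prediction, per sample.
wrong_speedups = [1.8, 1.1, 2.5]  # illustrative values
metrics = WrongSpeedupMetrics(
    max_speedup=max(wrong_speedups),
    gmean_speedup=geometric_mean(wrong_speedups),
)
print(metrics.to_map())
# {'wrong_max_speedup': 2.5, 'wrong_gmean_speedup': ~1.70}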
python
tensorflow__tensorflow
tensorflow/python/checkpoint/checkpoint.py
{ "start": 48398, "end": 66954 }
class ____: """Saves and restores a `Trackable` object and its dependencies. See `Trackable` for details of dependency management. `Saver` wraps `tf.compat.v1.train.Saver` for saving, including extra information about the graph of dependencies between Python objects. When restoring, it uses this information about the save-time dependency graph to more robustly match objects with their checkpointed values. When executing eagerly, it supports restoring variables on object creation (see `Saver.restore`). Values in a checkpoint are mapped to `Trackable` Python objects (`Variable`s, `Optimizer`s, `Layer`s) based on the names provided when the checkpoint was written. To avoid breaking existing checkpoints when modifying a class, dependency names (the names of attributes to which `Trackable` objects are assigned) may not change. These names are local to objects, in contrast to the `Variable.name`-based save/restore from `tf.compat.v1.train.Saver`, and so allow additional program transformations. """ def __init__(self, graph_view): """Configure saving. Args: graph_view: An `ObjectGraphView` object containing a description of the object graph to save. """ self._graph_view = graph_view # The following attributes are used when graph building. # self._cache: A more generic cache used to cache the serialized tensors and # TrackableObjectGraph proto attributes. # self._saveables_cache: A dictionary mapping `Trackable` objects -> # attribute names -> SaveableObjects, used to avoid re-creating # SaveableObjects when graph building. if context.executing_eagerly(): self._cache = None self._saveables_cache = None else: self._cache = object_identity.ObjectIdentityWeakKeyDictionary() self._saveables_cache = object_identity.ObjectIdentityWeakKeyDictionary() # The file prefix placeholder is created lazily when graph building (and not # at all when executing eagerly) to avoid creating ops in the constructor # (when they may never be necessary). self._file_prefix_placeholder = None # Op caching for save self._object_graph_feed_tensor = None self._last_save_object_graph = None self._file_prefix_feed_tensor = None self._cached_save_operation = None # Op caching for restore, shared between _CheckpointRestoreCoordinators self._restore_op_cache = {} # Object map used for checkpoint. This attribute is to be overridden by a # Checkpoint subclass, e.g., AsyncCheckpoint, to replace the trackable # objects for checkpoint saving. self._object_map = None def _gather_serialized_tensors(self, object_graph_tensor=None): """Gathers tensors to save to ckpt and includes the object graph proto.""" serialized_tensors, feed_additions, registered_savers, graph_proto = ( save_util.serialize_graph_view(self._graph_view, self._object_map, cache=self._cache)) if self._saveables_cache is not None: # Store saveables cache for restoration purposes. 
self._saveables_cache = ( saveable_object_util.serialized_tensors_to_saveable_cache( serialized_tensors)) if object_graph_tensor is None: with ops.device("/cpu:0"): object_graph_tensor = constant_op.constant( graph_proto.SerializeToString(), dtype=dtypes.string) else: feed_additions.update( {object_graph_tensor: graph_proto.SerializeToString()}) assert base.OBJECT_GRAPH_PROTO_KEY not in serialized_tensors.get(None, {}) serialized_tensors.setdefault(None, {})[base.OBJECT_GRAPH_PROTO_KEY] = ( object_graph_tensor) return serialized_tensors, feed_additions, registered_savers, graph_proto def _save_cached_when_graph_building(self, file_prefix, object_graph_tensor, options): """Create or retrieve save ops. Args: file_prefix: The prefix for saved checkpoint files. object_graph_tensor: A `Tensor` to which the current object graph will be fed. options: `CheckpointOptions` object. Returns: A two-element tuple with a filename tensor and a feed_dict of tensors to feed when running it (if graph building). The feed dict contains the current object graph and any Python state to be saved in the checkpoint. When executing eagerly only the first argument is meaningful. """ serialized_tensors, feed_additions, registered_savers, graph_proto = ( self._gather_serialized_tensors(object_graph_tensor)) if (self._last_save_object_graph != graph_proto # When executing eagerly, we need to re-create SaveableObjects each # time save() is called so they pick up new Tensors passed to their # constructors. That means the Saver needs to be copied with a new # var_list. or context.executing_eagerly() or ops.inside_function()): saver = functional_saver.MultiDeviceSaver(serialized_tensors, registered_savers) save_op = saver.save(file_prefix, options=options) with ops.device("/cpu:0"): with ops.control_dependencies([save_op]): self._cached_save_operation = array_ops.identity(file_prefix) self._last_save_object_graph = graph_proto return self._cached_save_operation, feed_additions def save(self, file_prefix, checkpoint_number=None, session=None, options=None): """Save a training checkpoint. The saved checkpoint includes variables created by this object and any Trackable objects it depends on at the time `Saver.save()` is called. Args: file_prefix: A prefix to use for the checkpoint filenames (/path/to/directory/and_a_prefix). Names are generated based on this prefix and `checkpoint_number`, if provided. checkpoint_number: An integer variable or Tensor, used to number checkpoints. Typically this value is saved along with other variables in training checkpoints, which will happen automatically if it was created by `root_trackable` or one of its dependencies (via `Trackable._add_variable`). session: The session to evaluate variables in. Ignored when executing eagerly. If not provided when graph building, the default session is used. options: Optional `tf.train.CheckpointOptions` object. Returns: The full path to the checkpoint. Raises: RuntimeError: if called in V1 Graph mode without a default session. 
""" options = options or checkpoint_options.CheckpointOptions() feed_dict = {} use_session = (not context.executing_eagerly() and not ops.inside_function()) if checkpoint_number: file_prefix = "%s-%d" % (file_prefix, checkpoint_number) if use_session: if self._object_graph_feed_tensor is None: with ops.device("/cpu:0"): self._object_graph_feed_tensor = constant_op.constant( "", dtype=dtypes.string) self._file_prefix_feed_tensor = constant_op.constant( "", dtype=dtypes.string) object_graph_tensor = self._object_graph_feed_tensor file_prefix_tensor = self._file_prefix_feed_tensor feed_dict[file_prefix_tensor] = file_prefix else: with ops.device("/cpu:0"): file_prefix_tensor = ops.convert_to_tensor( file_prefix, dtype=dtypes.string) object_graph_tensor = None if not tensor_util.is_tensor(file_prefix): file_io.recursive_create_dir(os.path.dirname(file_prefix)) save_path, new_feed_additions = self._save_cached_when_graph_building( file_prefix_tensor, object_graph_tensor, options) if new_feed_additions: feed_dict.update(new_feed_additions) if not use_session: session = None elif session is None: session = get_session() if session: return session.run(save_path, feed_dict=feed_dict) elif use_session: raise RuntimeError(f"Unable to save checkpoint to \"{file_prefix}\" " "in graph mode without a default session. Please use " "`with tf.Session():` to create a session.") else: return save_path def restore(self, save_path, options=None): """Restore a training checkpoint. Restores `root_trackable` and any objects that it tracks (transitive). Either assigns values immediately if variables to restore have been created already, or defers restoration until the variables are created. Dependencies added to the `root_trackable` passed to the constructor after this call will be matched if they have a corresponding object in the checkpoint. When building a graph, restorations are added to the graph but not run. ```python saver = Saver(root) saver.restore(path) ``` To ensure that loading is complete and no more deferred restorations will take place, you can use the `assert_consumed()` method of the status object returned by the `restore` call. The assert will raise an exception unless every object was matched and all checkpointed values have a matching variable object. ```python saver = Saver(root) saver.restore(path).assert_consumed() ``` When graph building, `assert_consumed()` indicates that all of the restore ops which will be created for this checkpoint have been created. They can be run via the `run_restore_ops()` function of the status object: ```python saver.restore(path).assert_consumed().run_restore_ops() ``` If the checkpoint has not been consumed completely, then the list of restore ops will grow as more objects are added to the dependency graph. Name-based `tf.compat.v1.train.Saver` checkpoints can be loaded using this method. There is no deferred loading, and names are used to match variables. No restore ops are created/run until `run_restore_ops()` or `initialize_or_restore()` are called on the returned status object, even when executing eagerly. Re-encode name-based checkpoints using this object-based `Saver.save` as soon as possible. Args: save_path: The path to the checkpoint, as returned by `save` or `tf.train.latest_checkpoint`. If None (as when there is no latest checkpoint for `tf.train.latest_checkpoint` to return), returns an object which may run initializers for objects in the dependency graph. 
If the checkpoint was written by the name-based
        `tf.compat.v1.train.Saver`, names are used to match variables.
      options: Optional `tf.train.CheckpointOptions` object.

    Returns:
      A load status object, which can be used to make assertions about the
      status of checkpoint restoration and run initialization/restore ops
      (of type `CheckpointLoadStatus`, or `InitializationOnlyStatus` if
      `save_path` is `None`). If `save_path` points to a name-based
      checkpoint, a `NameBasedSaverStatus` object is returned which runs
      restore ops from a name-based saver.

    Raises:
      RuntimeError: When a checkpoint file saved by async checkpoint is not
        available upon restore().
    """
    options = options or checkpoint_options.CheckpointOptions()
    if save_path is None:
      return InitializationOnlyStatus(self._graph_view, ops.uid())

    # Wait for any ongoing checkpoint write to finish.
    # TODO(chienchunh): Allow the file to be loaded while other checkpoint
    #                   events are still ongoing. Need to add a timeout
    #                   mechanism, along with condition variables to notify
    #                   when the checkpoint file is ready.
    global _ASYNC_CHECKPOINT_THREAD
    if _ASYNC_CHECKPOINT_THREAD is not None:
      _ASYNC_CHECKPOINT_THREAD.join()
    reader = py_checkpoint_reader.NewCheckpointReader(save_path)
    graph_building = not context.executing_eagerly()
    if graph_building:
      dtype_map = None
    else:
      dtype_map = reader.get_variable_to_dtype_map()
    try:
      object_graph_string = reader.get_tensor(base.OBJECT_GRAPH_PROTO_KEY)
    except errors_impl.NotFoundError:
      # The object graph proto does not exist in this checkpoint. Try the
      # name-based compatibility mode.
      restore_coordinator = _NameBasedRestoreCoordinator(
          save_path=save_path, dtype_map=dtype_map)
      if not graph_building:
        for existing_trackable in util.list_objects(self._graph_view):
          # pylint: disable=protected-access
          existing_trackable._maybe_initialize_trackable()
          existing_trackable._name_based_restores.add(restore_coordinator)
          existing_trackable._name_based_attribute_restore(restore_coordinator)
          # pylint: enable=protected-access
      return NameBasedSaverStatus(
          restore_coordinator, object_graph_view=self._graph_view)

    if graph_building:
      if self._file_prefix_placeholder is None:
        with ops.device("/cpu:0"):
          self._file_prefix_placeholder = constant_op.constant("model")
      file_prefix_tensor = self._file_prefix_placeholder
      file_prefix_feed_dict = {self._file_prefix_placeholder: save_path}
    else:
      with ops.device("/cpu:0"):
        file_prefix_tensor = constant_op.constant(save_path)
      file_prefix_feed_dict = None
    object_graph_proto = (trackable_object_graph_pb2.TrackableObjectGraph())
    object_graph_proto.ParseFromString(object_graph_string)
    checkpoint = _CheckpointRestoreCoordinator(
        object_graph_proto=object_graph_proto,
        save_path=save_path,
        save_path_tensor=file_prefix_tensor,
        reader=reader,
        restore_op_cache=self._restore_op_cache,
        graph_view=self._graph_view,
        options=options,
        saveables_cache=self._saveables_cache)
    restore_lib.CheckpointPosition(
        checkpoint=checkpoint, proto_id=0).restore(self._graph_view.root,
                                                   reader)

    # Attached dependencies are not attached to the root, so should be
    # restored separately.
    if self._graph_view.attached_dependencies:
      for ref in self._graph_view.attached_dependencies:
        if ref.name == "root":
          # Root dependency is automatically added to attached dependencies --
          # this can be ignored since it maps back to the root object.
          continue
        proto_id = None
        # Find proto ID of attached dependency (if it is in the proto).
for proto_ref in object_graph_proto.nodes[0].children:
          if proto_ref.local_name == ref.name:
            proto_id = proto_ref.node_id
            break

        if proto_id in checkpoint.object_by_proto_id:
          # Object has already been restored. This can happen when there's an
          # indirect connection from the attached object to the root.
          continue

        if proto_id is None:
          # Could not find attached dependency in proto.
          continue

        restore_lib.CheckpointPosition(
            checkpoint=checkpoint, proto_id=proto_id).restore(ref.ref, reader)

    load_status = CheckpointLoadStatus(
        checkpoint,
        graph_view=self._graph_view,
        feed_dict=file_prefix_feed_dict,
        options=options)
    return load_status


def frozen_saver(root_trackable):
  """Creates a static `tf.compat.v1.train.Saver` from a trackable object.

  The returned `Saver` saves object-based checkpoints, but these checkpoints
  will no longer reflect structural changes to the object graph, only changes
  to the values of `Variable`s added as dependencies of the root object before
  `freeze` was called.

  `restore` works on the returned `Saver`, but requires that the object graph
  of the checkpoint being loaded exactly matches the object graph when
  `freeze` was called. This is in contrast to the object-based restore
  performed by `tf.train.Checkpoint`, which attempts a fuzzy matching between
  a checkpoint's object graph and the current Python object graph.

  Args:
    root_trackable: A trackable object to save.

  Returns:
    A saver which saves object-based checkpoints for the object graph frozen
    at the time `frozen_saver` was called.
  """
  named_saveable_objects, registered_savers = (
      save_util_v1.frozen_saveables_and_savers(
          graph_view_lib.ObjectGraphView(root_trackable)))
  return functional_saver.MultiDeviceSaver.from_saveables(
      named_saveable_objects, registered_savers)


def _assert_trackable(obj, name):
  if not isinstance(
      obj, (base.Trackable, def_function.Function)):
    raise ValueError(
        f"`Checkpoint` was expecting {name} to be a trackable object (an "
        f"object derived from `Trackable`), got {obj}. If you believe this "
        "object should be trackable (i.e. it is part of the "
        "TensorFlow Python API and manages state), please open an issue.")


def _update_checkpoint_state_internal(file_path):
  """Update internal checkpoint state."""
  checkpoint_management.update_checkpoint_state_internal(
      save_dir=os.path.dirname(file_path),
      model_checkpoint_path=file_path,
      all_model_checkpoint_paths=[file_path],
      save_relative_paths=True)


def _convert_file_name_tensor_to_string(tensor):
  """Convert a file name tensor to a string."""
  output = tensor
  if tensor_util.is_tf_type(output):
    # Convert to numpy if not `tf.function` building.
    if context.executing_eagerly():
      output = compat.as_str(output.numpy())
    else:
      # Graph + Session, so we already session.ran it.
      output = compat.as_str(output)
  return output


def _copy_single_tensor(tensor):
  """Copies a single Tensor / SaveSpec onto the CPU device."""
  device = tensor.device
  if isinstance(tensor, saveable_object_lib.SaveSpec):
    # Pin the device according to the tensor's device location to
    # avoid unnecessary data copies when reading the variables. This is
    # aligned with the behavior in MultiDeviceSaver.save().
    with ops.device(device):
      tensor = tensor.tensor

  if tensor is not None:
    with ops.device(saveable_object_util.set_cpu0(device)):
      tensor = array_ops.identity(tensor)  # pylint: disable=protected-access
  return tensor


# Mentions graph building / Sessions. The v2 version is below.
@tf_export(v1=["train.Checkpoint"])
TrackableSaver
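The public object-based API layered on this saver is `tf.train.Checkpoint`; a minimal save/restore round trip, with paths illustrative:

import tensorflow as tf

net = tf.keras.layers.Dense(4)
net(tf.zeros([1, 3]))  # build variables so there is state to checkpoint
ckpt = tf.train.Checkpoint(model=net)

path = ckpt.save("/tmp/tf_ckpts/demo")  # e.g. "/tmp/tf_ckpts/demo-1"
status = ckpt.restore(path)
status.assert_consumed()  # every checkpointed value matched an object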