language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | astropy__astropy | astropy/io/fits/__init__.py | {
"start": 719,
"end": 3397
} | class ____(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.io.fits`.
"""
enable_record_valued_keyword_cards = _config.ConfigItem(
True,
"If True, enable support for record-valued keywords as described by "
"FITS WCS distortion paper. Otherwise they are treated as normal "
"keywords.",
aliases=["astropy.io.fits.enabled_record_valued_keyword_cards"],
)
extension_name_case_sensitive = _config.ConfigItem(
False,
"If True, extension names (i.e. the ``EXTNAME`` keyword) should be "
"treated as case-sensitive.",
)
strip_header_whitespace = _config.ConfigItem(
True,
"If True, automatically remove trailing whitespace for string values in"
" headers. Otherwise the values are returned verbatim, with all "
"whitespace intact.",
)
use_memmap = _config.ConfigItem(
True,
"If True, use memory-mapped file access to read/write the data in "
"FITS files. This generally provides better performance, especially "
"for large files, but may affect performance in I/O-heavy "
"applications.",
)
lazy_load_hdus = _config.ConfigItem(
True,
"If True, use lazy loading of HDUs when opening FITS files by "
"default; that is fits.open() will only seek for and read HDUs on "
"demand rather than reading all HDUs at once. See the documentation "
"for fits.open() for more details.",
)
enable_uint = _config.ConfigItem(
True,
"If True, default to recognizing the convention for representing "
"unsigned integers in FITS--if an array has BITPIX > 0, BSCALE = 1, "
"and BZERO = 2**BITPIX, represent the data as unsigned integers "
"per this convention.",
)
conf = Conf()
# Public API compatibility imports
# These need to come after the global config variables, as some of the
# submodules use them
from . import card, column, convenience, hdu
from .card import *
from .column import *
from .convenience import *
from .diff import *
from .fitsrec import FITS_rec, FITS_record
from .hdu import *
from .hdu.compressed import CompImageSection
from .hdu.groups import GroupData
from .hdu.hdulist import fitsopen as open
from .hdu.image import Section
from .header import Header
from .verify import VerifyError
__all__ = (
["Conf", "conf"]
+ card.__all__
+ column.__all__
+ convenience.__all__
+ hdu.__all__
+ [
"FITS_record",
"FITS_rec",
"GroupData",
"open",
"Section",
"Header",
"VerifyError",
"conf",
]
)
| Conf |
python | pypa__setuptools | setuptools/_distutils/errors.py | {
"start": 1458,
"end": 1670
} | class ____(DistutilsError):
"""Any problems in the filesystem: expected file not found, etc.
Typically this is for problems that we detect before OSError
could be raised."""
pass
| DistutilsFileError |
python | doocs__leetcode | solution/1500-1599/1523.Count Odd Numbers in an Interval Range/Solution.py | {
"start": 0,
"end": 115
} | class ____:
def countOdds(self, low: int, high: int) -> int:
return ((high + 1) >> 1) - (low >> 1)
| Solution |
python | kubernetes-client__python | kubernetes/base/stream/ws_client.py | {
"start": 9515,
"end": 23595
} | class ____:
def __init__(self, websocket, ports):
"""A websocket client with support for port forwarding.
Port Forward command sends on 2 channels per port, a read/write
data channel and a read only error channel. Both channels are sent an
initial frame containing the port number that channel is associated with.
"""
self.websocket = websocket
self.local_ports = {}
for ix, port_number in enumerate(ports):
self.local_ports[port_number] = self._Port(ix, port_number)
# There is a thread run per PortForward instance which performs the translation between the
# raw socket data sent by the python application and the websocket protocol. This thread
# terminates after either side has closed all ports, and after flushing all pending data.
proxy = threading.Thread(
name="Kubernetes port forward proxy: %s" % ', '.join([str(port) for port in ports]),
target=self._proxy
)
proxy.daemon = True
proxy.start()
@property
def connected(self):
return self.websocket.connected
def socket(self, port_number):
if port_number not in self.local_ports:
raise ValueError("Invalid port number")
return self.local_ports[port_number].socket
def error(self, port_number):
if port_number not in self.local_ports:
raise ValueError("Invalid port number")
return self.local_ports[port_number].error
def close(self):
for port in self.local_ports.values():
port.socket.close()
class _Port:
def __init__(self, ix, port_number):
# The remote port number
self.port_number = port_number
# The websocket channel byte number for this port
self.channel = six.int2byte(ix * 2)
# A socket pair is created to provide a means of translating the data flow
# between the python application and the kubernetes websocket. The self.python
# half of the socket pair is used by the _proxy method to receive and send data
# to the running python application.
s, self.python = socket.socketpair()
# The self.socket half of the pair is used by the python application to send
# and receive data to the eventual pod port. It is wrapped in the _Socket class
# because a socket pair is an AF_UNIX socket, not a AF_INET socket. This allows
# intercepting setting AF_INET socket options that would error against an AF_UNIX
# socket.
self.socket = self._Socket(s)
# Data accumulated from the websocket to be sent to the python application.
self.data = b''
# All data sent from kubernetes on the port error channel.
self.error = None
class _Socket:
def __init__(self, socket):
self._socket = socket
def __getattr__(self, name):
return getattr(self._socket, name)
def setsockopt(self, level, optname, value):
# The following socket option is not valid with a socket created from socketpair,
# and is set by the http.client.HTTPConnection.connect method.
if level == socket.IPPROTO_TCP and optname == socket.TCP_NODELAY:
return
self._socket.setsockopt(level, optname, value)
# Proxy all socket data between the python code and the kubernetes websocket.
def _proxy(self):
channel_ports = []
channel_initialized = []
local_ports = {}
for port in self.local_ports.values():
# Setup the data channel for this port number
channel_ports.append(port)
channel_initialized.append(False)
# Setup the error channel for this port number
channel_ports.append(port)
channel_initialized.append(False)
port.python.setblocking(True)
local_ports[port.python] = port
# The data to send on the websocket socket
kubernetes_data = b''
while True:
rlist = [] # List of sockets to read from
wlist = [] # List of sockets to write to
if self.websocket.connected:
rlist.append(self.websocket)
if kubernetes_data:
wlist.append(self.websocket)
local_all_closed = True
for port in self.local_ports.values():
if port.python.fileno() != -1:
if self.websocket.connected:
rlist.append(port.python)
if port.data:
wlist.append(port.python)
local_all_closed = False
else:
if port.data:
wlist.append(port.python)
local_all_closed = False
else:
port.python.close()
if local_all_closed and not (self.websocket.connected and kubernetes_data):
self.websocket.close()
return
r, w, _ = select.select(rlist, wlist, [])
for sock in r:
if sock == self.websocket:
pending = True
while pending:
try:
opcode, frame = self.websocket.recv_data_frame(True)
except WebSocketConnectionClosedException:
for port in self.local_ports.values():
port.python.close()
return
if opcode == ABNF.OPCODE_BINARY:
if not frame.data:
raise RuntimeError("Unexpected frame data size")
channel = six.byte2int(frame.data)
if channel >= len(channel_ports):
raise RuntimeError("Unexpected channel number: %s" % channel)
port = channel_ports[channel]
if channel_initialized[channel]:
if channel % 2:
if port.error is None:
port.error = ''
port.error += frame.data[1:].decode()
port.python.close()
else:
port.data += frame.data[1:]
else:
if len(frame.data) != 3:
raise RuntimeError(
"Unexpected initial channel frame data size"
)
port_number = six.byte2int(frame.data[1:2]) + (six.byte2int(frame.data[2:3]) * 256)
if port_number != port.port_number:
raise RuntimeError(
"Unexpected port number in initial channel frame: %s" % port_number
)
channel_initialized[channel] = True
elif opcode not in (ABNF.OPCODE_PING, ABNF.OPCODE_PONG, ABNF.OPCODE_CLOSE):
raise RuntimeError("Unexpected websocket opcode: %s" % opcode)
if not (isinstance(self.websocket.sock, ssl.SSLSocket) and self.websocket.sock.pending()):
pending = False
else:
port = local_ports[sock]
if port.python.fileno() != -1:
data = port.python.recv(1024 * 1024)
if data:
kubernetes_data += ABNF.create_frame(
port.channel + data,
ABNF.OPCODE_BINARY,
).format()
else:
port.python.close()
for sock in w:
if sock == self.websocket:
sent = self.websocket.sock.send(kubernetes_data)
kubernetes_data = kubernetes_data[sent:]
else:
port = local_ports[sock]
if port.python.fileno() != -1:
sent = port.python.send(port.data)
port.data = port.data[sent:]
def get_websocket_url(url, query_params=None):
parsed_url = urlparse(url)
parts = list(parsed_url)
if parsed_url.scheme == 'http':
parts[0] = 'ws'
elif parsed_url.scheme == 'https':
parts[0] = 'wss'
if query_params:
query = []
for key, value in query_params:
if key == 'command' and isinstance(value, list):
for command in value:
query.append((key, command))
else:
query.append((key, value))
if query:
parts[4] = urlencode(query)
return urlunparse(parts)
def create_websocket(configuration, url, headers=None):
enableTrace(False)
# We just need to pass the Authorization, ignore all the other
# http headers we get from the generated code
header = []
if headers and 'authorization' in headers:
header.append("authorization: %s" % headers['authorization'])
if headers and 'sec-websocket-protocol' in headers:
header.append("sec-websocket-protocol: %s" %
headers['sec-websocket-protocol'])
else:
header.append("sec-websocket-protocol: v4.channel.k8s.io")
if url.startswith('wss://') and configuration.verify_ssl:
ssl_opts = {
'cert_reqs': ssl.CERT_REQUIRED,
'ca_certs': configuration.ssl_ca_cert or certifi.where(),
}
if configuration.assert_hostname is not None:
ssl_opts['check_hostname'] = configuration.assert_hostname
else:
ssl_opts = {'cert_reqs': ssl.CERT_NONE}
if configuration.cert_file:
ssl_opts['certfile'] = configuration.cert_file
if configuration.key_file:
ssl_opts['keyfile'] = configuration.key_file
if configuration.tls_server_name:
ssl_opts['server_hostname'] = configuration.tls_server_name
websocket = WebSocket(sslopt=ssl_opts, skip_utf8_validation=False)
connect_opt = {
'header': header
}
if configuration.proxy or configuration.proxy_headers:
connect_opt = websocket_proxycare(connect_opt, configuration, url, headers)
websocket.connect(url, **connect_opt)
return websocket
def websocket_proxycare(connect_opt, configuration, url, headers):
""" An internal function to be called in api-client when a websocket
create is requested.
"""
if configuration.no_proxy:
connect_opt.update({ 'http_no_proxy': configuration.no_proxy.split(',') })
if configuration.proxy:
proxy_url = urlparse(configuration.proxy)
connect_opt.update({'http_proxy_host': proxy_url.hostname, 'http_proxy_port': proxy_url.port})
if configuration.proxy_headers:
for key,value in configuration.proxy_headers.items():
if key == 'proxy-authorization' and value.startswith('Basic'):
b64value = value.split()[1]
auth = urlsafe_b64decode(b64value).decode().split(':')
connect_opt.update({'http_proxy_auth': (auth[0], auth[1]) })
return(connect_opt)
def websocket_call(configuration, _method, url, **kwargs):
"""An internal function to be called in api-client when a websocket
connection is required. method, url, and kwargs are the parameters of
apiClient.request method."""
url = get_websocket_url(url, kwargs.get("query_params"))
headers = kwargs.get("headers")
_request_timeout = kwargs.get("_request_timeout", 60)
_preload_content = kwargs.get("_preload_content", True)
capture_all = kwargs.get("capture_all", True)
binary = kwargs.get('binary', False)
try:
client = WSClient(configuration, url, headers, capture_all, binary=binary)
if not _preload_content:
return client
client.run_forever(timeout=_request_timeout)
all = client.read_all()
if binary:
return WSResponse(all)
else:
return WSResponse('%s' % ''.join(all))
except (Exception, KeyboardInterrupt, SystemExit) as e:
raise ApiException(status=0, reason=str(e))
def portforward_call(configuration, _method, url, **kwargs):
"""An internal function to be called in api-client when a websocket
connection is required for port forwarding. args and kwargs are the
parameters of apiClient.request method."""
query_params = kwargs.get("query_params")
ports = []
for param, value in query_params:
if param == 'ports':
for port in value.split(','):
try:
port_number = int(port)
except ValueError:
raise ApiValueError("Invalid port number: %s" % port)
if not (0 < port_number < 65536):
raise ApiValueError("Port number must be between 0 and 65536: %s" % port)
if port_number in ports:
raise ApiValueError("Duplicate port numbers: %s" % port)
ports.append(port_number)
if not ports:
raise ApiValueError("Missing required parameter `ports`")
url = get_websocket_url(url, query_params)
headers = kwargs.get("headers")
try:
websocket = create_websocket(configuration, url, headers)
return PortForward(websocket, ports)
except (Exception, KeyboardInterrupt, SystemExit) as e:
raise ApiException(status=0, reason=str(e))
| PortForward |
python | mwaskom__seaborn | tests/_marks/test_area.py | {
"start": 207,
"end": 3022
} | class ____:
def test_single_defaults(self):
x, y = [1, 2, 3], [1, 2, 1]
p = Plot(x=x, y=y).add(Area()).plot()
ax = p._figure.axes[0]
poly = ax.patches[0]
verts = poly.get_path().vertices.T
colors = p._theme["axes.prop_cycle"].by_key()["color"]
expected_x = [1, 2, 3, 3, 2, 1, 1]
assert_array_equal(verts[0], expected_x)
expected_y = [0, 0, 0, 1, 2, 1, 0]
assert_array_equal(verts[1], expected_y)
fc = poly.get_facecolor()
assert_array_equal(fc, to_rgba(colors[0], .2))
ec = poly.get_edgecolor()
assert_array_equal(ec, to_rgba(colors[0], 1))
lw = poly.get_linewidth()
assert_array_equal(lw, mpl.rcParams["patch.linewidth"] * 2)
def test_set_properties(self):
x, y = [1, 2, 3], [1, 2, 1]
mark = Area(
color=".33",
alpha=.3,
edgecolor=".88",
edgealpha=.8,
edgewidth=2,
edgestyle=(0, (2, 1)),
)
p = Plot(x=x, y=y).add(mark).plot()
ax = p._figure.axes[0]
poly = ax.patches[0]
fc = poly.get_facecolor()
assert_array_equal(fc, to_rgba(mark.color, mark.alpha))
ec = poly.get_edgecolor()
assert_array_equal(ec, to_rgba(mark.edgecolor, mark.edgealpha))
lw = poly.get_linewidth()
assert_array_equal(lw, mark.edgewidth * 2)
ls = poly.get_linestyle()
dash_on, dash_off = mark.edgestyle[1]
expected = (0, (mark.edgewidth * dash_on / 4, mark.edgewidth * dash_off / 4))
assert ls == expected
def test_mapped_properties(self):
x, y = [1, 2, 3, 2, 3, 4], [1, 2, 1, 1, 3, 2]
g = ["a", "a", "a", "b", "b", "b"]
cs = [".2", ".8"]
p = Plot(x=x, y=y, color=g, edgewidth=g).scale(color=cs).add(Area()).plot()
ax = p._figure.axes[0]
expected_x = [1, 2, 3, 3, 2, 1, 1], [2, 3, 4, 4, 3, 2, 2]
expected_y = [0, 0, 0, 1, 2, 1, 0], [0, 0, 0, 2, 3, 1, 0]
for i, poly in enumerate(ax.patches):
verts = poly.get_path().vertices.T
assert_array_equal(verts[0], expected_x[i])
assert_array_equal(verts[1], expected_y[i])
fcs = [p.get_facecolor() for p in ax.patches]
assert_array_equal(fcs, to_rgba_array(cs, .2))
ecs = [p.get_edgecolor() for p in ax.patches]
assert_array_equal(ecs, to_rgba_array(cs, 1))
lws = [p.get_linewidth() for p in ax.patches]
assert lws[0] > lws[1]
def test_unfilled(self):
x, y = [1, 2, 3], [1, 2, 1]
c = ".5"
p = Plot(x=x, y=y).add(Area(fill=False, color=c)).plot()
ax = p._figure.axes[0]
poly = ax.patches[0]
assert poly.get_facecolor() == to_rgba(c, 0)
| TestArea |
python | mlflow__mlflow | mlflow/entities/model_registry/registered_model.py | {
"start": 800,
"end": 6617
} | class ____(_ModelRegistryEntity):
"""
MLflow entity for Registered Model.
"""
def __init__(
self,
name,
creation_timestamp=None,
last_updated_timestamp=None,
description=None,
latest_versions=None,
tags=None,
aliases=None,
deployment_job_id=None,
deployment_job_state=None,
):
# Constructor is called only from within the system by various backend stores.
super().__init__()
self._name = name
self._creation_time = creation_timestamp
self._last_updated_timestamp = last_updated_timestamp
self._description = description
self._latest_version = latest_versions
self._tags = {tag.key: tag.value for tag in (tags or [])}
self._aliases = {alias.alias: alias.version for alias in (aliases or [])}
self._deployment_job_id = deployment_job_id
self._deployment_job_state = deployment_job_state
@property
def name(self):
"""String. Registered model name."""
return self._name
@name.setter
def name(self, new_name):
self._name = new_name
@property
def creation_timestamp(self):
"""Integer. Model version creation timestamp (milliseconds since the Unix epoch)."""
return self._creation_time
@property
def last_updated_timestamp(self):
"""Integer. Timestamp of last update for this model version (milliseconds since the Unix
epoch).
"""
return self._last_updated_timestamp
@last_updated_timestamp.setter
def last_updated_timestamp(self, updated_timestamp):
self._last_updated_timestamp = updated_timestamp
@property
def description(self):
"""String. Description"""
return self._description
@description.setter
def description(self, description):
self._description = description
@property
def latest_versions(self):
"""List of the latest :py:class:`mlflow.entities.model_registry.ModelVersion` instances
for each stage.
"""
return self._latest_version
@latest_versions.setter
def latest_versions(self, latest_versions):
self._latest_version = latest_versions
@property
def tags(self):
"""Dictionary of tag key (string) -> tag value for the current registered model."""
# Remove the is_prompt tag as it should not be user-facing
return {k: v for k, v in self._tags.items() if k != IS_PROMPT_TAG_KEY}
def _is_prompt(self):
"""Check if the registered model is a prompt."""
return self._tags.get(IS_PROMPT_TAG_KEY, "false").lower() == "true"
@property
def aliases(self):
"""Dictionary of aliases (string) -> version for the current registered model."""
return self._aliases
@classmethod
def _properties(cls):
# aggregate with base class properties since cls.__dict__ does not do it automatically
return sorted(cls._get_properties_helper())
def _add_tag(self, tag):
self._tags[tag.key] = tag.value
def _add_alias(self, alias):
self._aliases[alias.alias] = alias.version
@property
def deployment_job_id(self):
"""Deployment job ID for the current registered model."""
return self._deployment_job_id
@deployment_job_id.setter
def deployment_job_id(self, deployment_job_id):
self._deployment_job_id = deployment_job_id
@property
def deployment_job_state(self):
"""Deployment job state for the current registered model."""
return self._deployment_job_state
# proto mappers
@classmethod
def from_proto(cls, proto):
# input: mlflow.protos.model_registry_pb2.RegisteredModel
# returns RegisteredModel entity
registered_model = cls(
proto.name,
proto.creation_timestamp,
proto.last_updated_timestamp,
proto.description,
[ModelVersion.from_proto(mvd) for mvd in proto.latest_versions],
)
for tag in proto.tags:
registered_model._add_tag(RegisteredModelTag.from_proto(tag))
for alias in proto.aliases:
registered_model._add_alias(RegisteredModelAlias.from_proto(alias))
registered_model._deployment_job_id = proto.deployment_job_id
registered_model._deployment_job_state = RegisteredModelDeploymentJobState.to_string(
proto.deployment_job_state
)
return registered_model
def to_proto(self):
# returns mlflow.protos.model_registry_pb2.RegisteredModel
rmd = ProtoRegisteredModel()
rmd.name = self.name
if self.creation_timestamp is not None:
rmd.creation_timestamp = self.creation_timestamp
if self.last_updated_timestamp:
rmd.last_updated_timestamp = self.last_updated_timestamp
if self.description:
rmd.description = self.description
if self.latest_versions is not None:
rmd.latest_versions.extend(
[model_version.to_proto() for model_version in self.latest_versions]
)
if self.deployment_job_id:
rmd.deployment_job_id = self.deployment_job_id
if self.deployment_job_state:
rmd.deployment_job_state = RegisteredModelDeploymentJobState.from_string(
self.deployment_job_state
)
rmd.tags.extend(
[ProtoRegisteredModelTag(key=key, value=value) for key, value in self._tags.items()]
)
rmd.aliases.extend(
[
ProtoRegisteredModelAlias(alias=alias, version=str(version))
for alias, version in self._aliases.items()
]
)
return rmd
| RegisteredModel |
python | realpython__materials | solid-principles-python/shapes_ocp.py | {
"start": 922,
"end": 1107
} | class ____(Shape):
def __init__(self, radius):
super().__init__("circle")
self.radius = radius
def calculate_area(self):
return pi * self.radius**2
| Circle |
python | pytorch__pytorch | torch/_export/serde/serialize.py | {
"start": 133570,
"end": 157650
} | class ____(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Enum):
return obj.value
if isinstance(obj, bytes):
return base64.b64encode(obj).decode("utf-8")
return super().default(obj)
def _dataclass_to_dict(obj):
if isinstance(obj, _Union):
return {obj.type: _dataclass_to_dict(obj.value)}
elif dataclasses.is_dataclass(obj):
return {
f.name: _dataclass_to_dict(getattr(obj, f.name))
for f in dataclasses.fields(obj)
}
elif isinstance(obj, list):
return [_dataclass_to_dict(x) for x in obj]
elif isinstance(obj, tuple):
return tuple(_dataclass_to_dict(x) for x in obj)
elif isinstance(obj, dict):
return {k: _dataclass_to_dict(v) for k, v in obj.items()}
elif isinstance(obj, float):
if obj == math.inf:
return "Infinity"
elif obj == -math.inf:
return "-Infinity"
elif math.isnan(obj):
return "NaN"
else:
return obj
else:
return obj
def _to_json_bytes(obj: Any) -> bytes:
return json.dumps(_dataclass_to_dict(obj), cls=EnumEncoder, allow_nan=False).encode(
"utf-8"
)
def serialize(
exported_program: ep.ExportedProgram,
opset_version: Optional[dict[str, int]] = None,
pickle_protocol: int = DEFAULT_PICKLE_PROTOCOL,
) -> SerializedArtifact:
with _enable_graph_inputs_of_type_nn_module(exported_program.example_inputs):
serialized_program = ExportedProgramSerializer(
opset_version, pickle_protocol
).serialize(exported_program)
assert isinstance(serialized_program.exported_program, ExportedProgram)
json_bytes = _to_json_bytes(serialized_program.exported_program)
artifact = SerializedArtifact(
json_bytes,
serialized_program.state_dict,
serialized_program.constants,
serialized_program.example_inputs,
)
return artifact
def _resolve_schema_cls(cls):
if isinstance(cls, str):
resolved = getattr(schema, cls, None)
if resolved is not None:
return resolved
if isinstance(cls, typing.ForwardRef):
return _resolve_schema_cls(cls.__forward_arg__)
return cls
def _dict_to_dataclass(cls, data):
cls = _resolve_schema_cls(cls)
assert not isinstance(cls, str), f"Unresolved class type: '{cls}'."
if typing.get_origin(cls) is Annotated:
return _dict_to_dataclass(cls.__origin__, data)
if typing.get_origin(cls) == typing.Union and type(None) in typing.get_args(cls):
if data is None:
return None
ty_args = typing.get_args(cls)
assert len(ty_args) == 2
return _dict_to_dataclass(ty_args[0], data)
elif isinstance(cls, type) and issubclass(cls, _Union):
assert isinstance(data, dict)
assert len(data) == 1
_type = next(iter(data.keys()))
_value = next(iter(data.values()))
assert isinstance(_type, str)
type_hints = typing.get_type_hints(cls, globalns=vars(schema))
field_type = type_hints[_type]
# pyrefly: ignore [missing-attribute]
return cls.create(**{_type: _dict_to_dataclass(field_type, _value)})
elif dataclasses.is_dataclass(cls):
fields = {}
type_hints = typing.get_type_hints(cls, globalns=vars(schema))
# For forward compatibility consideration, we ignore all the keys
# that are not showing up in the dataclass definition.
for f in dataclasses.fields(cls):
name = f.name
if name not in data:
continue
new_field_obj = _dict_to_dataclass(type_hints[name], data[name])
fields[name] = new_field_obj
return cls(**fields) # type: ignore[operator]
elif isinstance(data, list):
if len(data) == 0:
return data
d_type = typing.get_args(cls)[0]
return [_dict_to_dataclass(d_type, d) for d in data]
elif isinstance(data, dict):
v_type = typing.get_args(cls)[1]
return {k: _dict_to_dataclass(v_type, v) for k, v in data.items()}
elif cls is float:
return float(data)
return data
def _bytes_to_dataclass(cls: Any, artifact_bytes: bytes) -> Any:
artifact_str = artifact_bytes.decode("utf-8")
artifact_dict = json.loads(artifact_str)
artifact_dataclass = _dict_to_dataclass(cls, artifact_dict)
return artifact_dataclass
def deserialize(
artifact: SerializedArtifact,
expected_opset_version: Optional[dict[str, int]] = None,
*,
_unsafe_skip_version_check=False,
) -> ep.ExportedProgram:
assert isinstance(artifact.exported_program, bytes)
serialized_exported_program = _bytes_to_dataclass(
ExportedProgram, artifact.exported_program
)
return ExportedProgramDeserializer(expected_opset_version).deserialize(
serialized_exported_program,
artifact.state_dict,
artifact.constants,
artifact.example_inputs,
_unsafe_skip_version_check=_unsafe_skip_version_check,
)
def _canonicalize_graph(
sorted_inputs, sorted_outputs, graph, constants
) -> tuple[Graph, dict[str, str]]:
def _get_argument(a: Argument):
if a.type == "as_none":
return None
elif a.type == "as_tensor":
return a.as_tensor
elif a.type == "as_tensors":
return a.as_tensors
elif a.type == "as_int":
return None
elif a.type == "as_ints":
return None
elif a.type == "as_float":
return None
elif a.type == "as_floats":
return None
elif a.type == "as_string":
return None
elif a.type == "as_strings":
return None
elif a.type == "as_complex":
return None
elif a.type == "as_sym_int":
return a.as_sym_int
elif a.type == "as_sym_ints":
return a.as_sym_ints
elif a.type == "as_sym_float":
return a.as_sym_float
elif a.type == "as_sym_floats":
return a.as_sym_floats
elif a.type == "as_scalar_type":
return None
elif a.type == "as_memory_format":
return None
elif a.type == "as_layout":
return None
elif a.type == "as_device":
return None
elif a.type == "as_bool":
return None
elif a.type == "as_bools":
return None
elif a.type == "as_sym_bool":
return a.as_sym_bool
elif a.type == "as_sym_bools":
return a.as_sym_bools
elif a.type == "as_graph":
return None
elif a.type == "as_optional_tensors":
return a.as_optional_tensors
elif a.type == "as_custom_obj":
return a.as_custom_obj
elif a.type == "as_operator":
return None
elif a.type == "as_int_lists":
return None
elif a.type == "as_string_to_argument":
return None
else:
raise AssertionError(f"Unknown input type to the ExportedProgram: {a}")
# Stage 1: Reorder named items.
def for_args(f, a):
assert isinstance(a, Argument)
pytree.tree_map(f, _get_argument(a))
def sort_nodes(nodes):
@dataclass
class Edges:
outs: list[int]
ins: int
graph_inputs: set[str] = set()
def_table: dict[str, int] = {}
edges: dict[int, Edges] = {}
candidates: list[tuple[str, list[tuple[str, list[int]]], int]] = []
rank: dict[str, int] = {}
ret: list[Node] = []
def get_name(a) -> Optional[str]:
if a is None:
return None
if isinstance(a, TensorArgument):
return a.name
elif isinstance(a, (SymIntArgument, SymBoolArgument, SymFloatArgument)):
if a.type == "as_name":
return a.as_name
elif a.type in ("as_int", "as_bool", "as_float"):
return None
else:
raise AssertionError(f"Unknown argument type: {a}")
elif isinstance(a, OptionalTensorArgument):
if a.type == "as_tensor":
return a.as_tensor.name
elif a.type == "as_none":
return None
else:
raise AssertionError(f"Unknown optional tensor type: {a}")
elif isinstance(a, CustomObjArgument):
return a.name
else:
raise AssertionError(f"Unknown argument type: {a}")
for i in sorted_inputs:
def add_input(a):
if s := get_name(a):
graph_inputs.add(s)
for_args(add_input, i)
for idx, node in enumerate(nodes):
def add_def(a):
if s := get_name(a):
assert s not in def_table
def_table[s] = idx
for o in node.outputs:
for_args(add_def, o)
edges[idx] = Edges([], 0)
for idx, user in enumerate(nodes):
def add_edge(a):
if s := get_name(a):
if s in constants:
return
if s not in def_table:
assert s in graph_inputs
return
src = def_table[s]
edges[src].outs.append(idx)
edges[idx].ins += 1
for i in user.inputs:
for_args(add_edge, i.arg)
def add_rank(a):
if s := get_name(a):
assert s not in rank
rank[s] = len(rank)
def get_rank(a):
s = get_name(a)
if s and s not in constants:
return rank[s]
else:
return -1
for i in sorted_inputs:
for_args(add_rank, i)
def add_candidate(idx: int):
def get_ranks(i):
ranks = []
for_args(lambda x: ranks.append(get_rank(x)), i)
return ranks
node = nodes[idx]
args_rank = [(a.name, get_ranks(a.arg)) for a in node.inputs]
heapq.heappush(candidates, (node.target, args_rank, idx))
for idx, e in edges.items():
if e.ins == 0:
add_candidate(idx)
while len(candidates) > 0:
_, _, idx = heapq.heappop(candidates)
node = nodes[idx]
for o in node.outputs:
for_args(add_rank, o)
ret.append(node)
assert idx in edges
for user in edges[idx].outs:
e = edges[user]
assert e.ins > 0
e.ins -= 1
if e.ins == 0:
add_candidate(user)
edges[idx].outs.clear()
return ret
sorted_nodes = sort_nodes(graph.nodes)
assert len(sorted_nodes) == len(graph.nodes)
# Stage 2: Rename nodes.
name_table: dict[str, str] = {}
def rename_def(a):
def _rename(arg_name, values):
new_name = f"_{len(name_table)}"
assert arg_name not in name_table
name_table[arg_name] = new_name
assert arg_name in values
values[new_name] = values.pop(arg_name)
return new_name
if a is None:
return
if isinstance(a, TensorArgument):
a.name = _rename(a.name, graph.tensor_values)
elif isinstance(a, SymIntArgument):
if a.type == "as_name":
a.as_name = _rename(a.as_name, graph.sym_int_values)
elif isinstance(a, SymFloatArgument):
if a.type == "as_name":
a.as_name = _rename(a.as_name, graph.sym_float_values)
elif isinstance(a, SymBoolArgument):
if a.type == "as_name":
a.as_name = _rename(a.as_name, graph.sym_bool_values)
elif isinstance(a, CustomObjArgument):
a.name = _rename(a.name, graph.custom_obj_values)
else:
raise AssertionError(f"Unknown argument type: {a}")
def replace_use(a):
if a is None:
return
if isinstance(a, TensorArgument):
a.name = name_table.get(a.name, a.name)
elif isinstance(a, (SymIntArgument, SymFloatArgument)):
if a.type == "as_name":
a.as_name = name_table.get(a.as_name, a.as_name)
elif isinstance(a, SymBoolArgument):
if a.type == "as_name":
a.as_name = name_table.get(a.as_name, a.as_name)
elif isinstance(a, OptionalTensorArgument):
if a.type == "as_tensor":
a.as_tensor.name = name_table.get(a.as_tensor.name, a.as_tensor.name)
elif isinstance(a, CustomObjArgument):
a.name = name_table.get(a.name, a.name)
else:
raise AssertionError(f"Unknown argument type: {a}")
for i in sorted_inputs:
for_args(rename_def, i)
for n in sorted_nodes:
for o in n.outputs:
for_args(rename_def, o)
for n in sorted_nodes:
for i in n.inputs:
for_args(replace_use, i.arg)
for o in sorted_outputs:
for_args(replace_use, o)
# Stage 3: Remove unstable fields.
for n in sorted_nodes:
n.metadata.clear()
# Stage 4: Aggregate values.
# pyrefly: ignore [no-matching-overload]
sorted_tensor_values = dict(
sorted(graph.tensor_values.items(), key=operator.itemgetter(0))
)
# pyrefly: ignore [no-matching-overload]
sorted_sym_int_values = dict(
sorted(graph.sym_int_values.items(), key=operator.itemgetter(0))
)
# pyrefly: ignore [no-matching-overload]
sorted_sym_float_values = dict(
sorted(graph.sym_float_values.items(), key=operator.itemgetter(0))
)
# pyrefly: ignore [no-matching-overload]
sorted_sym_bool_values = dict(
sorted(graph.sym_bool_values.items(), key=operator.itemgetter(0))
)
# pyrefly: ignore [no-matching-overload]
sorted_custom_obj_values = dict(
sorted(graph.custom_obj_values.items(), key=operator.itemgetter(0))
)
# Stage 5: Recurse in subgraphs.
counter = 0
for node in sorted_nodes:
for i in node.inputs:
a = i.arg
if a.type == "as_graph":
a.as_graph.graph, _ = _canonicalize_graph(
a.as_graph.graph.inputs,
a.as_graph.graph.outputs,
a.as_graph.graph,
constants,
)
a.as_graph.name = f"_g{counter}"
counter += 1
graph = Graph(
inputs=sorted_inputs,
outputs=sorted_outputs,
nodes=sorted_nodes,
tensor_values=sorted_tensor_values,
sym_int_values=sorted_sym_int_values,
sym_float_values=sorted_sym_float_values,
sym_bool_values=sorted_sym_bool_values,
is_single_tensor_return=graph.is_single_tensor_return,
custom_obj_values=sorted_custom_obj_values,
)
return graph, name_table
def canonicalize(
ep: ExportedProgram, constants: Optional[set[str]] = None
) -> ExportedProgram:
"""
Normalize a serialized ExportedProgram, so that different eager program which
shares the same semantics can get a single representation on disk.
This function canonicalizes an ExportedProgram by:
1. Sorting nodes in topological order.
2. Rename nodes to have unique names.
3. Remove unstable fields.
4. Aggregate the above program fields.
5. Recurse in subgraphs.
Args:
ep (ExportedProgram): The ExportedProgram to canonicalize.
constants (Optional[set[str]]): Set of constants names
Returns:
ExportedProgram: The canonicalized exported program.
"""
ep = copy.deepcopy(ep)
# pyrefly: ignore [annotation-mismatch]
constants: set[str] = constants or set()
opset_version = dict(sorted(ep.opset_version.items(), key=operator.itemgetter(0)))
range_constraints = dict(
sorted(ep.range_constraints.items(), key=operator.itemgetter(0))
)
guards_code = sorted(ep.guards_code)
module_call_graph = sorted(ep.graph_module.module_call_graph, key=lambda x: x.fqn)
signature = ep.graph_module.signature
graph = ep.graph_module.graph
assert len(graph.inputs) == len(signature.input_specs)
assert len(graph.outputs) == len(signature.output_specs)
def rank_input(inp) -> tuple[int, Optional[str], int]:
idx, (_arg, spec) = inp
assert isinstance(spec, InputSpec)
if spec.type == "user_input":
return 5, None, idx
elif spec.type == "parameter":
return 1, spec.parameter.parameter_name, idx
elif spec.type == "buffer":
return 2, spec.buffer.buffer_name, idx
elif spec.type == "tensor_constant":
return 3, spec.tensor_constant.tensor_constant_name, idx
elif spec.type == "custom_obj":
return 4, spec.custom_obj.custom_obj_name, idx
elif spec.type == "token":
return 0, None, idx
elif spec.type == "constant_input":
return 6, spec.constant_input.name, idx
else:
raise AssertionError(f"Unknown input type: {spec}")
def rank_output(out) -> tuple[int, Optional[str], int]:
idx, (_arg, spec) = out
assert isinstance(spec, OutputSpec)
if spec.type == "user_output":
return 4, None, idx
elif spec.type == "loss_output":
return 4, None, idx
elif spec.type == "parameter_mutation":
return 1, spec.parameter_mutation.parameter_name, idx
elif spec.type == "buffer_mutation":
return 2, spec.buffer_mutation.buffer_name, idx
elif spec.type == "gradient_to_parameter":
return 5, spec.gradient_to_parameter.parameter_name, idx
elif spec.type == "gradient_to_user_input":
return 6, None, idx
elif spec.type == "user_input_mutation":
return 3, None, idx
elif spec.type == "token":
return 0, None, idx
else:
raise AssertionError(f"Unknown output type: {spec}")
sorted_ins = sorted(
enumerate(zip(graph.inputs, signature.input_specs)), key=rank_input
)
if len(sorted_ins) > 0:
sorted_inputs, input_specs = zip(*(i for idx, i in sorted_ins)) # type: ignore[assignment]
else:
sorted_inputs = ()
input_specs = ()
sorted_outs = sorted(
enumerate(zip(graph.outputs, signature.output_specs)), key=rank_output
)
sorted_outputs, output_specs = zip(*(i for idx, i in sorted_outs)) # type: ignore[assignment]
sorted_graph, replace_table = _canonicalize_graph(
sorted_inputs, sorted_outputs, graph, constants
)
def replace_input(spec):
assert isinstance(spec, InputSpec)
if spec.type == "user_input":
arg = spec.user_input.arg
if arg.type == "as_tensor":
t = arg.as_tensor
t.name = replace_table[t.name]
elif arg.type == "as_sym_int":
s = arg.as_sym_int
if s.type == "as_name":
s.as_name = replace_table[s.as_name]
elif s.type == "as_int":
pass
else:
raise AssertionError(f"Unknown sym_int type: {s}")
elif arg.type == "as_sym_float":
f = arg.as_sym_float
if f.type == "as_name":
f.as_name = replace_table[f.as_name]
elif f.type == "as_float":
pass
else:
raise AssertionError(f"Unknown sym_float type: {f}")
elif arg.type in (
"as_none",
"as_bool",
"as_int",
"as_float",
"as_string",
"as_custom_obj",
):
return
else:
raise AssertionError(f"Unknown input type: {arg}")
elif spec.type == "parameter":
t = spec.parameter.arg
t.name = replace_table[t.name]
elif spec.type == "buffer":
t = spec.buffer.arg
t.name = replace_table[t.name]
elif spec.type == "tensor_constant":
t = spec.tensor_constant.arg
t.name = replace_table[t.name]
elif spec.type == "custom_obj":
t_custom_obj = spec.custom_obj.arg
t_custom_obj.name = replace_table[t_custom_obj.name]
return
elif spec.type == "token":
tok = spec.token.arg
tok.name = replace_table[tok.name]
elif spec.type == "constant_input":
return
else:
raise AssertionError(f"Unknown input type: {spec}")
def replace_output(out):
assert isinstance(spec, OutputSpec)
if spec.type == "user_output":
arg = spec.user_output.arg
if arg.type == "as_tensor":
t = arg.as_tensor
t.name = replace_table[t.name]
elif arg.type == "as_sym_int":
s = arg.as_sym_int
if s.type == "as_name":
s.as_name = replace_table[s.as_name]
elif s.type == "as_int":
pass
else:
raise AssertionError(f"Unknown sym_int type: {s}")
elif arg.type == "as_sym_float":
f = arg.as_sym_float
if f.type == "as_name":
f.as_name = replace_table[f.as_name]
elif f.type == "as_float":
pass
else:
raise AssertionError(f"Unknown sym_float type: {f}")
elif arg.type in ("as_none", "as_bool", "as_int", "as_float", "as_string"):
return
else:
raise AssertionError(f"Unknown input type: {arg}")
elif spec.type == "loss_output":
t = spec.loss_output.arg
t.name = replace_table[t.name]
elif spec.type == "buffer_mutation":
t = spec.buffer_mutation.arg
t.name = replace_table[t.name]
elif spec.type == "parameter_mutation":
t = spec.parameter_mutation.arg
t.name = replace_table[t.name]
elif spec.type == "gradient_to_parameter":
t = spec.gradient_to_parameter.arg
t.name = replace_table[t.name]
elif spec.type == "gradient_to_user_input":
g = spec.gradient_to_user_input
g.arg.name = replace_table[g.arg.name]
g.user_input_name = replace_table[g.user_input_name]
elif spec.type == "user_input_mutation":
u = spec.user_input_mutation
u.arg.name = replace_table[u.arg.name]
u.user_input_name = replace_table[u.user_input_name]
elif spec.type == "token":
tok = spec.token.arg
tok.name = replace_table[tok.name]
else:
raise AssertionError(f"Unknown output type: {spec}")
for spec in input_specs:
replace_input(spec)
for spec in output_specs:
replace_output(spec)
return ExportedProgram(
graph_module=GraphModule(
graph=sorted_graph,
signature=GraphSignature(
input_specs=list(input_specs),
output_specs=list(output_specs),
),
module_call_graph=module_call_graph,
),
opset_version=opset_version,
range_constraints=range_constraints,
schema_version=ep.schema_version,
verifiers=ep.verifiers,
torch_version=ep.torch_version,
guards_code=guards_code,
)
| EnumEncoder |
python | doocs__leetcode | solution/1000-1099/1008.Construct Binary Search Tree from Preorder Traversal/Solution.py | {
"start": 0,
"end": 601
} | class ____:
def bstFromPreorder(self, preorder: List[int]) -> Optional[TreeNode]:
def dfs(i: int, j: int) -> Optional[TreeNode]:
if i > j:
return None
root = TreeNode(preorder[i])
l, r = i + 1, j + 1
while l < r:
mid = (l + r) >> 1
if preorder[mid] > preorder[i]:
r = mid
else:
l = mid + 1
root.left = dfs(i + 1, l - 1)
root.right = dfs(l, j)
return root
return dfs(0, len(preorder) - 1)
| Solution |
python | psf__black | tests/data/cases/class_methods_new_line.py | {
"start": 1257,
"end": 1379
} | class ____:
cls_var = 100
class Inner:
pass
def __init__(self):
pass
| ClassWithInitAndVarsWithInner |
python | sympy__sympy | sympy/geometry/line.py | {
"start": 67068,
"end": 68732
} | class ____(LinearEntity2D, Segment):
"""A line segment in 2D space.
Parameters
==========
p1 : Point
p2 : Point
Attributes
==========
length : number or SymPy expression
midpoint : Point
See Also
========
sympy.geometry.point.Point, Line
Examples
========
>>> from sympy import Point, Segment
>>> Segment((1, 0), (1, 1)) # tuples are interpreted as pts
Segment2D(Point2D(1, 0), Point2D(1, 1))
>>> s = Segment(Point(4, 3), Point(1, 1)); s
Segment2D(Point2D(4, 3), Point2D(1, 1))
>>> s.points
(Point2D(4, 3), Point2D(1, 1))
>>> s.slope
2/3
>>> s.length
sqrt(13)
>>> s.midpoint
Point2D(5/2, 2)
"""
def __new__(cls, p1, p2, **kwargs):
p1 = Point(p1, dim=2)
p2 = Point(p2, dim=2)
if p1 == p2:
return p1
return LinearEntity2D.__new__(cls, p1, p2, **kwargs)
def _svg(self, scale_factor=1., fill_color="#66cc99"):
"""Returns SVG path element for the LinearEntity.
Parameters
==========
scale_factor : float
Multiplication factor for the SVG stroke-width. Default is 1.
fill_color : str, optional
Hex string for fill color. Default is "#66cc99".
"""
verts = (N(self.p1), N(self.p2))
coords = ["{},{}".format(p.x, p.y) for p in verts]
path = "M {} L {}".format(coords[0], " L ".join(coords[1:]))
return (
'<path fill-rule="evenodd" fill="{2}" stroke="#555555" '
'stroke-width="{0}" opacity="0.6" d="{1}" />'
).format(2.*scale_factor, path, fill_color)
| Segment2D |
python | kamyu104__LeetCode-Solutions | Python/minimum-ascii-delete-sum-for-two-strings.py | {
"start": 767,
"end": 1474
} | class ____(object):
def minimumDeleteSum(self, s1, s2):
"""
:type s1: str
:type s2: str
:rtype: int
"""
dp = [[0] * (len(s2)+1) for _ in xrange(len(s1)+1)]
for i in xrange(len(s1)):
dp[i+1][0] = dp[i][0] + ord(s1[i])
for j in xrange(len(s2)):
dp[0][j+1] = dp[0][j] + ord(s2[j])
for i in xrange(len(s1)):
for j in xrange(len(s2)):
if s1[i] == s2[j]:
dp[i+1][j+1] = dp[i][j]
else:
dp[i+1][j+1] = min(dp[i][j+1] + ord(s1[i]), \
dp[i+1][j] + ord(s2[j]))
return dp[-1][-1]
| Solution2 |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/solids.py | {
"start": 25053,
"end": 25656
} | class ____(graphene.Interface):
id = graphene.NonNull(graphene.ID)
name = graphene.NonNull(graphene.String)
description = graphene.String()
solids = non_null_list(GrapheneSolid)
solid_handle = graphene.Field(
GrapheneSolidHandle,
handleID=graphene.Argument(graphene.NonNull(graphene.String)),
)
solid_handles = graphene.Field(
non_null_list(GrapheneSolidHandle), parentHandleID=graphene.String()
)
modes = non_null_list("dagster_graphql.schema.pipelines.mode.GrapheneMode")
class Meta:
name = "SolidContainer"
| GrapheneSolidContainer |
python | weaviate__weaviate-python-client | weaviate/exceptions.py | {
"start": 8877,
"end": 9147
} | class ____(WeaviateBaseError):
"""Is raised when a batch validation error occurs."""
def __init__(self, message: str):
msg = f"""Batch validation error: {message}"""
super().__init__(msg)
self.message = message
| WeaviateBatchValidationError |
python | pallets__jinja | src/jinja2/environment.py | {
"start": 55766,
"end": 57030
} | class ____:
"""Represents an imported template. All the exported names of the
template are available as attributes on this object. Additionally
converting it into a string renders the contents.
"""
def __init__(
self,
template: Template,
context: Context,
body_stream: t.Iterable[str] | None = None,
) -> None:
if body_stream is None:
if context.environment.is_async:
raise RuntimeError(
"Async mode requires a body stream to be passed to"
" a template module. Use the async methods of the"
" API you are using."
)
body_stream = list(template.root_render_func(context))
self._body_stream = body_stream
self.__dict__.update(context.get_exported())
self.__name__ = template.name
def __html__(self) -> Markup:
return Markup(concat(self._body_stream))
def __str__(self) -> str:
return concat(self._body_stream)
def __repr__(self) -> str:
if self.__name__ is None:
name = f"memory:{id(self):x}"
else:
name = repr(self.__name__)
return f"<{type(self).__name__} {name}>"
| TemplateModule |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/dependency.py | {
"start": 27145,
"end": 28595
} | class ____(
IDependencyDefinition,
NamedTuple(
"_BlockingAssetChecksDependencyDefinition",
[
(
"asset_check_dependencies",
Sequence[DependencyDefinition],
),
("other_dependency", Optional[DependencyDefinition]),
],
),
):
"""An input that depends on a set of outputs that correspond to upstream asset checks, and also
optionally depends on a single upstream output that does not correspond to an asset check.
We model this with a different kind of DependencyDefinition than MultiDependencyDefinition,
because we treat the value that's passed to the input parameter differently: we ignore the asset
check dependencies and only pass a single value, instead of a fanned-in list.
"""
@public
def get_node_dependencies(self) -> Sequence[DependencyDefinition]:
"""Return the list of :py:class:`DependencyDefinition` contained by this object."""
if self.other_dependency:
return [*self.asset_check_dependencies, self.other_dependency]
else:
return self.asset_check_dependencies
@public
def is_fan_in(self) -> bool:
return False
@public
def get_dependencies_and_mappings(
self,
) -> Sequence[Union[DependencyDefinition, type["MappedInputPlaceholder"]]]:
return self.get_node_dependencies()
| BlockingAssetChecksDependencyDefinition |
python | sqlalchemy__sqlalchemy | test/orm/test_unitofwork.py | {
"start": 7638,
"end": 8781
} | class ____(fixtures.MappedTest, testing.AssertsExecutionResults):
@classmethod
def define_tables(cls, metadata):
Table(
"t1",
metadata,
Column(
"id",
sa.Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("data", sa.LargeBinary),
)
@classmethod
def setup_classes(cls):
class Foo(cls.Basic):
pass
@testing.requires.non_broken_binary
def test_binary_equality(self):
Foo, t1 = self.classes.Foo, self.tables.t1
# data = b("this is some data")
data = b"m\x18" # m\xf2\r\n\x7f\x10'
self.mapper_registry.map_imperatively(Foo, t1)
s = fixture_session()
f1 = Foo(data=data)
s.add(f1)
s.flush()
s.expire_all()
f1 = s.query(Foo).first()
assert f1.data == data
f1.data = data
eq_(sa.orm.attributes.get_history(f1, "data"), ((), [data], ()))
def go():
s.flush()
self.assert_sql_count(testing.db, go, 0)
| BinaryHistTest |
python | weaviate__weaviate-python-client | weaviate/proto/v1/v4216/v1/weaviate_pb2_grpc.py | {
"start": 2863,
"end": 7467
} | class ____(object):
"""Missing associated documentation comment in .proto file."""
def Search(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BatchObjects(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BatchReferences(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BatchDelete(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def TenantsGet(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Aggregate(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BatchStream(self, request_iterator, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_WeaviateServicer_to_server(servicer, server):
rpc_method_handlers = {
'Search': grpc.unary_unary_rpc_method_handler(
servicer.Search,
request_deserializer=v1_dot_search__get__pb2.SearchRequest.FromString,
response_serializer=v1_dot_search__get__pb2.SearchReply.SerializeToString,
),
'BatchObjects': grpc.unary_unary_rpc_method_handler(
servicer.BatchObjects,
request_deserializer=v1_dot_batch__pb2.BatchObjectsRequest.FromString,
response_serializer=v1_dot_batch__pb2.BatchObjectsReply.SerializeToString,
),
'BatchReferences': grpc.unary_unary_rpc_method_handler(
servicer.BatchReferences,
request_deserializer=v1_dot_batch__pb2.BatchReferencesRequest.FromString,
response_serializer=v1_dot_batch__pb2.BatchReferencesReply.SerializeToString,
),
'BatchDelete': grpc.unary_unary_rpc_method_handler(
servicer.BatchDelete,
request_deserializer=v1_dot_batch__delete__pb2.BatchDeleteRequest.FromString,
response_serializer=v1_dot_batch__delete__pb2.BatchDeleteReply.SerializeToString,
),
'TenantsGet': grpc.unary_unary_rpc_method_handler(
servicer.TenantsGet,
request_deserializer=v1_dot_tenants__pb2.TenantsGetRequest.FromString,
response_serializer=v1_dot_tenants__pb2.TenantsGetReply.SerializeToString,
),
'Aggregate': grpc.unary_unary_rpc_method_handler(
servicer.Aggregate,
request_deserializer=v1_dot_aggregate__pb2.AggregateRequest.FromString,
response_serializer=v1_dot_aggregate__pb2.AggregateReply.SerializeToString,
),
'BatchStream': grpc.stream_stream_rpc_method_handler(
servicer.BatchStream,
request_deserializer=v1_dot_batch__pb2.BatchStreamRequest.FromString,
response_serializer=v1_dot_batch__pb2.BatchStreamReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'weaviate.v1.Weaviate', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
| WeaviateServicer |
python | ray-project__ray | python/ray/tune/schedulers/median_stopping_rule.py | {
"start": 448,
"end": 8420
} | class ____(FIFOScheduler):
"""Implements the median stopping rule as described in the Vizier paper:
https://research.google.com/pubs/pub46180.html
Args:
time_attr: The training result attr to use for comparing time.
Note that you can pass in something non-temporal such as
`training_iteration` as a measure of progress, the only requirement
is that the attribute should increase monotonically.
metric: The training result objective value attribute. Stopping
procedures will use this attribute. If None but a mode was passed,
the `ray.tune.result.DEFAULT_METRIC` will be used per default.
mode: One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute.
grace_period: Only stop trials at least this old in time.
The mean will only be computed from this time onwards. The units
are the same as the attribute named by `time_attr`.
min_samples_required: Minimum number of trials to compute median
over.
min_time_slice: Each trial runs at least this long before
yielding (assuming it isn't stopped). Note: trials ONLY yield if
there are not enough samples to evaluate performance for the
current result AND there are other trials waiting to run.
The units are the same as the attribute named by `time_attr`.
hard_stop: If False, pauses trials instead of stopping
them. When all other trials are complete, paused trials will be
resumed and allowed to run FIFO.
"""
def __init__(
self,
time_attr: str = "time_total_s",
metric: Optional[str] = None,
mode: Optional[str] = None,
grace_period: float = 60.0,
min_samples_required: int = 3,
min_time_slice: int = 0,
hard_stop: bool = True,
):
super().__init__()
self._stopped_trials = set()
self._grace_period = grace_period
self._min_samples_required = min_samples_required
self._min_time_slice = min_time_slice
self._metric = metric
self._worst = None
self._compare_op = None
self._mode = mode
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
self._worst = float("-inf") if self._mode == "max" else float("inf")
self._compare_op = max if self._mode == "max" else min
self._time_attr = time_attr
self._hard_stop = hard_stop
self._trial_state = {}
self._last_pause = collections.defaultdict(lambda: float("-inf"))
self._results = collections.defaultdict(list)
def set_search_properties(
self, metric: Optional[str], mode: Optional[str], **spec
) -> bool:
if self._metric and metric:
return False
if self._mode and mode:
return False
if metric:
self._metric = metric
if mode:
self._mode = mode
self._worst = float("-inf") if self._mode == "max" else float("inf")
self._compare_op = max if self._mode == "max" else min
if self._metric is None and self._mode:
# If only a mode was passed, use anonymous metric
self._metric = DEFAULT_METRIC
return True
def on_trial_add(self, tune_controller: "TuneController", trial: Trial):
if not self._metric or not self._worst or not self._compare_op:
raise ValueError(
"{} has been instantiated without a valid `metric` ({}) or "
"`mode` ({}) parameter. Either pass these parameters when "
"instantiating the scheduler, or pass them as parameters "
"to `tune.TuneConfig()`".format(
self.__class__.__name__, self._metric, self._mode
)
)
super(MedianStoppingRule, self).on_trial_add(tune_controller, trial)
def on_trial_result(
self, tune_controller: "TuneController", trial: Trial, result: Dict
) -> str:
"""Callback for early stopping.
This stopping rule stops a running trial if the trial's best objective
value by step `t` is strictly worse than the median of the running
averages of all completed trials' objectives reported up to step `t`.
"""
if self._time_attr not in result or self._metric not in result:
return TrialScheduler.CONTINUE
if trial in self._stopped_trials:
assert not self._hard_stop
# Fall back to FIFO
return TrialScheduler.CONTINUE
time = result[self._time_attr]
self._results[trial].append(result)
if time < self._grace_period:
return TrialScheduler.CONTINUE
trials = self._trials_beyond_time(time)
trials.remove(trial)
if len(trials) < self._min_samples_required:
action = self._on_insufficient_samples(tune_controller, trial, time)
if action == TrialScheduler.PAUSE:
self._last_pause[trial] = time
action_str = "Yielding time to other trials."
else:
action_str = "Continuing anyways."
logger.debug(
"MedianStoppingRule: insufficient samples={} to evaluate "
"trial {} at t={}. {}".format(
len(trials), trial.trial_id, time, action_str
)
)
return action
median_result = self._median_result(trials, time)
best_result = self._best_result(trial)
logger.debug(
"Trial {} best res={} vs median res={} at t={}".format(
trial, best_result, median_result, time
)
)
if self._compare_op(median_result, best_result) != best_result:
logger.debug("MedianStoppingRule: early stopping {}".format(trial))
self._stopped_trials.add(trial)
if self._hard_stop:
return TrialScheduler.STOP
else:
return TrialScheduler.PAUSE
else:
return TrialScheduler.CONTINUE
def on_trial_complete(
self, tune_controller: "TuneController", trial: Trial, result: Dict
):
self._results[trial].append(result)
def debug_string(self) -> str:
return "Using MedianStoppingRule: num_stopped={}.".format(
len(self._stopped_trials)
)
def _on_insufficient_samples(
self, tune_controller: "TuneController", trial: Trial, time: float
) -> str:
pause = time - self._last_pause[trial] > self._min_time_slice
pause = pause and [
t
for t in tune_controller.get_live_trials()
if t.status in (Trial.PENDING, Trial.PAUSED)
]
return TrialScheduler.PAUSE if pause else TrialScheduler.CONTINUE
def _trials_beyond_time(self, time: float) -> List[Trial]:
trials = [
trial
for trial in self._results
if self._results[trial][-1][self._time_attr] >= time
]
return trials
def _median_result(self, trials: List[Trial], time: float):
return np.median([self._running_mean(trial, time) for trial in trials])
def _running_mean(self, trial: Trial, time: float) -> np.ndarray:
results = self._results[trial]
# TODO(ekl) we could do interpolation to be more precise, but for now
# assume len(results) is large and the time diffs are roughly equal
scoped_results = [
r for r in results if self._grace_period <= r[self._time_attr] <= time
]
return np.mean([r[self._metric] for r in scoped_results])
def _best_result(self, trial):
results = self._results[trial]
return self._compare_op([r[self._metric] for r in results])
| MedianStoppingRule |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/convolutional.py | {
"start": 105989,
"end": 109558
} | class ____(Layer):
"""Upsampling layer for 2D inputs.
Repeats the rows and columns of the data
by `size[0]` and `size[1]` respectively.
Examples:
>>> input_shape = (2, 2, 1, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> print(x)
[[[[ 0 1 2]]
[[ 3 4 5]]]
[[[ 6 7 8]]
[[ 9 10 11]]]]
>>> y = tf.keras.layers.UpSampling2D(size=(1, 2))(x)
>>> print(y)
tf.Tensor(
[[[[ 0 1 2]
[ 0 1 2]]
[[ 3 4 5]
[ 3 4 5]]]
[[[ 6 7 8]
[ 6 7 8]]
[[ 9 10 11]
[ 9 10 11]]]], shape=(2, 2, 2, 3), dtype=int64)
Args:
size: Int, or tuple of 2 integers.
The upsampling factors for rows and columns.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
interpolation: A string, one of `nearest` or `bilinear`.
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, rows, cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, rows, cols)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, upsampled_rows, upsampled_cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, upsampled_rows, upsampled_cols)`
"""
def __init__(self,
size=(2, 2),
data_format=None,
interpolation='nearest',
**kwargs):
super(UpSampling2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
self.size = conv_utils.normalize_tuple(size, 2, 'size')
if interpolation not in {'nearest', 'bilinear'}:
raise ValueError('`interpolation` argument should be one of `"nearest"` '
'or `"bilinear"`.')
self.interpolation = interpolation
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_first':
height = self.size[0] * input_shape[
2] if input_shape[2] is not None else None
width = self.size[1] * input_shape[
3] if input_shape[3] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], height, width])
else:
height = self.size[0] * input_shape[
1] if input_shape[1] is not None else None
width = self.size[1] * input_shape[
2] if input_shape[2] is not None else None
return tensor_shape.TensorShape(
[input_shape[0], height, width, input_shape[3]])
def call(self, inputs):
return backend.resize_images(
inputs, self.size[0], self.size[1], self.data_format,
interpolation=self.interpolation)
def get_config(self):
config = {
'size': self.size,
'data_format': self.data_format,
'interpolation': self.interpolation
}
base_config = super(UpSampling2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| UpSampling2D |
python | plotly__plotly.py | plotly/graph_objs/indicator/gauge/_step.py | {
"start": 233,
"end": 8894
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "indicator.gauge"
_path_str = "indicator.gauge.step"
_valid_props = {"color", "line", "name", "range", "templateitemname", "thickness"}
@property
def color(self):
"""
Sets the background color of the arc.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.indicator.gauge.step.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Returns
-------
plotly.graph_objs.indicator.gauge.step.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def range(self):
"""
Sets the range of this axis.
The 'range' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'range[0]' property is a number and may be specified as:
- An int or float
(1) The 'range[1]' property is a number and may be specified as:
- An int or float
Returns
-------
list
"""
return self["range"]
@range.setter
def range(self, val):
self["range"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def thickness(self):
"""
Sets the thickness of the bar as a fraction of the total
thickness of the gauge.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the background color of the arc.
line
:class:`plotly.graph_objects.indicator.gauge.step.Line`
instance or dict with compatible properties
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
range
Sets the range of this axis.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
thickness
Sets the thickness of the bar as a fraction of the
total thickness of the gauge.
"""
def __init__(
self,
arg=None,
color=None,
line=None,
name=None,
range=None,
templateitemname=None,
thickness=None,
**kwargs,
):
"""
Construct a new Step object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.indicator.gauge.Step`
color
Sets the background color of the arc.
line
:class:`plotly.graph_objects.indicator.gauge.step.Line`
instance or dict with compatible properties
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
range
Sets the range of this axis.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
thickness
Sets the thickness of the bar as a fraction of the
total thickness of the gauge.
Returns
-------
Step
"""
super().__init__("steps")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.indicator.gauge.Step
constructor must be a dict or
an instance of :class:`plotly.graph_objs.indicator.gauge.Step`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("line", arg, line)
self._set_property("name", arg, name)
self._set_property("range", arg, range)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("thickness", arg, thickness)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Step |
python | bokeh__bokeh | src/bokeh/core/has_props.py | {
"start": 8914,
"end": 8984
} | class ____:
"""Don't register this class in model registry. """
| Local |
python | python-excel__xlwt | xlwt/Bitmap.py | {
"start": 4319,
"end": 9897
} | class ____(BiffRecord):
_REC_ID = 0x005D # Record identifier
def __init__(self, row, col, sheet, im_data_bmp, x, y, scale_x, scale_y):
# Scale the frame of the image.
width = im_data_bmp.width * scale_x
height = im_data_bmp.height * scale_y
# Calculate the vertices of the image and write the OBJ record
coordinates = _position_image(sheet, row, col, x, y, width, height)
# print coordinates
col_start, x1, row_start, y1, col_end, x2, row_end, y2 = coordinates
"""Store the OBJ record that precedes an IMDATA record. This could be generalise
to support other Excel objects.
"""
cObj = 0x0001 # Count of objects in file (set to 1)
OT = 0x0008 # Object type. 8 = Picture
id = 0x0001 # Object ID
grbit = 0x0614 # Option flags
colL = col_start # Col containing upper left corner of object
dxL = x1 # Distance from left side of cell
rwT = row_start # Row containing top left corner of object
dyT = y1 # Distance from top of cell
colR = col_end # Col containing lower right corner of object
dxR = x2 # Distance from right of cell
rwB = row_end # Row containing bottom right corner of object
dyB = y2 # Distance from bottom of cell
cbMacro = 0x0000 # Length of FMLA structure
Reserved1 = 0x0000 # Reserved
Reserved2 = 0x0000 # Reserved
icvBack = 0x09 # Background colour
icvFore = 0x09 # Foreground colour
fls = 0x00 # Fill pattern
fAuto = 0x00 # Automatic fill
icv = 0x08 # Line colour
lns = 0xff # Line style
lnw = 0x01 # Line weight
fAutoB = 0x00 # Automatic border
frs = 0x0000 # Frame style
cf = 0x0009 # Image format, 9 = bitmap
Reserved3 = 0x0000 # Reserved
cbPictFmla = 0x0000 # Length of FMLA structure
Reserved4 = 0x0000 # Reserved
grbit2 = 0x0001 # Option flags
Reserved5 = 0x0000 # Reserved
data = pack("<L", cObj)
data += pack("<H", OT)
data += pack("<H", id)
data += pack("<H", grbit)
data += pack("<H", colL)
data += pack("<H", dxL)
data += pack("<H", rwT)
data += pack("<H", dyT)
data += pack("<H", colR)
data += pack("<H", dxR)
data += pack("<H", rwB)
data += pack("<H", dyB)
data += pack("<H", cbMacro)
data += pack("<L", Reserved1)
data += pack("<H", Reserved2)
data += pack("<B", icvBack)
data += pack("<B", icvFore)
data += pack("<B", fls)
data += pack("<B", fAuto)
data += pack("<B", icv)
data += pack("<B", lns)
data += pack("<B", lnw)
data += pack("<B", fAutoB)
data += pack("<H", frs)
data += pack("<L", cf)
data += pack("<H", Reserved3)
data += pack("<H", cbPictFmla)
data += pack("<H", Reserved4)
data += pack("<H", grbit2)
data += pack("<L", Reserved5)
self._rec_data = data
def _process_bitmap(bitmap):
"""Convert a 24 bit bitmap into the modified internal format used by Windows.
This is described in BITMAPCOREHEADER and BITMAPCOREINFO structures in the
MSDN library.
"""
# Open file and binmode the data in case the platform needs it.
with open(bitmap, "rb") as fh:
# Slurp the file into a string.
data = fh.read()
return _process_bitmap_data(data)
def _process_bitmap_data(data):
# Check that the file is big enough to be a bitmap.
if len(data) <= 0x36:
raise Exception("bitmap doesn't contain enough data.")
# The first 2 bytes are used to identify the bitmap.
if (data[:2] != b"BM"):
raise Exception("bitmap doesn't appear to to be a valid bitmap image.")
# Remove bitmap data: ID.
data = data[2:]
# Read and remove the bitmap size. This is more reliable than reading
# the data size at offset 0x22.
#
size = unpack("<L", data[:4])[0]
size -= 0x36 # Subtract size of bitmap header.
size += 0x0C # Add size of BIFF header.
data = data[4:]
# Remove bitmap data: reserved, offset, header length.
data = data[12:]
# Read and remove the bitmap width and height. Verify the sizes.
width, height = unpack("<LL", data[:8])
data = data[8:]
if (width > 0xFFFF):
raise Exception("bitmap: largest image width supported is 65k.")
if (height > 0xFFFF):
raise Exception("bitmap: largest image height supported is 65k.")
# Read and remove the bitmap planes and bpp data. Verify them.
planes, bitcount = unpack("<HH", data[:4])
data = data[4:]
if (bitcount != 24):
raise Exception("bitmap isn't a 24bit true color bitmap.")
if (planes != 1):
raise Exception("bitmap: only 1 plane supported in bitmap image.")
# Read and remove the bitmap compression. Verify compression.
compression = unpack("<L", data[:4])[0]
data = data[4:]
if (compression != 0):
raise Exception("bitmap: compression not supported in bitmap image.")
# Remove bitmap data: data size, hres, vres, colours, imp. colours.
data = data[20:]
# Add the BITMAPCOREHEADER data
header = pack("<LHHHH", 0x000c, width, height, 0x01, 0x18)
data = header + data
return (width, height, size, data)
| ObjBmpRecord |
python | doocs__leetcode | solution/0000-0099/0054.Spiral Matrix/Solution2.py | {
"start": 0,
"end": 615
} | class ____:
def spiralOrder(self, matrix: List[List[int]]) -> List[int]:
m, n = len(matrix), len(matrix[0])
dirs = (0, 1, 0, -1, 0)
i = j = k = 0
ans = []
for _ in range(m * n):
ans.append(matrix[i][j])
matrix[i][j] += 300
x, y = i + dirs[k], j + dirs[k + 1]
if x < 0 or x >= m or y < 0 or y >= n or matrix[x][y] > 100:
k = (k + 1) % 4
i += dirs[k]
j += dirs[k + 1]
for i in range(m):
for j in range(n):
matrix[i][j] -= 300
return ans
| Solution |
python | scipy__scipy | benchmarks/benchmarks/common.py | {
"start": 8666,
"end": 13529
} | class ____(Benchmark):
"""
Limits parameter combinations to `max_number` choices, chosen
pseudo-randomly with fixed seed.
Raises NotImplementedError (skip) if not in active set.
"""
num_param_combinations = 0
def setup(self, *args, **kwargs):
slow = is_xslow()
if slow:
# no need to skip
return
param_seed = kwargs.pop('param_seed', None)
if param_seed is None:
param_seed = 1
params = kwargs.pop('params', None)
if params is None:
params = self.params
num_param_combinations = kwargs.pop('num_param_combinations', None)
if num_param_combinations is None:
num_param_combinations = self.num_param_combinations
all_choices = list(itertools.product(*params))
rng = random.Random(param_seed)
rng.shuffle(all_choices)
active_choices = all_choices[:num_param_combinations]
if args not in active_choices:
raise NotImplementedError("skipped")
def get_max_rss_bytes(rusage):
"""
Extract the max RSS value in bytes.
"""
if not rusage:
return None
if sys.platform.startswith('linux'):
# On Linux getrusage() returns ru_maxrss in kilobytes
# https://man7.org/linux/man-pages/man2/getrusage.2.html
return rusage.ru_maxrss * 1024
elif sys.platform == "darwin":
# on macOS ru_maxrss is in bytes
return rusage.ru_maxrss
else:
# Unknown, just return whatever is here.
return rusage.ru_maxrss
def run_monitored_wait4(code):
"""
Run code in a new Python process, and monitor peak memory usage.
Returns
-------
duration : float
Duration in seconds (including Python startup time)
peak_memusage : int
Peak memory usage in bytes of the child Python process
Notes
-----
Works on Unix platforms (Linux, macOS) that have `os.wait4()`.
"""
code = textwrap.dedent(code)
start = time.time()
process = subprocess.Popen([sys.executable, '-c', code])
pid, returncode, rusage = os.wait4(process.pid, 0)
duration = time.time() - start
max_rss_bytes = get_max_rss_bytes(rusage)
if returncode != 0:
raise AssertionError(f"Running failed:\n{code}")
return duration, max_rss_bytes
def run_monitored_proc(code):
"""
Run code in a new Python process, and monitor peak memory usage.
Returns
-------
duration : float
Duration in seconds (including Python startup time)
peak_memusage : float
Peak memory usage (rough estimate only) in bytes
"""
if not sys.platform.startswith('linux'):
raise RuntimeError("Peak memory monitoring only works on Linux")
code = textwrap.dedent(code)
process = subprocess.Popen([sys.executable, '-c', code])
peak_memusage = -1
start = time.time()
while True:
ret = process.poll()
if ret is not None:
break
with open(f'/proc/{process.pid}/status') as f:
procdata = f.read()
m = re.search(r'VmRSS:\s*(\d+)\s*kB', procdata, re.S | re.I)
if m is not None:
memusage = float(m.group(1)) * 1e3
peak_memusage = max(memusage, peak_memusage)
time.sleep(0.01)
process.wait()
duration = time.time() - start
if process.returncode != 0:
raise AssertionError(f"Running failed:\n{code}")
return duration, peak_memusage
def run_monitored(code):
"""
Run code in a new Python process, and monitor peak memory usage.
Returns
-------
duration : float
Duration in seconds (including Python startup time)
peak_memusage : float or int
Peak memory usage (rough estimate only) in bytes
"""
if hasattr(os, 'wait4'):
return run_monitored_wait4(code)
else:
return run_monitored_proc(code)
def get_mem_info():
"""Get information about available memory"""
import psutil
vm = psutil.virtual_memory()
return {
"memtotal": vm.total,
"memavailable": vm.available,
}
def set_mem_rlimit(max_mem=None):
"""
Set address space rlimit
"""
import resource
if max_mem is None:
mem_info = get_mem_info()
max_mem = int(mem_info['memtotal'] * 0.7)
cur_limit = resource.getrlimit(resource.RLIMIT_AS)
if cur_limit[0] > 0:
max_mem = min(max_mem, cur_limit[0])
try:
resource.setrlimit(resource.RLIMIT_AS, (max_mem, cur_limit[1]))
except ValueError:
# on macOS may raise: current limit exceeds maximum limit
pass
def with_attributes(**attrs):
def decorator(func):
for key, value in attrs.items():
setattr(func, key, value)
return func
return decorator
| LimitedParamBenchmark |
python | django__django | django/db/models/functions/window.py | {
"start": 1418,
"end": 1514
} | class ____(Func):
arity = 1
function = "LAST_VALUE"
window_compatible = True
| LastValue |
python | pandas-dev__pandas | asv_bench/benchmarks/groupby.py | {
"start": 1722,
"end": 3107
} | class ____:
param_names = ["factor"]
params = [4, 5]
def setup(self, factor):
N = 10**factor
# two cases:
# - small groups: small data (N**4) + many labels (2000) -> average group
# size of 5 (-> larger overhead of slicing method)
# - larger groups: larger data (N**5) + fewer labels (20) -> average group
# size of 5000
labels = np.random.randint(0, 2000 if factor == 4 else 20, size=N)
labels2 = np.random.randint(0, 3, size=N)
df = DataFrame(
{
"key": labels,
"key2": labels2,
"value1": np.random.randn(N),
"value2": ["foo", "bar", "baz", "qux"] * (N // 4),
}
)
self.df = df
def time_scalar_function_multi_col(self, factor):
self.df.groupby(["key", "key2"]).apply(lambda x: 1)
def time_scalar_function_single_col(self, factor):
self.df.groupby("key").apply(lambda x: 1)
@staticmethod
def df_copy_function(g):
# ensure that the group name is available (see GH #15062)
g.name
return g.copy()
def time_copy_function_multi_col(self, factor):
self.df.groupby(["key", "key2"]).apply(self.df_copy_function)
def time_copy_overhead_single_col(self, factor):
self.df.groupby("key").apply(self.df_copy_function)
| Apply |
python | walkccc__LeetCode | solutions/2949. Count Beautiful Substrings II/2949.py | {
"start": 0,
"end": 721
} | class ____:
# Same as 2947. Count Beautiful Substrings I
def beautifulSubstrings(self, s: str, k: int) -> int:
VOWELS = 'aeiou'
root = self._getRoot(k)
ans = 0
vowels = 0
vowelsMinusConsonants = 0
# {(vowels, vowelsMinusConsonants): count}
prefixCount = collections.Counter({(0, 0): 1})
for c in s:
if c in VOWELS:
vowelsMinusConsonants += 1
vowels = (vowels + 1) % root
else:
vowelsMinusConsonants -= 1
ans += prefixCount[(vowels, vowelsMinusConsonants)]
prefixCount[(vowels, vowelsMinusConsonants)] += 1
return ans
def _getRoot(self, k: int) -> int:
for i in range(1, k + 1):
if i * i % k == 0:
return i
| Solution |
python | scrapy__scrapy | tests/mockserver/http_resources.py | {
"start": 8706,
"end": 9182
} | class ____(resource.Resource):
"""Return the full uri that was requested"""
def getChild(self, path, request):
return self
def render(self, request):
# Note: this is an ugly hack for CONNECT request timeout test.
# Returning some data here fail SSL/TLS handshake
# ToDo: implement proper HTTPS proxy tests, not faking them.
if request.method != b"CONNECT":
return request.uri
return b""
| UriResource |
python | pypa__setuptools | setuptools/_distutils/tests/test_clean.py | {
"start": 132,
"end": 1240
} | class ____(support.TempdirManager):
def test_simple_run(self):
pkg_dir, dist = self.create_dist()
cmd = clean(dist)
# let's add some elements clean should remove
dirs = [
(d, os.path.join(pkg_dir, d))
for d in (
'build_temp',
'build_lib',
'bdist_base',
'build_scripts',
'build_base',
)
]
for name, path in dirs:
os.mkdir(path)
setattr(cmd, name, path)
if name == 'build_base':
continue
for f in ('one', 'two', 'three'):
self.write_file(os.path.join(path, f))
# let's run the command
cmd.all = 1
cmd.ensure_finalized()
cmd.run()
# make sure the files where removed
for _name, path in dirs:
assert not os.path.exists(path), f'{path} was not removed'
# let's run the command again (should spit warnings but succeed)
cmd.all = 1
cmd.ensure_finalized()
cmd.run()
| TestClean |
python | scrapy__scrapy | tests/test_http_request.py | {
"start": 16007,
"end": 57192
} | class ____(TestRequest):
request_class = FormRequest
def assertQueryEqual(self, first, second, msg=None):
first = to_unicode(first).split("&")
second = to_unicode(second).split("&")
assert sorted(first) == sorted(second), msg
def test_empty_formdata(self):
r1 = self.request_class("http://www.example.com", formdata={})
assert r1.body == b""
def test_formdata_overrides_querystring(self):
data = (("a", "one"), ("a", "two"), ("b", "2"))
url = self.request_class(
"http://www.example.com/?a=0&b=1&c=3#fragment", method="GET", formdata=data
).url.split("#", maxsplit=1)[0]
fs = _qs(self.request_class(url, method="GET", formdata=data))
assert set(fs[b"a"]) == {b"one", b"two"}
assert fs[b"b"] == [b"2"]
assert fs.get(b"c") is None
data = {"a": "1", "b": "2"}
fs = _qs(
self.request_class("http://www.example.com/", method="GET", formdata=data)
)
assert fs[b"a"] == [b"1"]
assert fs[b"b"] == [b"2"]
def test_default_encoding_bytes(self):
# using default encoding (utf-8)
data = {b"one": b"two", b"price": b"\xc2\xa3 100"}
r2 = self.request_class("http://www.example.com", formdata=data)
assert r2.method == "POST"
assert r2.encoding == "utf-8"
self.assertQueryEqual(r2.body, b"price=%C2%A3+100&one=two")
assert r2.headers[b"Content-Type"] == b"application/x-www-form-urlencoded"
def test_default_encoding_textual_data(self):
# using default encoding (utf-8)
data = {"µ one": "two", "price": "£ 100"}
r2 = self.request_class("http://www.example.com", formdata=data)
assert r2.method == "POST"
assert r2.encoding == "utf-8"
self.assertQueryEqual(r2.body, b"price=%C2%A3+100&%C2%B5+one=two")
assert r2.headers[b"Content-Type"] == b"application/x-www-form-urlencoded"
def test_default_encoding_mixed_data(self):
# using default encoding (utf-8)
data = {"\u00b5one": b"two", b"price\xc2\xa3": "\u00a3 100"}
r2 = self.request_class("http://www.example.com", formdata=data)
assert r2.method == "POST"
assert r2.encoding == "utf-8"
self.assertQueryEqual(r2.body, b"%C2%B5one=two&price%C2%A3=%C2%A3+100")
assert r2.headers[b"Content-Type"] == b"application/x-www-form-urlencoded"
def test_custom_encoding_bytes(self):
data = {b"\xb5 one": b"two", b"price": b"\xa3 100"}
r2 = self.request_class(
"http://www.example.com", formdata=data, encoding="latin1"
)
assert r2.method == "POST"
assert r2.encoding == "latin1"
self.assertQueryEqual(r2.body, b"price=%A3+100&%B5+one=two")
assert r2.headers[b"Content-Type"] == b"application/x-www-form-urlencoded"
def test_custom_encoding_textual_data(self):
data = {"price": "£ 100"}
r3 = self.request_class(
"http://www.example.com", formdata=data, encoding="latin1"
)
assert r3.encoding == "latin1"
assert r3.body == b"price=%A3+100"
def test_multi_key_values(self):
# using multiples values for a single key
data = {"price": "\xa3 100", "colours": ["red", "blue", "green"]}
r3 = self.request_class("http://www.example.com", formdata=data)
self.assertQueryEqual(
r3.body, b"colours=red&colours=blue&colours=green&price=%C2%A3+100"
)
def test_from_response_post(self):
response = _buildresponse(
b"""<form action="post.php" method="POST">
<input type="hidden" name="test" value="val1">
<input type="hidden" name="test" value="val2">
<input type="hidden" name="test2" value="xxx">
</form>""",
url="http://www.example.com/this/list.html",
)
req = self.request_class.from_response(
response, formdata={"one": ["two", "three"], "six": "seven"}
)
assert req.method == "POST"
assert req.headers[b"Content-type"] == b"application/x-www-form-urlencoded"
assert req.url == "http://www.example.com/this/post.php"
fs = _qs(req)
assert set(fs[b"test"]) == {b"val1", b"val2"}
assert set(fs[b"one"]) == {b"two", b"three"}
assert fs[b"test2"] == [b"xxx"]
assert fs[b"six"] == [b"seven"]
def test_from_response_post_nonascii_bytes_utf8(self):
response = _buildresponse(
b"""<form action="post.php" method="POST">
<input type="hidden" name="test \xc2\xa3" value="val1">
<input type="hidden" name="test \xc2\xa3" value="val2">
<input type="hidden" name="test2" value="xxx \xc2\xb5">
</form>""",
url="http://www.example.com/this/list.html",
)
req = self.request_class.from_response(
response, formdata={"one": ["two", "three"], "six": "seven"}
)
assert req.method == "POST"
assert req.headers[b"Content-type"] == b"application/x-www-form-urlencoded"
assert req.url == "http://www.example.com/this/post.php"
fs = _qs(req, to_unicode=True)
assert set(fs["test £"]) == {"val1", "val2"}
assert set(fs["one"]) == {"two", "three"}
assert fs["test2"] == ["xxx µ"]
assert fs["six"] == ["seven"]
def test_from_response_post_nonascii_bytes_latin1(self):
response = _buildresponse(
b"""<form action="post.php" method="POST">
<input type="hidden" name="test \xa3" value="val1">
<input type="hidden" name="test \xa3" value="val2">
<input type="hidden" name="test2" value="xxx \xb5">
</form>""",
url="http://www.example.com/this/list.html",
encoding="latin1",
)
req = self.request_class.from_response(
response, formdata={"one": ["two", "three"], "six": "seven"}
)
assert req.method == "POST"
assert req.headers[b"Content-type"] == b"application/x-www-form-urlencoded"
assert req.url == "http://www.example.com/this/post.php"
fs = _qs(req, to_unicode=True, encoding="latin1")
assert set(fs["test £"]) == {"val1", "val2"}
assert set(fs["one"]) == {"two", "three"}
assert fs["test2"] == ["xxx µ"]
assert fs["six"] == ["seven"]
def test_from_response_post_nonascii_unicode(self):
response = _buildresponse(
"""<form action="post.php" method="POST">
<input type="hidden" name="test £" value="val1">
<input type="hidden" name="test £" value="val2">
<input type="hidden" name="test2" value="xxx µ">
</form>""",
url="http://www.example.com/this/list.html",
)
req = self.request_class.from_response(
response, formdata={"one": ["two", "three"], "six": "seven"}
)
assert req.method == "POST"
assert req.headers[b"Content-type"] == b"application/x-www-form-urlencoded"
assert req.url == "http://www.example.com/this/post.php"
fs = _qs(req, to_unicode=True)
assert set(fs["test £"]) == {"val1", "val2"}
assert set(fs["one"]) == {"two", "three"}
assert fs["test2"] == ["xxx µ"]
assert fs["six"] == ["seven"]
def test_from_response_duplicate_form_key(self):
response = _buildresponse("<form></form>", url="http://www.example.com")
req = self.request_class.from_response(
response=response,
method="GET",
formdata=(("foo", "bar"), ("foo", "baz")),
)
assert urlparse_cached(req).hostname == "www.example.com"
assert urlparse_cached(req).query == "foo=bar&foo=baz"
def test_from_response_override_duplicate_form_key(self):
response = _buildresponse(
"""<form action="get.php" method="POST">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="3">
</form>"""
)
req = self.request_class.from_response(
response, formdata=(("two", "2"), ("two", "4"))
)
fs = _qs(req)
assert fs[b"one"] == [b"1"]
assert fs[b"two"] == [b"2", b"4"]
def test_from_response_extra_headers(self):
response = _buildresponse(
"""<form action="post.php" method="POST">
<input type="hidden" name="test" value="val1">
<input type="hidden" name="test" value="val2">
<input type="hidden" name="test2" value="xxx">
</form>"""
)
req = self.request_class.from_response(
response=response,
formdata={"one": ["two", "three"], "six": "seven"},
headers={"Accept-Encoding": "gzip,deflate"},
)
assert req.method == "POST"
assert req.headers["Content-type"] == b"application/x-www-form-urlencoded"
assert req.headers["Accept-Encoding"] == b"gzip,deflate"
def test_from_response_get(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="hidden" name="test" value="val1">
<input type="hidden" name="test" value="val2">
<input type="hidden" name="test2" value="xxx">
</form>""",
url="http://www.example.com/this/list.html",
)
r1 = self.request_class.from_response(
response, formdata={"one": ["two", "three"], "six": "seven"}
)
assert r1.method == "GET"
assert urlparse_cached(r1).hostname == "www.example.com"
assert urlparse_cached(r1).path == "/this/get.php"
fs = _qs(r1)
assert set(fs[b"test"]) == {b"val1", b"val2"}
assert set(fs[b"one"]) == {b"two", b"three"}
assert fs[b"test2"] == [b"xxx"]
assert fs[b"six"] == [b"seven"]
def test_from_response_override_params(self):
response = _buildresponse(
"""<form action="get.php" method="POST">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="3">
</form>"""
)
req = self.request_class.from_response(response, formdata={"two": "2"})
fs = _qs(req)
assert fs[b"one"] == [b"1"]
assert fs[b"two"] == [b"2"]
def test_from_response_drop_params(self):
response = _buildresponse(
"""<form action="get.php" method="POST">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="3">
</form>"""
)
req = self.request_class.from_response(response, formdata={"two": None})
fs = _qs(req)
assert fs[b"one"] == [b"1"]
assert b"two" not in fs
def test_from_response_override_method(self):
response = _buildresponse(
"""<html><body>
<form action="/app"></form>
</body></html>"""
)
request = FormRequest.from_response(response)
assert request.method == "GET"
request = FormRequest.from_response(response, method="POST")
assert request.method == "POST"
def test_from_response_override_url(self):
response = _buildresponse(
"""<html><body>
<form action="/app"></form>
</body></html>"""
)
request = FormRequest.from_response(response)
assert request.url == "http://example.com/app"
request = FormRequest.from_response(response, url="http://foo.bar/absolute")
assert request.url == "http://foo.bar/absolute"
request = FormRequest.from_response(response, url="/relative")
assert request.url == "http://example.com/relative"
def test_from_response_case_insensitive(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="SuBmIt" name="clickable1" value="clicked1">
<input type="iMaGe" name="i1" src="http://my.image.org/1.jpg">
<input type="submit" name="clickable2" value="clicked2">
</form>"""
)
req = self.request_class.from_response(response)
fs = _qs(req)
assert fs[b"clickable1"] == [b"clicked1"]
assert b"i1" not in fs, fs # xpath in _get_inputs()
assert b"clickable2" not in fs, fs # xpath in _get_clickable()
def test_from_response_submit_first_clickable(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="submit" name="clickable1" value="clicked1">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="3">
<input type="submit" name="clickable2" value="clicked2">
</form>"""
)
req = self.request_class.from_response(response, formdata={"two": "2"})
fs = _qs(req)
assert fs[b"clickable1"] == [b"clicked1"]
assert b"clickable2" not in fs, fs
assert fs[b"one"] == [b"1"]
assert fs[b"two"] == [b"2"]
def test_from_response_submit_not_first_clickable(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="submit" name="clickable1" value="clicked1">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="3">
<input type="submit" name="clickable2" value="clicked2">
</form>"""
)
req = self.request_class.from_response(
response, formdata={"two": "2"}, clickdata={"name": "clickable2"}
)
fs = _qs(req)
assert fs[b"clickable2"] == [b"clicked2"]
assert b"clickable1" not in fs, fs
assert fs[b"one"] == [b"1"]
assert fs[b"two"] == [b"2"]
def test_from_response_dont_submit_image_as_input(self):
response = _buildresponse(
"""<form>
<input type="hidden" name="i1" value="i1v">
<input type="image" name="i2" src="http://my.image.org/1.jpg">
<input type="submit" name="i3" value="i3v">
</form>"""
)
req = self.request_class.from_response(response, dont_click=True)
fs = _qs(req)
assert fs == {b"i1": [b"i1v"]}
def test_from_response_dont_submit_reset_as_input(self):
response = _buildresponse(
"""<form>
<input type="hidden" name="i1" value="i1v">
<input type="text" name="i2" value="i2v">
<input type="reset" name="resetme">
<input type="submit" name="i3" value="i3v">
</form>"""
)
req = self.request_class.from_response(response, dont_click=True)
fs = _qs(req)
assert fs == {b"i1": [b"i1v"], b"i2": [b"i2v"]}
def test_from_response_clickdata_does_not_ignore_image(self):
response = _buildresponse(
"""<form>
<input type="text" name="i1" value="i1v">
<input id="image" name="i2" type="image" value="i2v" alt="Login" src="http://my.image.org/1.jpg">
</form>"""
)
req = self.request_class.from_response(response)
fs = _qs(req)
assert fs == {b"i1": [b"i1v"], b"i2": [b"i2v"]}
def test_from_response_multiple_clickdata(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="submit" name="clickable" value="clicked1">
<input type="submit" name="clickable" value="clicked2">
<input type="hidden" name="one" value="clicked1">
<input type="hidden" name="two" value="clicked2">
</form>"""
)
req = self.request_class.from_response(
response, clickdata={"name": "clickable", "value": "clicked2"}
)
fs = _qs(req)
assert fs[b"clickable"] == [b"clicked2"]
assert fs[b"one"] == [b"clicked1"]
assert fs[b"two"] == [b"clicked2"]
def test_from_response_unicode_clickdata(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="submit" name="price in \u00a3" value="\u00a3 1000">
<input type="submit" name="price in \u20ac" value="\u20ac 2000">
<input type="hidden" name="poundsign" value="\u00a3">
<input type="hidden" name="eurosign" value="\u20ac">
</form>"""
)
req = self.request_class.from_response(
response, clickdata={"name": "price in \u00a3"}
)
fs = _qs(req, to_unicode=True)
assert fs["price in \u00a3"]
def test_from_response_unicode_clickdata_latin1(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="submit" name="price in \u00a3" value="\u00a3 1000">
<input type="submit" name="price in \u00a5" value="\u00a5 2000">
<input type="hidden" name="poundsign" value="\u00a3">
<input type="hidden" name="yensign" value="\u00a5">
</form>""",
encoding="latin1",
)
req = self.request_class.from_response(
response, clickdata={"name": "price in \u00a5"}
)
fs = _qs(req, to_unicode=True, encoding="latin1")
assert fs["price in \u00a5"]
def test_from_response_multiple_forms_clickdata(self):
response = _buildresponse(
"""<form name="form1">
<input type="submit" name="clickable" value="clicked1">
<input type="hidden" name="field1" value="value1">
</form>
<form name="form2">
<input type="submit" name="clickable" value="clicked2">
<input type="hidden" name="field2" value="value2">
</form>
"""
)
req = self.request_class.from_response(
response, formname="form2", clickdata={"name": "clickable"}
)
fs = _qs(req)
assert fs[b"clickable"] == [b"clicked2"]
assert fs[b"field2"] == [b"value2"]
assert b"field1" not in fs, fs
def test_from_response_override_clickable(self):
response = _buildresponse(
"""<form><input type="submit" name="clickme" value="one"> </form>"""
)
req = self.request_class.from_response(
response, formdata={"clickme": "two"}, clickdata={"name": "clickme"}
)
fs = _qs(req)
assert fs[b"clickme"] == [b"two"]
def test_from_response_dont_click(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="submit" name="clickable1" value="clicked1">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="3">
<input type="submit" name="clickable2" value="clicked2">
</form>"""
)
r1 = self.request_class.from_response(response, dont_click=True)
fs = _qs(r1)
assert b"clickable1" not in fs, fs
assert b"clickable2" not in fs, fs
def test_from_response_ambiguous_clickdata(self):
response = _buildresponse(
"""
<form action="get.php" method="GET">
<input type="submit" name="clickable1" value="clicked1">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="3">
<input type="submit" name="clickable2" value="clicked2">
</form>"""
)
with pytest.raises(
ValueError,
match=r"Multiple elements found .* matching the criteria in clickdata",
):
self.request_class.from_response(response, clickdata={"type": "submit"})
def test_from_response_non_matching_clickdata(self):
response = _buildresponse(
"""<form>
<input type="submit" name="clickable" value="clicked">
</form>"""
)
with pytest.raises(
ValueError, match="No clickable element matching clickdata:"
):
self.request_class.from_response(
response, clickdata={"nonexistent": "notme"}
)
def test_from_response_nr_index_clickdata(self):
response = _buildresponse(
"""<form>
<input type="submit" name="clickable1" value="clicked1">
<input type="submit" name="clickable2" value="clicked2">
</form>
"""
)
req = self.request_class.from_response(response, clickdata={"nr": 1})
fs = _qs(req)
assert b"clickable2" in fs
assert b"clickable1" not in fs
def test_from_response_invalid_nr_index_clickdata(self):
response = _buildresponse(
"""<form>
<input type="submit" name="clickable" value="clicked">
</form>
"""
)
with pytest.raises(
ValueError, match="No clickable element matching clickdata:"
):
self.request_class.from_response(response, clickdata={"nr": 1})
def test_from_response_errors_noform(self):
response = _buildresponse("""<html></html>""")
with pytest.raises(ValueError, match="No <form> element found in"):
self.request_class.from_response(response)
def test_from_response_invalid_html5(self):
response = _buildresponse(
"""<!DOCTYPE html><body></html><form>"""
"""<input type="text" name="foo" value="xxx">"""
"""</form></body></html>"""
)
req = self.request_class.from_response(response, formdata={"bar": "buz"})
fs = _qs(req)
assert fs == {b"foo": [b"xxx"], b"bar": [b"buz"]}
def test_from_response_errors_formnumber(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="hidden" name="test" value="val1">
<input type="hidden" name="test" value="val2">
<input type="hidden" name="test2" value="xxx">
</form>"""
)
with pytest.raises(IndexError):
self.request_class.from_response(response, formnumber=1)
def test_from_response_noformname(self):
response = _buildresponse(
"""<form action="post.php" method="POST">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="2">
</form>"""
)
r1 = self.request_class.from_response(response, formdata={"two": "3"})
assert r1.method == "POST"
assert r1.headers["Content-type"] == b"application/x-www-form-urlencoded"
fs = _qs(r1)
assert fs == {b"one": [b"1"], b"two": [b"3"]}
def test_from_response_formname_exists(self):
response = _buildresponse(
"""<form action="post.php" method="POST">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="2">
</form>
<form name="form2" action="post.php" method="POST">
<input type="hidden" name="three" value="3">
<input type="hidden" name="four" value="4">
</form>"""
)
r1 = self.request_class.from_response(response, formname="form2")
assert r1.method == "POST"
fs = _qs(r1)
assert fs == {b"four": [b"4"], b"three": [b"3"]}
def test_from_response_formname_nonexistent(self):
response = _buildresponse(
"""<form name="form1" action="post.php" method="POST">
<input type="hidden" name="one" value="1">
</form>
<form name="form2" action="post.php" method="POST">
<input type="hidden" name="two" value="2">
</form>"""
)
r1 = self.request_class.from_response(response, formname="form3")
assert r1.method == "POST"
fs = _qs(r1)
assert fs == {b"one": [b"1"]}
def test_from_response_formname_errors_formnumber(self):
response = _buildresponse(
"""<form name="form1" action="post.php" method="POST">
<input type="hidden" name="one" value="1">
</form>
<form name="form2" action="post.php" method="POST">
<input type="hidden" name="two" value="2">
</form>"""
)
with pytest.raises(IndexError):
self.request_class.from_response(response, formname="form3", formnumber=2)
def test_from_response_formid_exists(self):
response = _buildresponse(
"""<form action="post.php" method="POST">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="2">
</form>
<form id="form2" action="post.php" method="POST">
<input type="hidden" name="three" value="3">
<input type="hidden" name="four" value="4">
</form>"""
)
r1 = self.request_class.from_response(response, formid="form2")
assert r1.method == "POST"
fs = _qs(r1)
assert fs == {b"four": [b"4"], b"three": [b"3"]}
def test_from_response_formname_nonexistent_fallback_formid(self):
response = _buildresponse(
"""<form action="post.php" method="POST">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="2">
</form>
<form id="form2" name="form2" action="post.php" method="POST">
<input type="hidden" name="three" value="3">
<input type="hidden" name="four" value="4">
</form>"""
)
r1 = self.request_class.from_response(
response, formname="form3", formid="form2"
)
assert r1.method == "POST"
fs = _qs(r1)
assert fs == {b"four": [b"4"], b"three": [b"3"]}
def test_from_response_formid_nonexistent(self):
response = _buildresponse(
"""<form id="form1" action="post.php" method="POST">
<input type="hidden" name="one" value="1">
</form>
<form id="form2" action="post.php" method="POST">
<input type="hidden" name="two" value="2">
</form>"""
)
r1 = self.request_class.from_response(response, formid="form3")
assert r1.method == "POST"
fs = _qs(r1)
assert fs == {b"one": [b"1"]}
def test_from_response_formid_errors_formnumber(self):
response = _buildresponse(
"""<form id="form1" action="post.php" method="POST">
<input type="hidden" name="one" value="1">
</form>
<form id="form2" name="form2" action="post.php" method="POST">
<input type="hidden" name="two" value="2">
</form>"""
)
with pytest.raises(IndexError):
self.request_class.from_response(response, formid="form3", formnumber=2)
def test_from_response_select(self):
res = _buildresponse(
"""<form>
<select name="i1">
<option value="i1v1">option 1</option>
<option value="i1v2" selected>option 2</option>
</select>
<select name="i2">
<option value="i2v1">option 1</option>
<option value="i2v2">option 2</option>
</select>
<select>
<option value="i3v1">option 1</option>
<option value="i3v2">option 2</option>
</select>
<select name="i4" multiple>
<option value="i4v1">option 1</option>
<option value="i4v2" selected>option 2</option>
<option value="i4v3" selected>option 3</option>
</select>
<select name="i5" multiple>
<option value="i5v1">option 1</option>
<option value="i5v2">option 2</option>
</select>
<select name="i6"></select>
<select name="i7"/>
</form>"""
)
req = self.request_class.from_response(res)
fs = _qs(req, to_unicode=True)
assert fs == {"i1": ["i1v2"], "i2": ["i2v1"], "i4": ["i4v2", "i4v3"]}
def test_from_response_radio(self):
res = _buildresponse(
"""<form>
<input type="radio" name="i1" value="i1v1">
<input type="radio" name="i1" value="iv2" checked>
<input type="radio" name="i2" checked>
<input type="radio" name="i2">
<input type="radio" name="i3" value="i3v1">
<input type="radio" name="i3">
<input type="radio" value="i4v1">
<input type="radio">
</form>"""
)
req = self.request_class.from_response(res)
fs = _qs(req)
assert fs == {b"i1": [b"iv2"], b"i2": [b"on"]}
def test_from_response_checkbox(self):
res = _buildresponse(
"""<form>
<input type="checkbox" name="i1" value="i1v1">
<input type="checkbox" name="i1" value="iv2" checked>
<input type="checkbox" name="i2" checked>
<input type="checkbox" name="i2">
<input type="checkbox" name="i3" value="i3v1">
<input type="checkbox" name="i3">
<input type="checkbox" value="i4v1">
<input type="checkbox">
</form>"""
)
req = self.request_class.from_response(res)
fs = _qs(req)
assert fs == {b"i1": [b"iv2"], b"i2": [b"on"]}
def test_from_response_input_text(self):
res = _buildresponse(
"""<form>
<input type="text" name="i1" value="i1v1">
<input type="text" name="i2">
<input type="text" value="i3v1">
<input type="text">
<input name="i4" value="i4v1">
</form>"""
)
req = self.request_class.from_response(res)
fs = _qs(req)
assert fs == {b"i1": [b"i1v1"], b"i2": [b""], b"i4": [b"i4v1"]}
def test_from_response_input_hidden(self):
res = _buildresponse(
"""<form>
<input type="hidden" name="i1" value="i1v1">
<input type="hidden" name="i2">
<input type="hidden" value="i3v1">
<input type="hidden">
</form>"""
)
req = self.request_class.from_response(res)
fs = _qs(req)
assert fs == {b"i1": [b"i1v1"], b"i2": [b""]}
def test_from_response_input_textarea(self):
res = _buildresponse(
"""<form>
<textarea name="i1">i1v</textarea>
<textarea name="i2"></textarea>
<textarea name="i3"/>
<textarea>i4v</textarea>
</form>"""
)
req = self.request_class.from_response(res)
fs = _qs(req)
assert fs == {b"i1": [b"i1v"], b"i2": [b""], b"i3": [b""]}
def test_from_response_descendants(self):
res = _buildresponse(
"""<form>
<div>
<fieldset>
<input type="text" name="i1">
<select name="i2">
<option value="v1" selected>
</select>
</fieldset>
<input type="radio" name="i3" value="i3v2" checked>
<input type="checkbox" name="i4" value="i4v2" checked>
<textarea name="i5"></textarea>
<input type="hidden" name="h1" value="h1v">
</div>
<input type="hidden" name="h2" value="h2v">
</form>"""
)
req = self.request_class.from_response(res)
fs = _qs(req)
assert set(fs) == {b"h2", b"i2", b"i1", b"i3", b"h1", b"i5", b"i4"}
def test_from_response_xpath(self):
response = _buildresponse(
"""<form action="post.php" method="POST">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="2">
</form>
<form action="post2.php" method="POST">
<input type="hidden" name="three" value="3">
<input type="hidden" name="four" value="4">
</form>"""
)
r1 = self.request_class.from_response(
response, formxpath="//form[@action='post.php']"
)
fs = _qs(r1)
assert fs[b"one"] == [b"1"]
r1 = self.request_class.from_response(
response, formxpath="//form/input[@name='four']"
)
fs = _qs(r1)
assert fs[b"three"] == [b"3"]
with pytest.raises(ValueError, match="No <form> element found with"):
self.request_class.from_response(
response, formxpath="//form/input[@name='abc']"
)
def test_from_response_unicode_xpath(self):
response = _buildresponse(b'<form name="\xd1\x8a"></form>')
r = self.request_class.from_response(
response, formxpath="//form[@name='\u044a']"
)
fs = _qs(r)
assert not fs
xpath = "//form[@name='\u03b1']"
with pytest.raises(ValueError, match=re.escape(xpath)):
self.request_class.from_response(response, formxpath=xpath)
def test_from_response_button_submit(self):
response = _buildresponse(
"""<form action="post.php" method="POST">
<input type="hidden" name="test1" value="val1">
<input type="hidden" name="test2" value="val2">
<button type="submit" name="button1" value="submit1">Submit</button>
</form>""",
url="http://www.example.com/this/list.html",
)
req = self.request_class.from_response(response)
assert req.method == "POST"
assert req.headers["Content-type"] == b"application/x-www-form-urlencoded"
assert req.url == "http://www.example.com/this/post.php"
fs = _qs(req)
assert fs[b"test1"] == [b"val1"]
assert fs[b"test2"] == [b"val2"]
assert fs[b"button1"] == [b"submit1"]
def test_from_response_button_notype(self):
response = _buildresponse(
"""<form action="post.php" method="POST">
<input type="hidden" name="test1" value="val1">
<input type="hidden" name="test2" value="val2">
<button name="button1" value="submit1">Submit</button>
</form>""",
url="http://www.example.com/this/list.html",
)
req = self.request_class.from_response(response)
assert req.method == "POST"
assert req.headers["Content-type"] == b"application/x-www-form-urlencoded"
assert req.url == "http://www.example.com/this/post.php"
fs = _qs(req)
assert fs[b"test1"] == [b"val1"]
assert fs[b"test2"] == [b"val2"]
assert fs[b"button1"] == [b"submit1"]
def test_from_response_submit_novalue(self):
response = _buildresponse(
"""<form action="post.php" method="POST">
<input type="hidden" name="test1" value="val1">
<input type="hidden" name="test2" value="val2">
<input type="submit" name="button1">Submit</button>
</form>""",
url="http://www.example.com/this/list.html",
)
req = self.request_class.from_response(response)
assert req.method == "POST"
assert req.headers["Content-type"] == b"application/x-www-form-urlencoded"
assert req.url == "http://www.example.com/this/post.php"
fs = _qs(req)
assert fs[b"test1"] == [b"val1"]
assert fs[b"test2"] == [b"val2"]
assert fs[b"button1"] == [b""]
def test_from_response_button_novalue(self):
response = _buildresponse(
"""<form action="post.php" method="POST">
<input type="hidden" name="test1" value="val1">
<input type="hidden" name="test2" value="val2">
<button type="submit" name="button1">Submit</button>
</form>""",
url="http://www.example.com/this/list.html",
)
req = self.request_class.from_response(response)
assert req.method == "POST"
assert req.headers["Content-type"] == b"application/x-www-form-urlencoded"
assert req.url == "http://www.example.com/this/post.php"
fs = _qs(req)
assert fs[b"test1"] == [b"val1"]
assert fs[b"test2"] == [b"val2"]
assert fs[b"button1"] == [b""]
def test_html_base_form_action(self):
response = _buildresponse(
"""
<html>
<head>
<base href=" http://b.com/">
</head>
<body>
<form action="test_form">
</form>
</body>
</html>
""",
url="http://a.com/",
)
req = self.request_class.from_response(response)
assert req.url == "http://b.com/test_form"
def test_spaces_in_action(self):
resp = _buildresponse('<body><form action=" path\n"></form></body>')
req = self.request_class.from_response(resp)
assert req.url == "http://example.com/path"
def test_from_response_css(self):
response = _buildresponse(
"""<form action="post.php" method="POST">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="2">
</form>
<form action="post2.php" method="POST">
<input type="hidden" name="three" value="3">
<input type="hidden" name="four" value="4">
</form>"""
)
r1 = self.request_class.from_response(
response, formcss="form[action='post.php']"
)
fs = _qs(r1)
assert fs[b"one"] == [b"1"]
r1 = self.request_class.from_response(response, formcss="input[name='four']")
fs = _qs(r1)
assert fs[b"three"] == [b"3"]
with pytest.raises(ValueError, match="No <form> element found with"):
self.request_class.from_response(response, formcss="input[name='abc']")
def test_from_response_valid_form_methods(self):
form_methods = [
[method, method] for method in self.request_class.valid_form_methods
]
form_methods.append(["UNKNOWN", "GET"])
for method, expected in form_methods:
response = _buildresponse(
f'<form action="post.php" method="{method}">'
'<input type="hidden" name="one" value="1">'
"</form>"
)
r = self.request_class.from_response(response)
assert r.method == expected
def test_form_response_with_invalid_formdata_type_error(self):
"""Test that a ValueError is raised for non-iterable and non-dict formdata input"""
response = _buildresponse(
"""<html><body>
<form action="/submit" method="post">
<input type="text" name="test" value="value">
</form>
</body></html>"""
)
with pytest.raises(
ValueError, match="formdata should be a dict or iterable of tuples"
):
FormRequest.from_response(response, formdata=123)
def test_form_response_with_custom_invalid_formdata_value_error(self):
"""Test that a ValueError is raised for fault-inducing iterable formdata input"""
response = _buildresponse(
"""<html><body>
<form action="/submit" method="post">
<input type="text" name="test" value="value">
</form>
</body></html>"""
)
with pytest.raises(
ValueError, match="formdata should be a dict or iterable of tuples"
):
FormRequest.from_response(response, formdata=("a",))
def test_get_form_with_xpath_no_form_parent(self):
"""Test that _get_from raised a ValueError when an XPath selects an element
not nested within a <form> and no <form> parent is found"""
response = _buildresponse(
"""<html><body>
<div id="outside-form">
<p>This paragraph is not inside a form.</p>
</div>
<form action="/submit" method="post">
<input type="text" name="inside-form" value="">
</form>
</body></html>"""
)
with pytest.raises(ValueError, match="No <form> element found with"):
FormRequest.from_response(response, formxpath='//div[@id="outside-form"]/p')
def _buildresponse(body, **kwargs):
    """Build an HtmlResponse for *body* with sensible test defaults.

    Keyword arguments override the default url ("http://example.com")
    and encoding ("utf-8"); *body* can be overridden the same way.
    """
    defaults = {"body": body, "url": "http://example.com", "encoding": "utf-8"}
    for key, value in defaults.items():
        kwargs.setdefault(key, value)
    return HtmlResponse(**kwargs)
def _qs(req, encoding="utf-8", to_unicode=False):
qs = req.body if req.method == "POST" else req.url.partition("?")[2]
uqs = unquote_to_bytes(qs)
if to_unicode:
uqs = uqs.decode(encoding)
return parse_qs(uqs, True)
| TestFormRequest |
python | falconry__falcon | tests/test_redirects.py | {
"start": 1087,
"end": 1849
} | class ____:
# NOTE(kgriffs): You wouldn't necessarily use these types of
# http methods with these types of redirects; this is only
# done to simplify testing.
def on_get(self, req, resp):
raise falcon.HTTPMovedPermanently('/moved/perm', headers={'foo': 'bar'})
def on_post(self, req, resp):
raise falcon.HTTPFound('/found', headers={'foo': 'bar'})
def on_put(self, req, resp):
raise falcon.HTTPSeeOther('/see/other', headers={'foo': 'bar'})
def on_delete(self, req, resp):
raise falcon.HTTPTemporaryRedirect('/tmp/redirect', headers={'foo': 'bar'})
def on_head(self, req, resp):
raise falcon.HTTPPermanentRedirect('/perm/redirect', headers={'foo': 'bar'})
| RedirectingResourceWithHeaders |
python | huggingface__transformers | src/transformers/models/bridgetower/modeling_bridgetower.py | {
"start": 14047,
"end": 15481
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.link_tower_type = config.link_tower_type
self.hidden_size = config.hidden_size
if config.link_tower_type in ["add", "scaled_add", "interpolate"]:
if config.link_tower_type == "scaled_add":
self.scaled_factor = nn.Parameter(torch.tensor(1.0))
elif config.link_tower_type == "interpolate":
self.beta = nn.Parameter(torch.tensor(0.5))
self.LayerNorm = nn.LayerNorm(self.hidden_size, eps=config.layer_norm_eps)
else:
raise NotImplementedError(f"link_tower_type {config.link_tower_type} is not implemented")
def forward(self, hidden_states, cross_modal_hidden_states, attention_mask):
if self.link_tower_type == "add":
return self.LayerNorm(hidden_states + cross_modal_hidden_states)
elif self.link_tower_type == "scaled_add":
return self.LayerNorm(hidden_states * self.scaled_factor + cross_modal_hidden_states)
elif self.link_tower_type == "interpolate":
return self.LayerNorm(hidden_states * (1 - self.beta) + cross_modal_hidden_states * self.beta)
else:
raise NotImplementedError(f"link_tower_type {self.link_tower_type} is not implemented")
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BridgeTower
| BridgeTowerLinkTower |
python | allegroai__clearml | clearml/backend_api/services/v2_23/frames.py | {
"start": 299931,
"end": 310904
} | class ____(Request):
"""
Gets frames for a given view specification
:param dataview: Dataview ID
:type dataview: str
:param scroll_id: Scroll session id for getting the next batch of images
:type scroll_id: str
:param batch_size: Max number of images to be returned. Used only if scroll_id
is not provided.
:type batch_size: int
:param reset_scroll:
:type reset_scroll: bool
:param force_scroll_id:
:type force_scroll_id: bool
:param flow_control: Frames retreival that allows eiter one-directional
navigation (the default) or bidirectional
:type flow_control: FlowControl
:param random_seed: Optional random seed used for frame selection. If not
provided, one will be generated.
:type random_seed: int
:param node: Node number. This provides support for multi-node experiments
running multiple workers executing the same experiment on multiple processes or
machines
:type node: int
:param projection: Used to select which parts of the frame will be returned.
Each string represents a field or sub-field (using dot-separated notation). In
order to specify a specific array element, use array index as a field name. To
specify all array elements, use '*'.
:type projection: Sequence[str]
:param remove_none_values: If set to Truethen none values are removed from
frames (except for metadata)
:type remove_none_values: bool
:param clean_subfields: If set to Truethen both frame toplevel fields and
subfields are cleaned according to the schema. Otherwise only top level fields
:type clean_subfields: bool
"""
_service = "frames"
_action = "get_next_for_dataview_id"
_version = "2.23"
_schema = {
"definitions": {
"flow_control": {
"properties": {
"bidirectional": {
"description": (
"If set then frames retreival can go either forward or backwards. Otherwise only forward.\n"
" The default is False. The limitations of bidirectional navigation:\n "
" - Frames are always returned in sequential order\n - The"
" iteration is finite (no support for infinite iteration)\n "
),
"type": ["boolean", "null"],
},
"navigate_backwards": {
"description": (
"When bidirectional is True, settings this to True navigates backwards duing frames"
" retreival. Default is False"
),
"type": ["boolean", "null"],
},
},
"type": "object",
}
},
"properties": {
"batch_size": {
"default": 500,
"description": "Max number of images to be returned. Used only if scroll_id is not provided.",
"type": "integer",
},
"clean_subfields": {
"default": False,
"description": (
"If set to Truethen both frame toplevel fields and subfields are cleaned according to the schema."
" Otherwise only top level fields"
),
"type": "boolean",
},
"dataview": {"description": "Dataview ID", "type": "string"},
"flow_control": {
"$ref": "#/definitions/flow_control",
"description": (
"Frames retreival that allows eiter one-directional navigation (the default) or bidirectional"
),
},
"force_scroll_id": {"description": "", "type": "boolean"},
"node": {
"description": (
"Node number. This provides support for multi-node experiments running multiple workers executing"
" the same experiment on multiple processes or machines"
),
"type": "integer",
},
"projection": {
"description": (
"Used to select which parts of the frame will be returned. Each string represents a\n "
" field or sub-field (using dot-separated notation). In order to specify a specific array"
" element,\n use array index as a field name. To specify all array elements, use"
" '*'."
),
"items": {"type": "string"},
"type": "array",
},
"random_seed": {
"description": "Optional random seed used for frame selection. If not provided, one will be generated.",
"type": "integer",
},
"remove_none_values": {
"default": False,
"description": "If set to Truethen none values are removed from frames (except for metadata)",
"type": "boolean",
},
"reset_scroll": {"description": "", "type": "boolean"},
"scroll_id": {
"description": "Scroll session id for getting the next batch of images",
"type": "string",
},
},
"required": ["dataview"],
"type": "object",
}
def __init__(
self,
dataview,
scroll_id=None,
batch_size=500,
reset_scroll=None,
force_scroll_id=None,
flow_control=None,
random_seed=None,
node=None,
projection=None,
remove_none_values=False,
clean_subfields=False,
**kwargs
):
super(GetNextForDataviewIdRequest, self).__init__(**kwargs)
self.dataview = dataview
self.scroll_id = scroll_id
self.batch_size = batch_size
self.reset_scroll = reset_scroll
self.force_scroll_id = force_scroll_id
self.flow_control = flow_control
self.random_seed = random_seed
self.node = node
self.projection = projection
self.remove_none_values = remove_none_values
self.clean_subfields = clean_subfields
@schema_property("dataview")
def dataview(self):
return self._property_dataview
@dataview.setter
def dataview(self, value):
if value is None:
self._property_dataview = None
return
self.assert_isinstance(value, "dataview", six.string_types)
self._property_dataview = value
@schema_property("scroll_id")
def scroll_id(self):
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value):
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
@schema_property("batch_size")
def batch_size(self):
return self._property_batch_size
@batch_size.setter
def batch_size(self, value):
if value is None:
self._property_batch_size = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "batch_size", six.integer_types)
self._property_batch_size = value
@schema_property("reset_scroll")
def reset_scroll(self):
return self._property_reset_scroll
@reset_scroll.setter
def reset_scroll(self, value):
if value is None:
self._property_reset_scroll = None
return
self.assert_isinstance(value, "reset_scroll", (bool,))
self._property_reset_scroll = value
@schema_property("force_scroll_id")
def force_scroll_id(self):
return self._property_force_scroll_id
@force_scroll_id.setter
def force_scroll_id(self, value):
if value is None:
self._property_force_scroll_id = None
return
self.assert_isinstance(value, "force_scroll_id", (bool,))
self._property_force_scroll_id = value
@schema_property("flow_control")
def flow_control(self):
return self._property_flow_control
@flow_control.setter
def flow_control(self, value):
if value is None:
self._property_flow_control = None
return
if isinstance(value, dict):
value = FlowControl.from_dict(value)
else:
self.assert_isinstance(value, "flow_control", FlowControl)
self._property_flow_control = value
@schema_property("random_seed")
def random_seed(self):
return self._property_random_seed
@random_seed.setter
def random_seed(self, value):
if value is None:
self._property_random_seed = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "random_seed", six.integer_types)
self._property_random_seed = value
@schema_property("node")
def node(self):
return self._property_node
@node.setter
def node(self, value):
if value is None:
self._property_node = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "node", six.integer_types)
self._property_node = value
@schema_property("projection")
def projection(self):
return self._property_projection
@projection.setter
def projection(self, value):
if value is None:
self._property_projection = None
return
self.assert_isinstance(value, "projection", (list, tuple))
self.assert_isinstance(value, "projection", six.string_types, is_array=True)
self._property_projection = value
@schema_property("remove_none_values")
def remove_none_values(self):
return self._property_remove_none_values
@remove_none_values.setter
def remove_none_values(self, value):
if value is None:
self._property_remove_none_values = None
return
self.assert_isinstance(value, "remove_none_values", (bool,))
self._property_remove_none_values = value
@schema_property("clean_subfields")
def clean_subfields(self):
return self._property_clean_subfields
@clean_subfields.setter
def clean_subfields(self, value):
if value is None:
self._property_clean_subfields = None
return
self.assert_isinstance(value, "clean_subfields", (bool,))
self._property_clean_subfields = value
| GetNextForDataviewIdRequest |
python | ray-project__ray | rllib/examples/envs/classes/parametric_actions_cartpole.py | {
"start": 3328,
"end": 5484
} | class ____(gym.Env):
"""Same as the above ParametricActionsCartPole.
However, action embeddings are not published inside observations,
but will be learnt by the model.
At each step, we emit a dict of:
- the actual cart observation
- a mask of valid actions (e.g., [0, 0, 1, 0, 0, 1] for 6 max avail)
- action embeddings (w/ "dummy embedding" for invalid actions) are
outsourced in the model and will be learned.
"""
def __init__(self, max_avail_actions):
# Randomly set which two actions are valid and available.
self.left_idx, self.right_idx = random.sample(range(max_avail_actions), 2)
self.valid_avail_actions_mask = np.array(
[0.0] * max_avail_actions, dtype=np.int8
)
self.valid_avail_actions_mask[self.left_idx] = 1
self.valid_avail_actions_mask[self.right_idx] = 1
self.action_space = Discrete(max_avail_actions)
self.wrapped = gym.make("CartPole-v1")
self.observation_space = Dict(
{
"valid_avail_actions_mask": Box(0, 1, shape=(max_avail_actions,)),
"cart": self.wrapped.observation_space,
}
)
def reset(self, *, seed=None, options=None):
obs, infos = self.wrapped.reset()
return {
"valid_avail_actions_mask": self.valid_avail_actions_mask,
"cart": obs,
}, infos
def step(self, action):
if action == self.left_idx:
actual_action = 0
elif action == self.right_idx:
actual_action = 1
else:
raise ValueError(
"Chosen action was not one of the non-zero action embeddings",
action,
self.valid_avail_actions_mask,
self.left_idx,
self.right_idx,
)
orig_obs, rew, done, truncated, info = self.wrapped.step(actual_action)
obs = {
"valid_avail_actions_mask": self.valid_avail_actions_mask,
"cart": orig_obs,
}
return obs, rew, done, truncated, info
| ParametricActionsCartPoleNoEmbeddings |
python | langchain-ai__langchain | libs/langchain/langchain_classic/memory/buffer.py | {
"start": 3293,
"end": 6053
} | class ____(BaseMemory):
"""A basic memory implementation that simply stores the conversation history.
This stores the entire conversation history in memory without any
additional processing.
Equivalent to ConversationBufferMemory but tailored more specifically
for string-based conversations rather than chat models.
Note that additional processing may be required in some situations when the
conversation history is too large to fit in the context window of the model.
"""
human_prefix: str = "Human"
ai_prefix: str = "AI"
"""Prefix to use for AI generated responses."""
buffer: str = ""
output_key: str | None = None
input_key: str | None = None
memory_key: str = "history"
@pre_init
def validate_chains(cls, values: dict) -> dict:
"""Validate that return messages is not True."""
if values.get("return_messages", False):
msg = "return_messages must be False for ConversationStringBufferMemory"
raise ValueError(msg)
return values
@property
def memory_variables(self) -> list[str]:
"""Will always return list of memory variables."""
return [self.memory_key]
@override
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
"""Return history buffer."""
return {self.memory_key: self.buffer}
async def aload_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
"""Return history buffer."""
return self.load_memory_variables(inputs)
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
if self.input_key is None:
prompt_input_key = get_prompt_input_key(inputs, self.memory_variables)
else:
prompt_input_key = self.input_key
if self.output_key is None:
if len(outputs) != 1:
msg = f"One output key expected, got {outputs.keys()}"
raise ValueError(msg)
output_key = next(iter(outputs.keys()))
else:
output_key = self.output_key
human = f"{self.human_prefix}: " + inputs[prompt_input_key]
ai = f"{self.ai_prefix}: " + outputs[output_key]
self.buffer += f"\n{human}\n{ai}"
async def asave_context(
self,
inputs: dict[str, Any],
outputs: dict[str, str],
) -> None:
"""Save context from this conversation to buffer."""
return self.save_context(inputs, outputs)
def clear(self) -> None:
"""Clear memory contents."""
self.buffer = ""
@override
async def aclear(self) -> None:
self.clear()
| ConversationStringBufferMemory |
python | pandas-dev__pandas | pandas/tests/plotting/test_converter.py | {
"start": 10415,
"end": 12688
} | class ____:
"""Test timedelta converter"""
@pytest.mark.parametrize(
"x, decimal, format_expected",
[
(0.0, 0, "00:00:00"),
(3972320000000, 1, "01:06:12.3"),
(713233432000000, 2, "8 days 06:07:13.43"),
(32423432000000, 4, "09:00:23.4320"),
],
)
def test_format_timedelta_ticks(self, x, decimal, format_expected):
tdc = converter.TimeSeries_TimedeltaFormatter
result = tdc.format_timedelta_ticks(x, pos=None, n_decimals=decimal)
assert result == format_expected
@pytest.mark.parametrize("view_interval", [(1, 2), (2, 1)])
def test_call_w_different_view_intervals(self, view_interval, monkeypatch):
# previously broke on reversed xlmits; see GH37454
class mock_axis:
def get_view_interval(self):
return view_interval
tdc = converter.TimeSeries_TimedeltaFormatter()
monkeypatch.setattr(tdc, "axis", mock_axis())
tdc(0.0, 0)
@pytest.mark.parametrize("year_span", [11.25, 30, 80, 150, 400, 800, 1500, 2500, 3500])
# The range is limited to 11.25 at the bottom by if statements in
# the _quarterly_finder() function
def test_quarterly_finder(year_span):
vmin = -1000
vmax = vmin + year_span * 4
span = vmax - vmin + 1
if span < 45:
pytest.skip("the quarterly finder is only invoked if the span is >= 45")
nyears = span / 4
(min_anndef, maj_anndef) = converter._get_default_annual_spacing(nyears)
result = converter._quarterly_finder(vmin, vmax, to_offset("QE"))
quarters = PeriodIndex(
arrays.PeriodArray(np.array([x[0] for x in result]), dtype="period[Q]")
)
majors = np.array([x[1] for x in result])
minors = np.array([x[2] for x in result])
major_quarters = quarters[majors]
minor_quarters = quarters[minors]
check_major_years = major_quarters.year % maj_anndef == 0
check_minor_years = minor_quarters.year % min_anndef == 0
check_major_quarters = major_quarters.quarter == 1
check_minor_quarters = minor_quarters.quarter == 1
assert np.all(check_major_years)
assert np.all(check_minor_years)
assert np.all(check_major_quarters)
assert np.all(check_minor_quarters)
| TestTimeDeltaConverter |
python | tornadoweb__tornado | tornado/curl_httpclient.py | {
"start": 1294,
"end": 25001
} | class ____(AsyncHTTPClient):
def initialize( # type: ignore
self, max_clients: int = 10, defaults: Optional[Dict[str, Any]] = None
) -> None:
super().initialize(defaults=defaults)
# Typeshed is incomplete for CurlMulti, so just use Any for now.
self._multi = pycurl.CurlMulti() # type: Any
self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
self._curls = [self._curl_create() for i in range(max_clients)]
self._free_list = self._curls[:]
self._requests = (
collections.deque()
) # type: Deque[Tuple[HTTPRequest, Callable[[HTTPResponse], None], float]]
self._fds = {} # type: Dict[int, int]
self._timeout = None # type: Optional[object]
# libcurl has bugs that sometimes cause it to not report all
# relevant file descriptors and timeouts to TIMERFUNCTION/
# SOCKETFUNCTION. Mitigate the effects of such bugs by
# forcing a periodic scan of all active requests.
self._force_timeout_callback = ioloop.PeriodicCallback(
self._handle_force_timeout, 1000
)
self._force_timeout_callback.start()
# Work around a bug in libcurl 7.29.0: Some fields in the curl
# multi object are initialized lazily, and its destructor will
# segfault if it is destroyed without having been used. Add
# and remove a dummy handle to make sure everything is
# initialized.
dummy_curl_handle = pycurl.Curl()
self._multi.add_handle(dummy_curl_handle)
self._multi.remove_handle(dummy_curl_handle)
def close(self) -> None:
self._force_timeout_callback.stop()
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
for curl in self._curls:
curl.close()
self._multi.close()
super().close()
# Set below properties to None to reduce the reference count of current
# instance, because those properties hold some methods of current
# instance that will case circular reference.
self._force_timeout_callback = None # type: ignore
self._multi = None
def fetch_impl(
self, request: HTTPRequest, callback: Callable[[HTTPResponse], None]
) -> None:
self._requests.append((request, callback, self.io_loop.time()))
self._process_queue()
self._set_timeout(0)
def _handle_socket(self, event: int, fd: int, multi: Any, data: bytes) -> None:
"""Called by libcurl when it wants to change the file descriptors
it cares about.
"""
event_map = {
pycurl.POLL_NONE: ioloop.IOLoop.NONE,
pycurl.POLL_IN: ioloop.IOLoop.READ,
pycurl.POLL_OUT: ioloop.IOLoop.WRITE,
pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE,
}
if event == pycurl.POLL_REMOVE:
if fd in self._fds:
self.io_loop.remove_handler(fd)
del self._fds[fd]
else:
ioloop_event = event_map[event]
# libcurl sometimes closes a socket and then opens a new
# one using the same FD without giving us a POLL_NONE in
# between. This is a problem with the epoll IOLoop,
# because the kernel can tell when a socket is closed and
# removes it from the epoll automatically, causing future
# update_handler calls to fail. Since we can't tell when
# this has happened, always use remove and re-add
# instead of update.
if fd in self._fds:
self.io_loop.remove_handler(fd)
self.io_loop.add_handler(fd, self._handle_events, ioloop_event)
self._fds[fd] = ioloop_event
def _set_timeout(self, msecs: int) -> None:
"""Called by libcurl to schedule a timeout."""
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = self.io_loop.add_timeout(
self.io_loop.time() + msecs / 1000.0, self._handle_timeout
)
def _handle_events(self, fd: int, events: int) -> None:
"""Called by IOLoop when there is activity on one of our
file descriptors.
"""
action = 0
if events & ioloop.IOLoop.READ:
action |= pycurl.CSELECT_IN
if events & ioloop.IOLoop.WRITE:
action |= pycurl.CSELECT_OUT
while True:
try:
ret, num_handles = self._multi.socket_action(fd, action)
except pycurl.error as e:
ret = e.args[0]
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
self._finish_pending_requests()
def _handle_timeout(self) -> None:
"""Called by IOLoop when the requested timeout has passed."""
self._timeout = None
while True:
try:
ret, num_handles = self._multi.socket_action(pycurl.SOCKET_TIMEOUT, 0)
except pycurl.error as e:
ret = e.args[0]
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
self._finish_pending_requests()
# In theory, we shouldn't have to do this because curl will
# call _set_timeout whenever the timeout changes. However,
# sometimes after _handle_timeout we will need to reschedule
# immediately even though nothing has changed from curl's
# perspective. This is because when socket_action is
# called with SOCKET_TIMEOUT, libcurl decides internally which
# timeouts need to be processed by using a monotonic clock
# (where available) while tornado uses python's time.time()
# to decide when timeouts have occurred. When those clocks
# disagree on elapsed time (as they will whenever there is an
# NTP adjustment), tornado might call _handle_timeout before
# libcurl is ready. After each timeout, resync the scheduled
# timeout with libcurl's current state.
new_timeout = self._multi.timeout()
if new_timeout >= 0:
self._set_timeout(new_timeout)
def _handle_force_timeout(self) -> None:
"""Called by IOLoop periodically to ask libcurl to process any
events it may have forgotten about.
"""
while True:
try:
ret, num_handles = self._multi.socket_all()
except pycurl.error as e:
ret = e.args[0]
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
self._finish_pending_requests()
def _finish_pending_requests(self) -> None:
"""Process any requests that were completed by the last
call to multi.socket_action.
"""
while True:
num_q, ok_list, err_list = self._multi.info_read()
for curl in ok_list:
self._finish(curl)
for curl, errnum, errmsg in err_list:
self._finish(curl, errnum, errmsg)
if num_q == 0:
break
self._process_queue()
def _process_queue(self) -> None:
while True:
started = 0
while self._free_list and self._requests:
started += 1
curl = self._free_list.pop()
(request, callback, queue_start_time) = self._requests.popleft()
# TODO: Don't smuggle extra data on an attribute of the Curl object.
curl.info = { # type: ignore
"headers": httputil.HTTPHeaders(),
"buffer": BytesIO(),
"request": request,
"callback": callback,
"queue_start_time": queue_start_time,
"curl_start_time": time.time(),
"curl_start_ioloop_time": self.io_loop.current().time(), # type: ignore
}
try:
self._curl_setup_request(
curl,
request,
curl.info["buffer"], # type: ignore
curl.info["headers"], # type: ignore
)
except Exception as e:
# If there was an error in setup, pass it on
# to the callback. Note that allowing the
# error to escape here will appear to work
# most of the time since we are still in the
# caller's original stack frame, but when
# _process_queue() is called from
# _finish_pending_requests the exceptions have
# nowhere to go.
self._free_list.append(curl)
callback(HTTPResponse(request=request, code=599, error=e))
else:
self._multi.add_handle(curl)
if not started:
break
def _finish(
self,
curl: pycurl.Curl,
curl_error: Optional[int] = None,
curl_message: Optional[str] = None,
) -> None:
info = curl.info # type: ignore
curl.info = None # type: ignore
self._multi.remove_handle(curl)
self._free_list.append(curl)
buffer = info["buffer"]
if curl_error:
assert curl_message is not None
error = CurlError(curl_error, curl_message) # type: Optional[CurlError]
assert error is not None
code = error.code
effective_url = None
buffer.close()
buffer = None
else:
error = None
code = curl.getinfo(pycurl.HTTP_CODE)
effective_url = curl.getinfo(pycurl.EFFECTIVE_URL)
buffer.seek(0)
# the various curl timings are documented at
# http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html
time_info = dict(
queue=info["curl_start_ioloop_time"] - info["queue_start_time"],
namelookup=curl.getinfo(pycurl.NAMELOOKUP_TIME),
connect=curl.getinfo(pycurl.CONNECT_TIME),
appconnect=curl.getinfo(pycurl.APPCONNECT_TIME),
pretransfer=curl.getinfo(pycurl.PRETRANSFER_TIME),
starttransfer=curl.getinfo(pycurl.STARTTRANSFER_TIME),
total=curl.getinfo(pycurl.TOTAL_TIME),
redirect=curl.getinfo(pycurl.REDIRECT_TIME),
)
try:
info["callback"](
HTTPResponse(
request=info["request"],
code=code,
headers=info["headers"],
buffer=buffer,
effective_url=effective_url,
error=error,
reason=info["headers"].get("X-Http-Reason", None),
request_time=self.io_loop.time() - info["curl_start_ioloop_time"],
start_time=info["curl_start_time"],
time_info=time_info,
)
)
except Exception:
self.handle_callback_exception(info["callback"])
def handle_callback_exception(self, callback: Any) -> None:
app_log.error("Exception in callback %r", callback, exc_info=True)
def _curl_create(self) -> pycurl.Curl:
curl = pycurl.Curl()
if curl_log.isEnabledFor(logging.DEBUG):
curl.setopt(pycurl.VERBOSE, 1)
curl.setopt(pycurl.DEBUGFUNCTION, self._curl_debug)
if hasattr(
pycurl, "PROTOCOLS"
): # PROTOCOLS first appeared in pycurl 7.19.5 (2014-07-12)
curl.setopt(pycurl.PROTOCOLS, pycurl.PROTO_HTTP | pycurl.PROTO_HTTPS)
curl.setopt(pycurl.REDIR_PROTOCOLS, pycurl.PROTO_HTTP | pycurl.PROTO_HTTPS)
return curl
def _curl_setup_request(
self,
curl: pycurl.Curl,
request: HTTPRequest,
buffer: BytesIO,
headers: httputil.HTTPHeaders,
) -> None:
curl.setopt(pycurl.URL, native_str(request.url))
# libcurl's magic "Expect: 100-continue" behavior causes delays
# with servers that don't support it (which include, among others,
# Google's OpenID endpoint). Additionally, this behavior has
# a bug in conjunction with the curl_multi_socket_action API
# (https://sourceforge.net/tracker/?func=detail&atid=100976&aid=3039744&group_id=976),
# which increases the delays. It's more trouble than it's worth,
# so just turn off the feature (yes, setting Expect: to an empty
# value is the official way to disable this)
if "Expect" not in request.headers:
request.headers["Expect"] = ""
# libcurl adds Pragma: no-cache by default; disable that too
if "Pragma" not in request.headers:
request.headers["Pragma"] = ""
encoded_headers = [
b"%s: %s"
% (native_str(k).encode("ASCII"), native_str(v).encode("ISO8859-1"))
for k, v in request.headers.get_all()
]
for line in encoded_headers:
if CR_OR_LF_RE.search(line):
raise ValueError("Illegal characters in header (CR or LF): %r" % line)
curl.setopt(pycurl.HTTPHEADER, encoded_headers)
curl.setopt(
pycurl.HEADERFUNCTION,
functools.partial(
self._curl_header_callback, headers, request.header_callback
),
)
if request.streaming_callback:
if gen.is_coroutine_function(
request.streaming_callback
) or inspect.iscoroutinefunction(request.streaming_callback):
raise TypeError(
"'CurlAsyncHTTPClient' does not support async streaming_callbacks."
)
def write_function(b: Union[bytes, bytearray]) -> int:
assert request.streaming_callback is not None
self.io_loop.add_callback(request.streaming_callback, b)
return len(b)
else:
write_function = buffer.write # type: ignore
curl.setopt(pycurl.WRITEFUNCTION, write_function)
curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects)
curl.setopt(pycurl.MAXREDIRS, request.max_redirects)
assert request.connect_timeout is not None
curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(1000 * request.connect_timeout))
assert request.request_timeout is not None
curl.setopt(pycurl.TIMEOUT_MS, int(1000 * request.request_timeout))
if request.user_agent:
curl.setopt(pycurl.USERAGENT, native_str(request.user_agent))
else:
curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)")
if request.network_interface:
curl.setopt(pycurl.INTERFACE, request.network_interface)
if request.decompress_response:
curl.setopt(pycurl.ENCODING, "gzip,deflate")
else:
curl.setopt(pycurl.ENCODING, None)
if request.proxy_host and request.proxy_port:
curl.setopt(pycurl.PROXY, request.proxy_host)
curl.setopt(pycurl.PROXYPORT, request.proxy_port)
if request.proxy_username:
assert request.proxy_password is not None
credentials = httputil.encode_username_password(
request.proxy_username, request.proxy_password
)
curl.setopt(pycurl.PROXYUSERPWD, credentials)
if request.proxy_auth_mode is None or request.proxy_auth_mode == "basic":
curl.setopt(pycurl.PROXYAUTH, pycurl.HTTPAUTH_BASIC)
elif request.proxy_auth_mode == "digest":
curl.setopt(pycurl.PROXYAUTH, pycurl.HTTPAUTH_DIGEST)
else:
raise ValueError(
"Unsupported proxy_auth_mode %s" % request.proxy_auth_mode
)
else:
try:
curl.unsetopt(pycurl.PROXY)
except TypeError: # not supported, disable proxy
curl.setopt(pycurl.PROXY, "")
curl.unsetopt(pycurl.PROXYUSERPWD)
if request.validate_cert:
curl.setopt(pycurl.SSL_VERIFYPEER, 1)
curl.setopt(pycurl.SSL_VERIFYHOST, 2)
else:
curl.setopt(pycurl.SSL_VERIFYPEER, 0)
curl.setopt(pycurl.SSL_VERIFYHOST, 0)
if request.ca_certs is not None:
curl.setopt(pycurl.CAINFO, request.ca_certs)
else:
# There is no way to restore pycurl.CAINFO to its default value
# (Using unsetopt makes it reject all certificates).
# I don't see any way to read the default value from python so it
# can be restored later. We'll have to just leave CAINFO untouched
# if no ca_certs file was specified, and require that if any
# request uses a custom ca_certs file, they all must.
pass
if request.allow_ipv6 is False:
# Curl behaves reasonably when DNS resolution gives an ipv6 address
# that we can't reach, so allow ipv6 unless the user asks to disable.
curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4)
else:
curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER)
# Set the request method through curl's irritating interface which makes
# up names for almost every single method
curl_options = {
"GET": pycurl.HTTPGET,
"POST": pycurl.POST,
"PUT": pycurl.UPLOAD,
"HEAD": pycurl.NOBODY,
}
custom_methods = {"DELETE", "OPTIONS", "PATCH"}
for o in curl_options.values():
curl.setopt(o, False)
if request.method in curl_options:
curl.unsetopt(pycurl.CUSTOMREQUEST)
curl.setopt(curl_options[request.method], True)
elif request.allow_nonstandard_methods or request.method in custom_methods:
curl.setopt(pycurl.CUSTOMREQUEST, request.method)
else:
raise KeyError("unknown method " + request.method)
body_expected = request.method in ("POST", "PATCH", "PUT")
body_present = request.body is not None
if not request.allow_nonstandard_methods:
# Some HTTP methods nearly always have bodies while others
# almost never do. Fail in this case unless the user has
# opted out of sanity checks with allow_nonstandard_methods.
if (body_expected and not body_present) or (
body_present and not body_expected
):
raise ValueError(
"Body must %sbe None for method %s (unless "
"allow_nonstandard_methods is true)"
% ("not " if body_expected else "", request.method)
)
if body_expected or body_present:
if request.method == "GET":
# Even with `allow_nonstandard_methods` we disallow
# GET with a body (because libcurl doesn't allow it
# unless we use CUSTOMREQUEST). While the spec doesn't
# forbid clients from sending a body, it arguably
# disallows the server from doing anything with them.
raise ValueError("Body must be None for GET request")
request_buffer = BytesIO(utf8(request.body or ""))
def ioctl(cmd: int) -> None:
if cmd == curl.IOCMD_RESTARTREAD: # type: ignore
request_buffer.seek(0)
curl.setopt(pycurl.READFUNCTION, request_buffer.read)
curl.setopt(pycurl.IOCTLFUNCTION, ioctl)
if request.method == "POST":
curl.setopt(pycurl.POSTFIELDSIZE, len(request.body or ""))
else:
curl.setopt(pycurl.UPLOAD, True)
curl.setopt(pycurl.INFILESIZE, len(request.body or ""))
if request.auth_username is not None:
assert request.auth_password is not None
if request.auth_mode is None or request.auth_mode == "basic":
curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
elif request.auth_mode == "digest":
curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_DIGEST)
else:
raise ValueError("Unsupported auth_mode %s" % request.auth_mode)
userpwd = httputil.encode_username_password(
request.auth_username, request.auth_password
)
curl.setopt(pycurl.USERPWD, userpwd)
curl_log.debug(
"%s %s (username: %r)",
request.method,
request.url,
request.auth_username,
)
else:
curl.unsetopt(pycurl.USERPWD)
curl_log.debug("%s %s", request.method, request.url)
if request.client_cert is not None:
curl.setopt(pycurl.SSLCERT, request.client_cert)
if request.client_key is not None:
curl.setopt(pycurl.SSLKEY, request.client_key)
if request.ssl_options is not None:
raise ValueError("ssl_options not supported in curl_httpclient")
if threading.active_count() > 1:
# libcurl/pycurl is not thread-safe by default. When multiple threads
# are used, signals should be disabled. This has the side effect
# of disabling DNS timeouts in some environments (when libcurl is
# not linked against ares), so we don't do it when there is only one
# thread. Applications that use many short-lived threads may need
# to set NOSIGNAL manually in a prepare_curl_callback since
# there may not be any other threads running at the time we call
# threading.activeCount.
curl.setopt(pycurl.NOSIGNAL, 1)
if request.prepare_curl_callback is not None:
request.prepare_curl_callback(curl)
def _curl_header_callback(
self,
headers: httputil.HTTPHeaders,
header_callback: Optional[Callable[[str], None]],
header_line_bytes: bytes,
) -> None:
header_line = native_str(header_line_bytes.decode("latin1"))
if header_callback is not None:
self.io_loop.add_callback(header_callback, header_line)
# header_line as returned by curl includes the end-of-line characters.
# whitespace at the start should be preserved to allow multi-line headers
header_line = header_line.rstrip()
if header_line.startswith("HTTP/"):
headers.clear()
try:
(_version, _code, reason) = httputil.parse_response_start_line(
header_line
)
header_line = "X-Http-Reason: %s" % reason
except httputil.HTTPInputError:
return
if not header_line:
return
headers.parse_line(header_line)
def _curl_debug(self, debug_type: int, debug_msg: str) -> None:
debug_types = ("I", "<", ">", "<", ">")
if debug_type == 0:
debug_msg = native_str(debug_msg)
curl_log.debug("%s", debug_msg.strip())
elif debug_type in (1, 2):
debug_msg = native_str(debug_msg)
for line in debug_msg.splitlines():
curl_log.debug("%s %s", debug_types[debug_type], line)
elif debug_type == 4:
curl_log.debug("%s %r", debug_types[debug_type], debug_msg)
| CurlAsyncHTTPClient |
python | langchain-ai__langchain | libs/core/langchain_core/structured_query.py | {
"start": 3094,
"end": 3795
} | class ____(FilterDirective):
"""Comparison to a value."""
comparator: Comparator
"""The comparator to use."""
attribute: str
"""The attribute to compare."""
value: Any
"""The value to compare to."""
def __init__(
self, comparator: Comparator, attribute: str, value: Any, **kwargs: Any
) -> None:
"""Create a Comparison.
Args:
comparator: The comparator to use.
attribute: The attribute to compare.
value: The value to compare to.
"""
# super exists from BaseModel
super().__init__(
comparator=comparator, attribute=attribute, value=value, **kwargs
)
| Comparison |
python | pypa__pipenv | pipenv/patched/pip/_internal/index/collector.py | {
"start": 8817,
"end": 12821
} | class ____(HTMLParser):
"""
HTMLParser that keeps the first base HREF and a list of all anchor
elements' attributes.
"""
def __init__(self, url: str) -> None:
super().__init__(convert_charrefs=True)
self.url: str = url
self.base_url: Optional[str] = None
self.anchors: List[Dict[str, Optional[str]]] = []
def handle_starttag(self, tag: str, attrs: List[Tuple[str, Optional[str]]]) -> None:
if tag == "base" and self.base_url is None:
href = self.get_href(attrs)
if href is not None:
self.base_url = href
elif tag == "a":
self.anchors.append(dict(attrs))
def get_href(self, attrs: List[Tuple[str, Optional[str]]]) -> Optional[str]:
for name, value in attrs:
if name == "href":
return value
return None
def _handle_get_simple_fail(
link: Link,
reason: Union[str, Exception],
meth: Optional[Callable[..., None]] = None,
) -> None:
if meth is None:
meth = logger.debug
meth("Could not fetch URL %s: %s - skipping", link, reason)
def _make_index_content(
response: Response, cache_link_parsing: bool = True
) -> IndexContent:
encoding = _get_encoding_from_headers(response.headers)
return IndexContent(
response.content,
response.headers["Content-Type"],
encoding=encoding,
url=response.url,
cache_link_parsing=cache_link_parsing,
)
def _get_index_content(link: Link, *, session: PipSession) -> Optional["IndexContent"]:
url = link.url.split("#", 1)[0]
# Check for VCS schemes that do not support lookup as web pages.
vcs_scheme = _match_vcs_scheme(url)
if vcs_scheme:
logger.warning(
"Cannot look at %s URL %s because it does not support lookup as web pages.",
vcs_scheme,
link,
)
return None
# Tack index.html onto file:// URLs that point to directories
scheme, _, path, _, _, _ = urllib.parse.urlparse(url)
if scheme == "file" and os.path.isdir(urllib.request.url2pathname(path)):
# add trailing slash if not present so urljoin doesn't trim
# final segment
if not url.endswith("/"):
url += "/"
# TODO: In the future, it would be nice if pip supported PEP 691
# style responses in the file:// URLs, however there's no
# standard file extension for application/vnd.pypi.simple.v1+json
# so we'll need to come up with something on our own.
url = urllib.parse.urljoin(url, "index.html")
logger.debug(" file: URL is directory, getting %s", url)
try:
resp = _get_simple_response(url, session=session)
except _NotHTTP:
logger.warning(
"Skipping page %s because it looks like an archive, and cannot "
"be checked by a HTTP HEAD request.",
link,
)
except _NotAPIContent as exc:
logger.warning(
"Skipping page %s because the %s request got Content-Type: %s. "
"The only supported Content-Types are application/vnd.pypi.simple.v1+json, "
"application/vnd.pypi.simple.v1+html, and text/html",
link,
exc.request_desc,
exc.content_type,
)
except NetworkConnectionError as exc:
_handle_get_simple_fail(link, exc)
except RetryError as exc:
_handle_get_simple_fail(link, exc)
except SSLError as exc:
reason = "There was a problem confirming the ssl certificate: "
reason += str(exc)
_handle_get_simple_fail(link, reason, meth=logger.info)
except requests.ConnectionError as exc:
_handle_get_simple_fail(link, f"connection error: {exc}")
except requests.Timeout:
_handle_get_simple_fail(link, "timed out")
else:
return _make_index_content(resp, cache_link_parsing=link.cache_link_parsing)
return None
| HTMLLinkParser |
python | huggingface__transformers | src/transformers/models/siglip2/modeling_siglip2.py | {
"start": 3512,
"end": 5359
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
Contrastive loss for image-text similarity.
logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
similarity scores.
logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
similarity scores.
text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):
The text embeddings obtained by applying the projection layer to the pooled output of [`Siglip2TextModel`].
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):
The image embeddings obtained by applying the projection layer to the pooled output of [`Siglip2VisionModel`].
text_model_output (`BaseModelOutputWithPooling`):
The output of the [`Siglip2TextModel`].
vision_model_output (`BaseModelOutputWithPooling`):
The output of the [`Siglip2VisionModel`].
"""
loss: Optional[torch.FloatTensor] = None
logits_per_image: Optional[torch.FloatTensor] = None
logits_per_text: Optional[torch.FloatTensor] = None
text_embeds: Optional[torch.FloatTensor] = None
image_embeds: Optional[torch.FloatTensor] = None
text_model_output: BaseModelOutputWithPooling = None
vision_model_output: BaseModelOutputWithPooling = None
def to_tuple(self) -> tuple[Any]:
return tuple(
self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
for k in self.keys()
)
| Siglip2Output |
python | jina-ai__jina | jina/proto/serializer.py | {
"start": 4735,
"end": 5278
} | class ____:
"""Placeholder that delegates the serialization and deserialization to the internal protobuf"""
@staticmethod
def SerializeToString(x):
"""
# noqa: DAR101
# noqa: DAR102
# noqa: DAR201
"""
return x.SerializeToString()
@staticmethod
def FromString(x: bytes):
"""
# noqa: DAR101
# noqa: DAR102
# noqa: DAR201
"""
sp = jina_pb2.SnapshotStatusProto()
sp.ParseFromString(x)
return sp
| SnapshotStatusProto |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/core.py | {
"start": 5252,
"end": 8936
} | class ____(Layer):
"""Applies Dropout to the input.
The Dropout layer randomly sets input units to 0 with a frequency of `rate`
at each step during training time, which helps prevent overfitting.
Inputs not set to 0 are scaled up by 1/(1 - rate) such that the sum over
all inputs is unchanged.
Note that the Dropout layer only applies when `training` is set to True
such that no values are dropped during inference. When using `model.fit`,
`training` will be appropriately set to True automatically, and in other
contexts, you can set the kwarg explicitly to True when calling the layer.
(This is in contrast to setting `trainable=False` for a Dropout layer.
`trainable` does not affect the layer's behavior, as Dropout does
not have any variables/weights that can be frozen during training.)
>>> tf.random.set_seed(0)
>>> layer = tf.keras.layers.Dropout(.2, input_shape=(2,))
>>> data = np.arange(10).reshape(5, 2).astype(np.float32)
>>> print(data)
[[0. 1.]
[2. 3.]
[4. 5.]
[6. 7.]
[8. 9.]]
>>> outputs = layer(data, training=True)
>>> print(outputs)
tf.Tensor(
[[ 0. 1.25]
[ 2.5 3.75]
[ 5. 6.25]
[ 7.5 8.75]
[10. 0. ]], shape=(5, 2), dtype=float32)
Args:
rate: Float between 0 and 1. Fraction of the input units to drop.
noise_shape: 1D integer tensor representing the shape of the
binary dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)` and
you want the dropout mask to be the same for all timesteps,
you can use `noise_shape=(batch_size, 1, features)`.
seed: A Python integer to use as random seed.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
"""
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
super(Dropout, self).__init__(**kwargs)
if isinstance(rate, (int, float)) and not 0 <= rate <= 1:
raise ValueError(f'Invalid value {rate} received for '
f'`rate`, expected a value between 0 and 1.')
self.rate = rate
self.noise_shape = noise_shape
self.seed = seed
self.supports_masking = True
def _get_noise_shape(self, inputs):
# Subclasses of `Dropout` may implement `_get_noise_shape(self, inputs)`,
# which will override `self.noise_shape`, and allows for custom noise
# shapes with dynamically sized inputs.
if self.noise_shape is None:
return None
concrete_inputs_shape = array_ops.shape(inputs)
noise_shape = []
for i, value in enumerate(self.noise_shape):
noise_shape.append(concrete_inputs_shape[i] if value is None else value)
return tensor_conversion.convert_to_tensor_v2_with_dispatch(noise_shape)
def call(self, inputs, training=None):
if training is None:
training = K.learning_phase()
def dropped_inputs():
return nn.dropout(
inputs,
noise_shape=self._get_noise_shape(inputs),
seed=self.seed,
rate=self.rate)
output = control_flow_util.smart_cond(training, dropped_inputs,
lambda: array_ops.identity(inputs))
return output
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = {
'rate': self.rate,
'noise_shape': self.noise_shape,
'seed': self.seed
}
base_config = super(Dropout, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| Dropout |
python | pypa__pipenv | pipenv/patched/pip/_internal/cli/command_context.py | {
"start": 141,
"end": 774
} | class ____:
def __init__(self) -> None:
super().__init__()
self._in_main_context = False
self._main_context = ExitStack()
@contextmanager
def main_context(self) -> Generator[None, None, None]:
assert not self._in_main_context
self._in_main_context = True
try:
with self._main_context:
yield
finally:
self._in_main_context = False
def enter_context(self, context_provider: ContextManager[_T]) -> _T:
assert self._in_main_context
return self._main_context.enter_context(context_provider)
| CommandContextMixIn |
python | pandas-dev__pandas | asv_bench/benchmarks/strings.py | {
"start": 7260,
"end": 7425
} | class ____:
def setup(self):
self.s = Series(["abcdefg", np.nan] * 500000)
def time_vector_slice(self):
# GH 2602
self.s.str[:5]
| Slice |
python | scipy__scipy | scipy/stats/_new_distributions.py | {
"start": 11746,
"end": 14451
} | class ____(ContinuousDistribution):
r"""Uniform distribution.
The probability density function of the uniform distribution is:
.. math::
f(x; a, b) = \frac{1}
{b - a}
"""
_a_domain = _RealInterval(endpoints=(-inf, inf))
_b_domain = _RealInterval(endpoints=('a', inf))
_x_support = _RealInterval(endpoints=('a', 'b'), inclusive=(True, True))
_a_param = _RealParameter('a', domain=_a_domain, typical=(1e-3, 0.9))
_b_param = _RealParameter('b', domain=_b_domain, typical=(1.1, 1e3))
_x_param = _RealParameter('x', domain=_x_support, typical=('a', 'b'))
_b_domain.define_parameters(_a_param)
_x_support.define_parameters(_a_param, _b_param)
_parameterizations = [_Parameterization(_a_param, _b_param)]
_variable = _x_param
def __init__(self, *, a=None, b=None, **kwargs):
super().__init__(a=a, b=b, **kwargs)
def _process_parameters(self, a=None, b=None, ab=None, **kwargs):
ab = b - a
kwargs.update(dict(a=a, b=b, ab=ab))
return kwargs
def _logpdf_formula(self, x, *, ab, **kwargs):
return np.where(np.isnan(x), np.nan, -np.log(ab))
def _pdf_formula(self, x, *, ab, **kwargs):
return np.where(np.isnan(x), np.nan, 1/ab)
def _logcdf_formula(self, x, *, a, ab, **kwargs):
with np.errstate(divide='ignore'):
return np.log(x - a) - np.log(ab)
def _cdf_formula(self, x, *, a, ab, **kwargs):
return (x - a) / ab
def _logccdf_formula(self, x, *, b, ab, **kwargs):
with np.errstate(divide='ignore'):
return np.log(b - x) - np.log(ab)
def _ccdf_formula(self, x, *, b, ab, **kwargs):
return (b - x) / ab
def _icdf_formula(self, p, *, a, ab, **kwargs):
return a + ab*p
def _iccdf_formula(self, p, *, b, ab, **kwargs):
return b - ab*p
def _entropy_formula(self, *, ab, **kwargs):
return np.log(ab)
def _mode_formula(self, *, a, b, ab, **kwargs):
return a + 0.5*ab
def _median_formula(self, *, a, b, ab, **kwargs):
return a + 0.5*ab
def _moment_raw_formula(self, order, a, b, ab, **kwargs):
np1 = order + 1
return (b**np1 - a**np1) / (np1 * ab)
def _moment_central_formula(self, order, ab, **kwargs):
return ab**2/12 if order == 2 else None
_moment_central_formula.orders = [2] # type: ignore[attr-defined]
def _sample_formula(self, full_shape, rng, a, b, ab, **kwargs):
try:
return rng.uniform(a, b, size=full_shape)[()]
except OverflowError: # happens when there are NaNs
return rng.uniform(0, 1, size=full_shape)*ab + a
| Uniform |
python | google__jax | jax/_src/blocked_sampler.py | {
"start": 799,
"end": 5880
} | class ____(Protocol):
def __call__(self, key: ArrayLike, *args, shape: Shape,
**kwargs) -> Array:
...
def _compute_tile_index(block_index: Sequence[int],
block_size_in_tiles: Shape,
total_size_in_tiles: Shape,
tile_index_in_block: Sequence[int]) -> int:
ndims = len(block_index)
dim_size = 1
total_idx = 0
for i in range(ndims-1, -1, -1):
dim_idx = tile_index_in_block[i] + block_index[i] * block_size_in_tiles[i]
total_idx += dim_idx * dim_size
dim_size *= total_size_in_tiles[i]
return total_idx
def blocked_fold_in(
global_key: ArrayLike,
total_size: Shape,
block_size: Shape,
tile_size: Shape,
block_index: Sequence[ArrayLike],
) -> NdKeyList:
"""Computes a grid of keys for block-invariant sampling.
Suppose we wished to construct a 16x512 array of random numbers, using
block sizes of 16x128 and 16x256. We could select an tile size of 8x128
(which divides both 16x128 and 16x256) and divide the total array in tiles as:
---------------------------------
| 8x128 | 8x128 | 8x128 | 8x128 |
---------------------------------
| 8x128 | 8x128 | 8x128 | 8x128 |
---------------------------------
We generate a key for each tile as:
tile_key = fold_in(global_key, tile_idx)
Where the tile_idx is the row-major raveled index of each element:
-----------------
| 0 | 1 | 2 | 3 |
-----------------
| 4 | 5 | 6 | 7 |
-----------------
We then compute and return the keys required to sample the tiles that make
up the current block (specified via `block_index`).
With a 16x256 block size, each block requires 4 (2x2) tile keys:
---------------
| 0, 1 | 2, 3 |
| 4, 5 | 6, 7 |
---------------
Therefore, we return a grid of 2x2 keys for each block (2 blocks total).
With a 16x128 block size, each block requires 2 (2x1) tile keys:
-----------------
| 0 | 1 | 2 | 3 |
| 4 | 5 | 6 | 7 |
-----------------
Therefore, we return a grid of 2x1 keys for each block (4 blocks total).
Args:
global_key: The global key shared between all blocks.
total_size: The shape of the array being generated.
block_size: The shape of an individual block.
tile_size: The shape of a `tile`, which is the smallest unit at
which samples are generated. This should be selected to be a divisor
of all block sizes one needs to be invariant to.
block_index: The index denoting which block to generate keys for.
Returns:
An N-dimensional nested list of keys required to sample the tiles
corresponding to the block specified by `block_index`.
"""
block_size_in_tiles = tuple(
_shape // _element for _shape, _element in zip(block_size, tile_size)
)
# Round up to make sure every tile is numbered.
total_size_in_tiles = tuple(
(_shape + _element - 1) // _element
for _shape, _element in zip(total_size, tile_size)
)
def _keygen_loop(axis, prefix):
if axis == len(block_size_in_tiles):
subtile_key = random.fold_in(
global_key, _compute_tile_index(
block_index, block_size_in_tiles, total_size_in_tiles, prefix))
return subtile_key
else:
keys = []
for i in range(block_size_in_tiles[axis]):
keys.append(_keygen_loop(axis+1, prefix+(i,)))
return keys
return _keygen_loop(0, ())
def sample_block(
sampler_fn: SampleFn,
keys: NdKeyList,
block_size: Shape,
tile_size: Shape,
*args,
**kwargs
) -> Array:
"""Draws random samples for a single block.
This function is intended to be used in conjunction with `blocked_fold_in`:
```
key_list = blocked_fold_in(global_key, total_size, block_size, tile_size,
block_index)
samples = sample_block(jax.random.uniform, key_list, block_size, tile_size)
```
Args:
sampler_fn: A random sampling function, e.g. jax.random.uniform.
keys: A grid of keys generated by `blocked_fold_in`.
block_size: The shape of an individual block.
tile_size: The shape of a `tile`, which is the smallest unit at
which samples are generated. This should be selected to be a divisor
of all block sizes one needs to be invariant to.
args: varargs for sampler_fn.
kwargs: kwargs for sampler_fn.
Returns:
An array of random samples drawn using sampler_fn.
"""
size_in_tiles = tuple(
_shape // _element for _shape, _element in zip(block_size, tile_size))
def _nested_index(arr: Array, idx: Sequence[int]) -> Array:
if len(idx) == 1:
return arr[idx[0]]
return _nested_index(arr[idx[0]], idx[1:])
def _sample_loop(axis: int, prefix: tuple[int, ...]) -> Array:
if axis == len(size_in_tiles):
return sampler_fn(_nested_index(keys, prefix), *args,
shape=tile_size, **kwargs)
else:
samples = []
for i in range(size_in_tiles[axis]):
samples.append(_sample_loop(axis+1, prefix+(i,)))
return jnp.concatenate(samples, axis=axis)
return _sample_loop(0, ())
| SampleFn |
python | dateutil__dateutil | tests/test_tz.py | {
"start": 35317,
"end": 40332
} | class ____(unittest.TestCase, TzFoldMixin):
gettz = staticmethod(tz.gettz)
def testGettz(self):
# bug 892569
str(self.gettz('UTC'))
def testGetTzEquality(self):
self.assertEqual(self.gettz('UTC'), self.gettz('UTC'))
def testTimeOnlyGettz(self):
# gettz returns None
tz_get = self.gettz('Europe/Minsk')
self.assertIs(dt_time(13, 20, tzinfo=tz_get).utcoffset(), None)
def testTimeOnlyGettzDST(self):
# gettz returns None
tz_get = self.gettz('Europe/Minsk')
self.assertIs(dt_time(13, 20, tzinfo=tz_get).dst(), None)
def testTimeOnlyGettzTzName(self):
tz_get = self.gettz('Europe/Minsk')
self.assertIs(dt_time(13, 20, tzinfo=tz_get).tzname(), None)
def testTimeOnlyFormatZ(self):
tz_get = self.gettz('Europe/Minsk')
t = dt_time(13, 20, tzinfo=tz_get)
self.assertEqual(t.strftime('%H%M%Z'), '1320')
def testPortugalDST(self):
# In 1996, Portugal changed from CET to WET
PORTUGAL = self.gettz('Portugal')
t_cet = datetime(1996, 3, 31, 1, 59, tzinfo=PORTUGAL)
self.assertEqual(t_cet.tzname(), 'CET')
self.assertEqual(t_cet.utcoffset(), timedelta(hours=1))
self.assertEqual(t_cet.dst(), timedelta(0))
t_west = datetime(1996, 3, 31, 2, 1, tzinfo=PORTUGAL)
self.assertEqual(t_west.tzname(), 'WEST')
self.assertEqual(t_west.utcoffset(), timedelta(hours=1))
self.assertEqual(t_west.dst(), timedelta(hours=1))
def testGettzCacheTzFile(self):
NYC1 = tz.gettz('America/New_York')
NYC2 = tz.gettz('America/New_York')
assert NYC1 is NYC2
def testGettzCacheTzLocal(self):
local1 = tz.gettz()
local2 = tz.gettz()
assert local1 is not local2
@pytest.mark.gettz
def test_gettz_same_result_for_none_and_empty_string():
local_from_none = tz.gettz()
local_from_empty_string = tz.gettz("")
assert local_from_none is not None
assert local_from_empty_string is not None
assert local_from_none == local_from_empty_string
@pytest.mark.gettz
@pytest.mark.parametrize('badzone', [
'Fake.Region/Abcdefghijklmnop', # Violates several tz project name rules
])
def test_gettz_badzone(badzone):
# Make sure passing a bad TZ string to gettz returns None (GH #800)
tzi = tz.gettz(badzone)
assert tzi is None
@pytest.mark.gettz
def test_gettz_badzone_unicode():
# Make sure a unicode string can be passed to TZ (GH #802)
# When fixed, combine this with test_gettz_badzone
tzi = tz.gettz('🐼')
assert tzi is None
@pytest.mark.gettz
@pytest.mark.parametrize(
"badzone,exc_reason",
[
pytest.param(
b"America/New_York",
".*should be str, not bytes.*",
id="bytes on Python 3",
marks=[
pytest.mark.skipif(
PY2, reason="bytes arguments accepted in Python 2"
)
],
),
pytest.param(
object(),
None,
id="no startswith()",
marks=[
pytest.mark.xfail(reason="AttributeError instead of TypeError",
raises=AttributeError),
],
),
],
)
def test_gettz_zone_wrong_type(badzone, exc_reason):
with pytest.raises(TypeError, match=exc_reason):
tz.gettz(badzone)
@pytest.mark.gettz
@pytest.mark.xfail(IS_WIN, reason='zoneinfo separately cached')
def test_gettz_cache_clear():
NYC1 = tz.gettz('America/New_York')
tz.gettz.cache_clear()
NYC2 = tz.gettz('America/New_York')
assert NYC1 is not NYC2
@pytest.mark.gettz
@pytest.mark.xfail(IS_WIN, reason='zoneinfo separately cached')
def test_gettz_set_cache_size():
tz.gettz.cache_clear()
tz.gettz.set_cache_size(3)
MONACO_ref = weakref.ref(tz.gettz('Europe/Monaco'))
EASTER_ref = weakref.ref(tz.gettz('Pacific/Easter'))
CURRIE_ref = weakref.ref(tz.gettz('Australia/Currie'))
gc.collect()
assert MONACO_ref() is not None
assert EASTER_ref() is not None
assert CURRIE_ref() is not None
tz.gettz.set_cache_size(2)
gc.collect()
assert MONACO_ref() is None
@pytest.mark.xfail(IS_WIN, reason="Windows does not use system zoneinfo")
@pytest.mark.smoke
@pytest.mark.gettz
def test_gettz_weakref():
tz.gettz.cache_clear()
tz.gettz.set_cache_size(2)
NYC1 = tz.gettz('America/New_York')
NYC_ref = weakref.ref(tz.gettz('America/New_York'))
assert NYC1 is NYC_ref()
del NYC1
gc.collect()
assert NYC_ref() is not None # Should still be in the strong cache
assert tz.gettz('America/New_York') is NYC_ref()
# Populate strong cache with other timezones
tz.gettz('Europe/Monaco')
tz.gettz('Pacific/Easter')
tz.gettz('Australia/Currie')
gc.collect()
assert NYC_ref() is None # Should have been pushed out
assert tz.gettz('America/New_York') is not NYC_ref()
| GettzTest |
python | tensorflow__tensorflow | tensorflow/python/feature_column/feature_column_v2.py | {
"start": 96437,
"end": 102744
} | class ____(
DenseColumn,
CategoricalColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
fc_old._CategoricalColumn, # pylint: disable=protected-access
collections.namedtuple('BucketizedColumn',
('source_column', 'boundaries'))):
"""See `bucketized_column`."""
@property
def _is_v2_column(self):
return (
isinstance(self.source_column, fc_types.FeatureColumn)
and self.source_column._is_v2_column
) # pylint: disable=protected-access
@property
def name(self):
"""See `FeatureColumn` base class."""
return '{}_bucketized'.format(self.source_column.name)
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return self.source_column.parse_example_spec
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.source_column._parse_example_spec # pylint: disable=protected-access
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
"""Returns bucketized categorical `source_column` tensor."""
source_tensor = inputs.get(self.source_column)
return math_ops._bucketize( # pylint: disable=protected-access
source_tensor,
boundaries=self.boundaries)
def transform_feature(self, transformation_cache, state_manager):
"""Returns bucketized categorical `source_column` tensor."""
source_tensor = transformation_cache.get(self.source_column, state_manager)
return math_ops._bucketize( # pylint: disable=protected-access
source_tensor,
boundaries=self.boundaries)
@property
def variable_shape(self):
"""See `DenseColumn` base class."""
return tensor_shape.TensorShape(
tuple(self.source_column.shape) + (len(self.boundaries) + 1,))
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _variable_shape(self):
return self.variable_shape
def _get_dense_tensor_for_input_tensor(self, input_tensor):
return array_ops.one_hot(
indices=math_ops.cast(input_tensor, dtypes.int64),
depth=len(self.boundaries) + 1,
on_value=1.,
off_value=0.)
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns one hot encoded dense `Tensor`."""
input_tensor = transformation_cache.get(self, state_manager)
return self._get_dense_tensor_for_input_tensor(input_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
input_tensor = inputs.get(self)
return self._get_dense_tensor_for_input_tensor(input_tensor)
@property
def num_buckets(self):
"""See `CategoricalColumn` base class."""
# By construction, source_column is always one-dimensional.
return (len(self.boundaries) + 1) * self.source_column.shape[0]
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _num_buckets(self):
return self.num_buckets
def _get_sparse_tensors_for_input_tensor(self, input_tensor):
batch_size = array_ops.shape(input_tensor)[0]
# By construction, source_column is always one-dimensional.
source_dimension = self.source_column.shape[0]
i1 = array_ops.reshape(
array_ops.tile(
array_ops.expand_dims(math_ops.range(0, batch_size), 1),
[1, source_dimension]), (-1,))
i2 = array_ops.tile(math_ops.range(0, source_dimension), [batch_size])
# Flatten the bucket indices and unique them across dimensions
# E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
bucket_indices = (
array_ops.reshape(input_tensor,
(-1,)) + (len(self.boundaries) + 1) * i2)
indices = math_ops.cast(
array_ops.transpose(array_ops_stack.stack((i1, i2))), dtypes.int64)
dense_shape = math_ops.cast(
array_ops_stack.stack([batch_size, source_dimension]), dtypes.int64)
sparse_tensor = sparse_tensor_lib.SparseTensor(
indices=indices, values=bucket_indices, dense_shape=dense_shape)
return CategoricalColumn.IdWeightPair(sparse_tensor, None)
def get_sparse_tensors(self, transformation_cache, state_manager):
"""Converts dense inputs to SparseTensor so downstream code can use it."""
input_tensor = transformation_cache.get(self, state_manager)
return self._get_sparse_tensors_for_input_tensor(input_tensor)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_sparse_tensors(self,
inputs,
weight_collections=None,
trainable=None):
"""Converts dense inputs to SparseTensor so downstream code can use it."""
del weight_collections
del trainable
input_tensor = inputs.get(self)
return self._get_sparse_tensors_for_input_tensor(input_tensor)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.source_column]
def get_config(self):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import serialize_feature_column # pylint: disable=g-import-not-at-top
config = dict(zip(self._fields, self))
config['source_column'] = serialize_feature_column(self.source_column)
return config
@classmethod
def from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
from tensorflow.python.feature_column.serialization import deserialize_feature_column # pylint: disable=g-import-not-at-top
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['source_column'] = deserialize_feature_column(
config['source_column'], custom_objects, columns_by_name)
return cls(**kwargs)
@serialization.register_feature_column
| BucketizedColumn |
python | numpy__numpy | benchmarks/benchmarks/bench_ufunc_strides.py | {
"start": 5459,
"end": 6389
} | class ____(Benchmark):
def f(self, z):
return np.abs(z) < 4.0
def g(self, z, c):
return np.sum(np.multiply(z, z) + c)
def mandelbrot_numpy(self, c, maxiter):
output = np.zeros(c.shape, np.int32)
z = np.empty(c.shape, np.complex64)
for it in range(maxiter):
notdone = self.f(z)
output[notdone] = it
z[notdone] = self.g(z[notdone], c[notdone])
output[output == maxiter - 1] = 0
return output
def mandelbrot_set(self, xmin, xmax, ymin, ymax, width, height, maxiter):
r1 = np.linspace(xmin, xmax, width, dtype=np.float32)
r2 = np.linspace(ymin, ymax, height, dtype=np.float32)
c = r1 + r2[:, None] * 1j
n3 = self.mandelbrot_numpy(c, maxiter)
return (r1, r2, n3.T)
def time_mandel(self):
self.mandelbrot_set(-0.74877, -0.74872, 0.06505, 0.06510, 1000, 1000, 2048)
| Mandelbrot |
python | anthropics__anthropic-sdk-python | src/anthropic/lib/bedrock/_stream.py | {
"start": 552,
"end": 871
} | class ____(AsyncStream[_T]):
def __init__(
self,
*,
cast_to: type[_T],
response: httpx.Response,
client: AsyncAnthropic,
) -> None:
super().__init__(cast_to=cast_to, response=response, client=client)
self._decoder = AWSEventStreamDecoder()
| AsyncBedrockStream |
python | allegroai__clearml | clearml/backend_api/services/v2_23/events.py | {
"start": 57485,
"end": 57746
} | class ____(Response):
"""
Response of events.clear_scroll endpoint.
"""
_service = "events"
_action = "clear_scroll"
_version = "2.23"
_schema = {"additionalProperties": False, "definitions": {}, "type": "object"}
| ClearScrollResponse |
python | python__mypy | mypy/types.py | {
"start": 34523,
"end": 38761
} | class ____(ProperType):
"""Instance type that has not been bound during semantic analysis."""
__slots__ = (
"name",
"args",
"optional",
"empty_tuple_index",
"original_str_expr",
"original_str_fallback",
)
def __init__(
self,
name: str,
args: Sequence[Type] | None = None,
line: int = -1,
column: int = -1,
optional: bool = False,
empty_tuple_index: bool = False,
original_str_expr: str | None = None,
original_str_fallback: str | None = None,
) -> None:
super().__init__(line, column)
if not args:
args = []
self.name = name
self.args = tuple(args)
# Should this type be wrapped in an Optional?
self.optional = optional
# Special case for X[()]
self.empty_tuple_index = empty_tuple_index
# If this UnboundType was originally defined as a str or bytes, keep track of
# the original contents of that string-like thing. This way, if this UnboundExpr
# ever shows up inside of a LiteralType, we can determine whether that
# Literal[...] is valid or not. E.g. Literal[foo] is most likely invalid
# (unless 'foo' is an alias for another literal or something) and
# Literal["foo"] most likely is.
#
# We keep track of the entire string instead of just using a boolean flag
# so we can distinguish between things like Literal["foo"] vs
# Literal[" foo "].
#
# We also keep track of what the original base fallback type was supposed to be
# so we don't have to try and recompute it later
self.original_str_expr = original_str_expr
self.original_str_fallback = original_str_fallback
def copy_modified(self, args: Bogus[Sequence[Type] | None] = _dummy) -> UnboundType:
if args is _dummy:
args = self.args
return UnboundType(
name=self.name,
args=args,
line=self.line,
column=self.column,
optional=self.optional,
empty_tuple_index=self.empty_tuple_index,
original_str_expr=self.original_str_expr,
original_str_fallback=self.original_str_fallback,
)
def accept(self, visitor: TypeVisitor[T]) -> T:
return visitor.visit_unbound_type(self)
def __hash__(self) -> int:
return hash((self.name, self.optional, tuple(self.args), self.original_str_expr))
def __eq__(self, other: object) -> bool:
if not isinstance(other, UnboundType):
return NotImplemented
return (
self.name == other.name
and self.optional == other.optional
and self.args == other.args
and self.original_str_expr == other.original_str_expr
and self.original_str_fallback == other.original_str_fallback
)
def serialize(self) -> JsonDict:
return {
".class": "UnboundType",
"name": self.name,
"args": [a.serialize() for a in self.args],
"expr": self.original_str_expr,
"expr_fallback": self.original_str_fallback,
}
@classmethod
def deserialize(cls, data: JsonDict) -> UnboundType:
assert data[".class"] == "UnboundType"
return UnboundType(
data["name"],
[deserialize_type(a) for a in data["args"]],
original_str_expr=data["expr"],
original_str_fallback=data["expr_fallback"],
)
def write(self, data: WriteBuffer) -> None:
write_tag(data, UNBOUND_TYPE)
write_str(data, self.name)
write_type_list(data, self.args)
write_str_opt(data, self.original_str_expr)
write_str_opt(data, self.original_str_fallback)
write_tag(data, END_TAG)
@classmethod
def read(cls, data: ReadBuffer) -> UnboundType:
ret = UnboundType(
read_str(data),
read_type_list(data),
original_str_expr=read_str_opt(data),
original_str_fallback=read_str_opt(data),
)
assert read_tag(data) == END_TAG
return ret
| UnboundType |
python | numpy__numpy | numpy/lib/tests/test_regression.py | {
"start": 195,
"end": 7716
} | class ____:
def test_poly1d(self):
# Ticket #28
assert_equal(np.poly1d([1]) - np.poly1d([1, 0]),
np.poly1d([-1, 1]))
def test_cov_parameters(self):
# Ticket #91
x = np.random.random((3, 3))
y = x.copy()
np.cov(x, rowvar=True)
np.cov(y, rowvar=False)
assert_array_equal(x, y)
def test_mem_digitize(self):
# Ticket #95
for i in range(100):
np.digitize([1, 2, 3, 4], [1, 3])
np.digitize([0, 1, 2, 3, 4], [1, 3])
def test_unique_zero_sized(self):
# Ticket #205
assert_array_equal([], np.unique(np.array([])))
def test_mem_vectorise(self):
# Ticket #325
vt = np.vectorize(lambda *args: args)
vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1, 1, 2)))
vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1,
1, 2)), np.zeros((2, 2)))
def test_mgrid_single_element(self):
# Ticket #339
assert_array_equal(np.mgrid[0:0:1j], [0])
assert_array_equal(np.mgrid[0:0], [])
def test_refcount_vectorize(self):
# Ticket #378
def p(x, y):
return 123
v = np.vectorize(p)
_assert_valid_refcount(v)
def test_poly1d_nan_roots(self):
# Ticket #396
p = np.poly1d([np.nan, np.nan, 1], r=False)
assert_raises(np.linalg.LinAlgError, getattr, p, "r")
def test_mem_polymul(self):
# Ticket #448
np.polymul([], [1.])
def test_mem_string_concat(self):
# Ticket #469
x = np.array([])
np.append(x, 'asdasd\tasdasd')
def test_poly_div(self):
# Ticket #553
u = np.poly1d([1, 2, 3])
v = np.poly1d([1, 2, 3, 4, 5])
q, r = np.polydiv(u, v)
assert_equal(q * v + r, u)
def test_poly_eq(self):
# Ticket #554
x = np.poly1d([1, 2, 3])
y = np.poly1d([3, 4])
assert_(x != y)
assert_(x == x)
def test_polyfit_build(self):
# Ticket #628
ref = [-1.06123820e-06, 5.70886914e-04, -1.13822012e-01,
9.95368241e+00, -3.14526520e+02]
x = [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103,
104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 129,
130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141,
146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157,
158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
170, 171, 172, 173, 174, 175, 176]
y = [9.0, 3.0, 7.0, 4.0, 4.0, 8.0, 6.0, 11.0, 9.0, 8.0, 11.0, 5.0,
6.0, 5.0, 9.0, 8.0, 6.0, 10.0, 6.0, 10.0, 7.0, 6.0, 6.0, 6.0,
13.0, 4.0, 9.0, 11.0, 4.0, 5.0, 8.0, 5.0, 7.0, 7.0, 6.0, 12.0,
7.0, 7.0, 9.0, 4.0, 12.0, 6.0, 6.0, 4.0, 3.0, 9.0, 8.0, 8.0,
6.0, 7.0, 9.0, 10.0, 6.0, 8.0, 4.0, 7.0, 7.0, 10.0, 8.0, 8.0,
6.0, 3.0, 8.0, 4.0, 5.0, 7.0, 8.0, 6.0, 6.0, 4.0, 12.0, 9.0,
8.0, 8.0, 8.0, 6.0, 7.0, 4.0, 4.0, 5.0, 7.0]
tested = np.polyfit(x, y, 4)
assert_array_almost_equal(ref, tested)
def test_polydiv_type(self):
# Make polydiv work for complex types
msg = "Wrong type, should be complex"
x = np.ones(3, dtype=complex)
q, r = np.polydiv(x, x)
assert_(q.dtype == complex, msg)
msg = "Wrong type, should be float"
x = np.ones(3, dtype=int)
q, r = np.polydiv(x, x)
assert_(q.dtype == float, msg)
def test_histogramdd_too_many_bins(self):
# Ticket 928.
assert_raises(ValueError, np.histogramdd, np.ones((1, 10)), bins=2**10)
def test_polyint_type(self):
# Ticket #944
msg = "Wrong type, should be complex"
x = np.ones(3, dtype=complex)
assert_(np.polyint(x).dtype == complex, msg)
msg = "Wrong type, should be float"
x = np.ones(3, dtype=int)
assert_(np.polyint(x).dtype == float, msg)
def test_ndenumerate_crash(self):
# Ticket 1140
# Shouldn't crash:
list(np.ndenumerate(np.array([[]])))
def test_large_fancy_indexing(self):
# Large enough to fail on 64-bit.
nbits = np.dtype(np.intp).itemsize * 8
thesize = int((2**nbits)**(1.0 / 5.0) + 1)
def dp():
n = 3
a = np.ones((n,) * 5)
i = np.random.randint(0, n, size=thesize)
a[np.ix_(i, i, i, i, i)] = 0
def dp2():
n = 3
a = np.ones((n,) * 5)
i = np.random.randint(0, n, size=thesize)
a[np.ix_(i, i, i, i, i)]
assert_raises(ValueError, dp)
assert_raises(ValueError, dp2)
def test_void_coercion(self):
dt = np.dtype([('a', 'f4'), ('b', 'i4')])
x = np.zeros((1,), dt)
assert_(np.r_[x, x].dtype == dt)
def test_include_dirs(self):
# As a sanity check, just test that get_include
# includes something reasonable. Somewhat
# related to ticket #1405.
include_dirs = [np.get_include()]
for path in include_dirs:
assert_(isinstance(path, str))
assert_(path != '')
def test_polyder_return_type(self):
# Ticket #1249
assert_(isinstance(np.polyder(np.poly1d([1]), 0), np.poly1d))
assert_(isinstance(np.polyder([1], 0), np.ndarray))
assert_(isinstance(np.polyder(np.poly1d([1]), 1), np.poly1d))
assert_(isinstance(np.polyder([1], 1), np.ndarray))
def test_append_fields_dtype_list(self):
# Ticket #1676
from numpy.lib.recfunctions import append_fields
base = np.array([1, 2, 3], dtype=np.int32)
names = ['a', 'b', 'c']
data = np.eye(3).astype(np.int32)
dlist = [np.float64, np.int32, np.int32]
try:
append_fields(base, names, data, dlist)
except Exception:
raise AssertionError
def test_loadtxt_fields_subarrays(self):
# For ticket #1936
from io import StringIO
dt = [("a", 'u1', 2), ("b", 'u1', 2)]
x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
assert_equal(x, np.array([((0, 1), (2, 3))], dtype=dt))
dt = [("a", [("a", 'u1', (1, 3)), ("b", 'u1')])]
x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
assert_equal(x, np.array([(((0, 1, 2), 3),)], dtype=dt))
dt = [("a", 'u1', (2, 2))]
x = np.loadtxt(StringIO("0 1 2 3"), dtype=dt)
assert_equal(x, np.array([(((0, 1), (2, 3)),)], dtype=dt))
dt = [("a", 'u1', (2, 3, 2))]
x = np.loadtxt(StringIO("0 1 2 3 4 5 6 7 8 9 10 11"), dtype=dt)
data = [((((0, 1), (2, 3), (4, 5)), ((6, 7), (8, 9), (10, 11))),)]
assert_equal(x, np.array(data, dtype=dt))
def test_nansum_with_boolean(self):
# gh-2978
a = np.zeros(2, dtype=bool)
try:
np.nansum(a)
except Exception:
raise AssertionError
def test_py3_compat(self):
# gh-2561
# Test if the oldstyle class test is bypassed in python3
class C:
"""Old-style class in python2, normal class in python3"""
pass
out = open(os.devnull, 'w')
try:
np.info(C(), output=out)
except AttributeError:
raise AssertionError
finally:
out.close()
| TestRegression |
python | scipy__scipy | scipy/io/_harwell_boeing/tests/test_fortran_format.py | {
"start": 1825,
"end": 2383
} | class ____:
def test_to_fortran(self):
f = [ExpFormat(10, 5), ExpFormat(12, 10), ExpFormat(12, 10, min=3),
ExpFormat(10, 5, repeat=3)]
res = ["(E10.5)", "(E12.10)", "(E12.10E3)", "(3E10.5)"]
for i, j in zip(f, res):
assert_equal(i.fortran_format, j)
def test_from_number(self):
f = np.array([1.0, -1.2])
r_f = [ExpFormat(24, 16, repeat=3), ExpFormat(25, 16, repeat=3)]
for i, j in zip(f, r_f):
assert_equal(ExpFormat.from_number(i).__dict__, j.__dict__)
| TestExpFormat |
python | Lightning-AI__lightning | src/lightning/pytorch/cli.py | {
"start": 40146,
"end": 42229
} | class ____:
def __init__(self, cli: LightningCLI, key: str) -> None:
self.cli = cli
self.key = key
def __call__(
self,
class_type: type[ModuleType],
*args: Any,
applied_instantiation_links: dict,
**kwargs: Any,
) -> ModuleType:
self.cli._dump_config()
hparams = self.cli.config_dump.get(self.key, {})
if "class_path" in hparams:
# To make hparams backwards compatible, and so that it is the same irrespective of subclass_mode, the
# parameters are stored directly, and the class_path in a special key `_class_path` to clarify its internal
# use.
hparams = {
"_class_path": hparams["class_path"],
**hparams.get("init_args", {}),
**hparams.get("dict_kwargs", {}),
}
# get instantiation link target values from kwargs
for key, value in applied_instantiation_links.items():
if not key.startswith(f"{self.key}."):
continue
key = key[len(f"{self.key}.") :]
if key.startswith("init_args."):
key = key[len("init_args.") :]
_set_dict_nested(hparams, key, value)
with _given_hyperparameters_context(
hparams=hparams,
instantiator="lightning.pytorch.cli.instantiate_module",
):
return class_type(*args, **kwargs)
def instantiate_module(class_type: type[ModuleType], config: dict[str, Any]) -> ModuleType:
parser = ArgumentParser(exit_on_error=False)
if "_class_path" in config:
parser.add_subclass_arguments(class_type, "module", fail_untyped=False)
config = {
"class_path": config["_class_path"],
"dict_kwargs": {k: v for k, v in config.items() if k != "_class_path"},
}
else:
parser.add_class_arguments(class_type, "module", fail_untyped=False)
cfg = parser.parse_object({"module": config})
init = parser.instantiate_classes(cfg)
return init.module
| _InstantiatorFn |
python | Textualize__textual | src/textual/events.py | {
"start": 21918,
"end": 22050
} | class ____(Event, bubble=False):
"""Sent when a widget is blurred (un-focussed).
- [ ] Bubbles
- [ ] Verbose
"""
| Blur |
python | getsentry__sentry-python | sentry_sdk/integrations/starlite.py | {
"start": 1783,
"end": 10559
} | class ____(SentryAsgiMiddleware):
def __init__(self, app, span_origin=StarliteIntegration.origin):
# type: (ASGIApp, str) -> None
super().__init__(
app=app,
unsafe_context_data=False,
transaction_style="endpoint",
mechanism_type="asgi",
span_origin=span_origin,
asgi_version=3,
)
def patch_app_init():
# type: () -> None
"""
Replaces the Starlite class's `__init__` function in order to inject `after_exception` handlers and set the
`SentryStarliteASGIMiddleware` as the outmost middleware in the stack.
See:
- https://starlite-api.github.io/starlite/usage/0-the-starlite-app/5-application-hooks/#after-exception
- https://starlite-api.github.io/starlite/usage/7-middleware/0-middleware-intro/
"""
old__init__ = Starlite.__init__
@ensure_integration_enabled(StarliteIntegration, old__init__)
def injection_wrapper(self, *args, **kwargs):
# type: (Starlite, *Any, **Any) -> None
after_exception = kwargs.pop("after_exception", [])
kwargs.update(
after_exception=[
exception_handler,
*(
after_exception
if isinstance(after_exception, list)
else [after_exception]
),
]
)
middleware = kwargs.get("middleware") or []
kwargs["middleware"] = [SentryStarliteASGIMiddleware, *middleware]
old__init__(self, *args, **kwargs)
Starlite.__init__ = injection_wrapper
def patch_middlewares():
# type: () -> None
old_resolve_middleware_stack = BaseRouteHandler.resolve_middleware
@ensure_integration_enabled(StarliteIntegration, old_resolve_middleware_stack)
def resolve_middleware_wrapper(self):
# type: (BaseRouteHandler) -> list[Middleware]
return [
enable_span_for_middleware(middleware)
for middleware in old_resolve_middleware_stack(self)
]
BaseRouteHandler.resolve_middleware = resolve_middleware_wrapper
def enable_span_for_middleware(middleware):
# type: (Middleware) -> Middleware
if (
not hasattr(middleware, "__call__") # noqa: B004
or middleware is SentryStarliteASGIMiddleware
):
return middleware
if isinstance(middleware, DefineMiddleware):
old_call = middleware.middleware.__call__ # type: ASGIApp
else:
old_call = middleware.__call__
async def _create_span_call(self, scope, receive, send):
# type: (MiddlewareProtocol, StarliteScope, Receive, Send) -> None
if sentry_sdk.get_client().get_integration(StarliteIntegration) is None:
return await old_call(self, scope, receive, send)
middleware_name = self.__class__.__name__
with sentry_sdk.start_span(
op=OP.MIDDLEWARE_STARLITE,
name=middleware_name,
origin=StarliteIntegration.origin,
) as middleware_span:
middleware_span.set_tag("starlite.middleware_name", middleware_name)
# Creating spans for the "receive" callback
async def _sentry_receive(*args, **kwargs):
# type: (*Any, **Any) -> Union[HTTPReceiveMessage, WebSocketReceiveMessage]
if sentry_sdk.get_client().get_integration(StarliteIntegration) is None:
return await receive(*args, **kwargs)
with sentry_sdk.start_span(
op=OP.MIDDLEWARE_STARLITE_RECEIVE,
name=getattr(receive, "__qualname__", str(receive)),
origin=StarliteIntegration.origin,
) as span:
span.set_tag("starlite.middleware_name", middleware_name)
return await receive(*args, **kwargs)
receive_name = getattr(receive, "__name__", str(receive))
receive_patched = receive_name == "_sentry_receive"
new_receive = _sentry_receive if not receive_patched else receive
# Creating spans for the "send" callback
async def _sentry_send(message):
# type: (Message) -> None
if sentry_sdk.get_client().get_integration(StarliteIntegration) is None:
return await send(message)
with sentry_sdk.start_span(
op=OP.MIDDLEWARE_STARLITE_SEND,
name=getattr(send, "__qualname__", str(send)),
origin=StarliteIntegration.origin,
) as span:
span.set_tag("starlite.middleware_name", middleware_name)
return await send(message)
send_name = getattr(send, "__name__", str(send))
send_patched = send_name == "_sentry_send"
new_send = _sentry_send if not send_patched else send
return await old_call(self, scope, new_receive, new_send)
not_yet_patched = old_call.__name__ not in ["_create_span_call"]
if not_yet_patched:
if isinstance(middleware, DefineMiddleware):
middleware.middleware.__call__ = _create_span_call
else:
middleware.__call__ = _create_span_call
return middleware
def patch_http_route_handle():
# type: () -> None
old_handle = HTTPRoute.handle
async def handle_wrapper(self, scope, receive, send):
# type: (HTTPRoute, HTTPScope, Receive, Send) -> None
if sentry_sdk.get_client().get_integration(StarliteIntegration) is None:
return await old_handle(self, scope, receive, send)
sentry_scope = sentry_sdk.get_isolation_scope()
request = scope["app"].request_class(scope=scope, receive=receive, send=send) # type: Request[Any, Any]
extracted_request_data = ConnectionDataExtractor(
parse_body=True, parse_query=True
)(request)
body = extracted_request_data.pop("body")
request_data = await body
def event_processor(event, _):
# type: (Event, Hint) -> Event
route_handler = scope.get("route_handler")
request_info = event.get("request", {})
request_info["content_length"] = len(scope.get("_body", b""))
if should_send_default_pii():
request_info["cookies"] = extracted_request_data["cookies"]
if request_data is not None:
request_info["data"] = request_data
func = None
if route_handler.name is not None:
tx_name = route_handler.name
elif isinstance(route_handler.fn, Ref):
func = route_handler.fn.value
else:
func = route_handler.fn
if func is not None:
tx_name = transaction_from_function(func)
tx_info = {"source": SOURCE_FOR_STYLE["endpoint"]}
if not tx_name:
tx_name = _DEFAULT_TRANSACTION_NAME
tx_info = {"source": TransactionSource.ROUTE}
event.update(
{
"request": deepcopy(request_info),
"transaction": tx_name,
"transaction_info": tx_info,
}
)
return event
sentry_scope._name = StarliteIntegration.identifier
sentry_scope.add_event_processor(event_processor)
return await old_handle(self, scope, receive, send)
HTTPRoute.handle = handle_wrapper
def retrieve_user_from_scope(scope):
# type: (StarliteScope) -> Optional[dict[str, Any]]
scope_user = scope.get("user")
if not scope_user:
return None
if isinstance(scope_user, dict):
return scope_user
if isinstance(scope_user, BaseModel):
return scope_user.dict()
if hasattr(scope_user, "asdict"): # dataclasses
return scope_user.asdict()
plugin = get_plugin_for_value(scope_user)
if plugin and not is_async_callable(plugin.to_dict):
return plugin.to_dict(scope_user)
return None
@ensure_integration_enabled(StarliteIntegration)
def exception_handler(exc, scope, _):
# type: (Exception, StarliteScope, State) -> None
user_info = None # type: Optional[dict[str, Any]]
if should_send_default_pii():
user_info = retrieve_user_from_scope(scope)
if user_info and isinstance(user_info, dict):
sentry_scope = sentry_sdk.get_isolation_scope()
sentry_scope.set_user(user_info)
event, hint = event_from_exception(
exc,
client_options=sentry_sdk.get_client().options,
mechanism={"type": StarliteIntegration.identifier, "handled": False},
)
sentry_sdk.capture_event(event, hint=hint)
| SentryStarliteASGIMiddleware |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/s3.py | {
"start": 19674,
"end": 25009
} | class ____(AwsBaseOperator[S3Hook]):
"""
To enable users to delete single object or multiple objects from a bucket using a single HTTP request.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:S3DeleteObjectsOperator`
:param bucket: Name of the bucket in which you are going to delete object(s). (templated)
:param keys: The key(s) to delete from S3 bucket. (templated)
When ``keys`` is a string, it's supposed to be the key name of
the single object to delete.
When ``keys`` is a list, it's supposed to be the list of the
keys to delete.
:param prefix: Prefix of objects to delete. (templated)
All objects matching this prefix in the bucket will be deleted.
:param from_datetime: Greater LastModified Date of objects to delete. (templated)
All objects which LastModified Date is greater than this datetime in the bucket will be deleted.
:param to_datetime: less LastModified Date of objects to delete. (templated)
All objects which LastModified Date is less than this datetime in the bucket will be deleted.
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
template_fields: Sequence[str] = aws_template_fields(
"keys", "bucket", "prefix", "from_datetime", "to_datetime"
)
aws_hook_class = S3Hook
def __init__(
self,
*,
bucket: str,
keys: str | list | None = None,
prefix: str | None = None,
from_datetime: datetime | str | None = None,
to_datetime: datetime | str | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.bucket = bucket
self.keys = keys
self.prefix = prefix
self.from_datetime = from_datetime
self.to_datetime = to_datetime
self._keys: str | list[str] = ""
if not exactly_one(keys is None, all(var is None for var in [prefix, from_datetime, to_datetime])):
raise AirflowException(
"Either keys or at least one of prefix, from_datetime, to_datetime should be set."
)
def execute(self, context: Context):
if not exactly_one(
self.keys is None, all(var is None for var in [self.prefix, self.from_datetime, self.to_datetime])
):
raise AirflowException(
"Either keys or at least one of prefix, from_datetime, to_datetime should be set."
)
if isinstance(self.keys, (list, str)) and not self.keys:
return
# handle case where dates are strings, specifically when sent as template fields and macros.
if isinstance(self.to_datetime, str):
self.to_datetime = parser.parse(self.to_datetime).replace(tzinfo=pytz.UTC)
if isinstance(self.from_datetime, str):
self.from_datetime = parser.parse(self.from_datetime).replace(tzinfo=pytz.UTC)
keys = self.keys or self.hook.list_keys(
bucket_name=self.bucket,
prefix=self.prefix,
from_datetime=self.from_datetime,
to_datetime=self.to_datetime,
)
if keys:
self.hook.delete_objects(bucket=self.bucket, keys=keys)
self._keys = keys
def get_openlineage_facets_on_complete(self, task_instance):
"""Implement _on_complete because object keys are resolved in execute()."""
from airflow.providers.common.compat.openlineage.facet import (
Dataset,
LifecycleStateChange,
LifecycleStateChangeDatasetFacet,
PreviousIdentifier,
)
from airflow.providers.openlineage.extractors import OperatorLineage
if not self._keys:
return OperatorLineage()
keys = self._keys
if isinstance(keys, str):
keys = [keys]
bucket_url = f"s3://{self.bucket}"
input_datasets = [
Dataset(
namespace=bucket_url,
name=key,
facets={
"lifecycleStateChange": LifecycleStateChangeDatasetFacet(
lifecycleStateChange=LifecycleStateChange.DROP.value,
previousIdentifier=PreviousIdentifier(
namespace=bucket_url,
name=key,
),
)
},
)
for key in keys
]
return OperatorLineage(
inputs=input_datasets,
)
| S3DeleteObjectsOperator |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 706206,
"end": 709381
} | class ____(
sgqlc.types.Type, Node, UniformResourceLocatable, RequirableByPullRequest
):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"annotations",
"check_suite",
"completed_at",
"conclusion",
"database_id",
"deployment",
"details_url",
"external_id",
"name",
"pending_deployment_request",
"permalink",
"repository",
"started_at",
"status",
"steps",
"summary",
"text",
"title",
)
annotations = sgqlc.types.Field(
CheckAnnotationConnection,
graphql_name="annotations",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
check_suite = sgqlc.types.Field(
sgqlc.types.non_null("CheckSuite"), graphql_name="checkSuite"
)
completed_at = sgqlc.types.Field(DateTime, graphql_name="completedAt")
conclusion = sgqlc.types.Field(CheckConclusionState, graphql_name="conclusion")
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
deployment = sgqlc.types.Field("Deployment", graphql_name="deployment")
details_url = sgqlc.types.Field(URI, graphql_name="detailsUrl")
external_id = sgqlc.types.Field(String, graphql_name="externalId")
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
pending_deployment_request = sgqlc.types.Field(
DeploymentRequest, graphql_name="pendingDeploymentRequest"
)
permalink = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="permalink")
repository = sgqlc.types.Field(
sgqlc.types.non_null("Repository"), graphql_name="repository"
)
started_at = sgqlc.types.Field(DateTime, graphql_name="startedAt")
status = sgqlc.types.Field(
sgqlc.types.non_null(CheckStatusState), graphql_name="status"
)
steps = sgqlc.types.Field(
CheckStepConnection,
graphql_name="steps",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("number", sgqlc.types.Arg(Int, graphql_name="number", default=None)),
)
),
)
summary = sgqlc.types.Field(String, graphql_name="summary")
text = sgqlc.types.Field(String, graphql_name="text")
title = sgqlc.types.Field(String, graphql_name="title")
| CheckRun |
python | walkccc__LeetCode | solutions/2956. Find Common Elements Between Two Arrays/2956.py | {
"start": 0,
"end": 278
} | class ____:
def findIntersectionValues(
self,
nums1: list[int],
nums2: list[int],
) -> list[int]:
nums1Set = set(nums1)
nums2Set = set(nums2)
return [sum(num in nums2Set for num in nums1),
sum(num in nums1Set for num in nums2)]
| Solution |
python | coleifer__peewee | tests/keys.py | {
"start": 479,
"end": 592
} | class ____(TestModel):
name = CharField()
manufacturer = ForeignKeyField(Manufacturer, null=True)
| Component |
python | mlflow__mlflow | tests/dspy/test_dspy_autolog.py | {
"start": 10337,
"end": 10479
} | class ____(dspy.Retrieve):
def forward(self, query: str) -> list[str]:
time.sleep(0.1)
return ["test output"]
| DummyRetriever |
python | ansible__ansible | lib/ansible/module_utils/_internal/_patches/_socket_patch.py | {
"start": 252,
"end": 939
} | class ____(CallablePatch):
"""Patch `socket.getaddrinfo` so that its `port` arg works with `int` subclasses."""
target_container: t.ClassVar = socket
target_attribute = 'getaddrinfo'
@classmethod
def is_patch_needed(cls) -> bool:
with contextlib.suppress(OSError):
socket.getaddrinfo('127.0.0.1', _CustomInt(22))
return False
return True
def __call__(self, host, port, *args, **kwargs) -> t.Any:
if type(port) is not int and isinstance(port, int): # pylint: disable=unidiomatic-typecheck
port = int(port)
return type(self).unpatched_implementation(host, port, *args, **kwargs)
| GetAddrInfoPatch |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/unsupervised_learning/dcgan.py | {
"start": 490,
"end": 6384
} | class ____():
def __init__(self):
self.img_rows = 28
self.img_cols = 28
self.channels = 1
self.img_shape = (self.channels, self.img_rows, self.img_cols)
self.latent_dim = 100
optimizer = Adam(learning_rate=0.0002, b1=0.5)
loss_function = CrossEntropy
# Build the discriminator
self.discriminator = self.build_discriminator(optimizer, loss_function)
# Build the generator
self.generator = self.build_generator(optimizer, loss_function)
# Build the combined model
self.combined = NeuralNetwork(optimizer=optimizer, loss=loss_function)
self.combined.layers.extend(self.generator.layers)
self.combined.layers.extend(self.discriminator.layers)
print ()
self.generator.summary(name="Generator")
self.discriminator.summary(name="Discriminator")
def build_generator(self, optimizer, loss_function):
model = NeuralNetwork(optimizer=optimizer, loss=loss_function)
model.add(Dense(128 * 7 * 7, input_shape=(100,)))
model.add(Activation('leaky_relu'))
model.add(Reshape((128, 7, 7)))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling2D())
model.add(Conv2D(128, filter_shape=(3,3), padding='same'))
model.add(Activation("leaky_relu"))
model.add(BatchNormalization(momentum=0.8))
model.add(UpSampling2D())
model.add(Conv2D(64, filter_shape=(3,3), padding='same'))
model.add(Activation("leaky_relu"))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(1, filter_shape=(3,3), padding='same'))
model.add(Activation("tanh"))
return model
def build_discriminator(self, optimizer, loss_function):
model = NeuralNetwork(optimizer=optimizer, loss=loss_function)
model.add(Conv2D(32, filter_shape=(3,3), stride=2, input_shape=self.img_shape, padding='same'))
model.add(Activation('leaky_relu'))
model.add(Dropout(0.25))
model.add(Conv2D(64, filter_shape=(3,3), stride=2, padding='same'))
model.add(ZeroPadding2D(padding=((0,1),(0,1))))
model.add(Activation('leaky_relu'))
model.add(Dropout(0.25))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(128, filter_shape=(3,3), stride=2, padding='same'))
model.add(Activation('leaky_relu'))
model.add(Dropout(0.25))
model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(256, filter_shape=(3,3), stride=1, padding='same'))
model.add(Activation('leaky_relu'))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(2))
model.add(Activation('softmax'))
return model
def train(self, epochs, batch_size=128, save_interval=50):
mnist = fetch_mldata('MNIST original')
X = mnist.data.reshape((-1,) + self.img_shape)
y = mnist.target
# Rescale -1 to 1
X = (X.astype(np.float32) - 127.5) / 127.5
half_batch = int(batch_size / 2)
for epoch in range(epochs):
# ---------------------
# Train Discriminator
# ---------------------
self.discriminator.set_trainable(True)
# Select a random half batch of images
idx = np.random.randint(0, X.shape[0], half_batch)
imgs = X[idx]
# Sample noise to use as generator input
noise = np.random.normal(0, 1, (half_batch, 100))
# Generate a half batch of images
gen_imgs = self.generator.predict(noise)
valid = np.concatenate((np.ones((half_batch, 1)), np.zeros((half_batch, 1))), axis=1)
fake = np.concatenate((np.zeros((half_batch, 1)), np.ones((half_batch, 1))), axis=1)
# Train the discriminator
d_loss_real, d_acc_real = self.discriminator.train_on_batch(imgs, valid)
d_loss_fake, d_acc_fake = self.discriminator.train_on_batch(gen_imgs, fake)
d_loss = 0.5 * (d_loss_real + d_loss_fake)
d_acc = 0.5 * (d_acc_real + d_acc_fake)
# ---------------------
# Train Generator
# ---------------------
# We only want to train the generator for the combined model
self.discriminator.set_trainable(False)
# Sample noise and use as generator input
noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
# The generator wants the discriminator to label the generated samples as valid
valid = np.concatenate((np.ones((batch_size, 1)), np.zeros((batch_size, 1))), axis=1)
# Train the generator
g_loss, g_acc = self.combined.train_on_batch(noise, valid)
# Display the progress
print ("%d [D loss: %f, acc: %.2f%%] [G loss: %f, acc: %.2f%%]" % (epoch, d_loss, 100*d_acc, g_loss, 100*g_acc))
# If at save interval => save generated image samples
if epoch % save_interval == 0:
self.save_imgs(epoch)
def save_imgs(self, epoch):
r, c = 5, 5
noise = np.random.normal(0, 1, (r * c, 100))
gen_imgs = self.generator.predict(noise)
# Rescale images 0 - 1 (from -1 to 1)
gen_imgs = 0.5 * (gen_imgs + 1)
fig, axs = plt.subplots(r, c)
plt.suptitle("Deep Convolutional Generative Adversarial Network")
cnt = 0
for i in range(r):
for j in range(c):
axs[i,j].imshow(gen_imgs[cnt,0,:,:], cmap='gray')
axs[i,j].axis('off')
cnt += 1
fig.savefig("mnist_%d.png" % epoch)
plt.close()
if __name__ == '__main__':
dcgan = DCGAN()
dcgan.train(epochs=200000, batch_size=64, save_interval=50)
| DCGAN |
python | pytorch__pytorch | torch/_export/db/examples/dictionary.py | {
"start": 41,
"end": 404
} | class ____(torch.nn.Module):
"""
Dictionary structures are inlined and flattened along tracing.
"""
def forward(self, x, y):
elements = {}
elements["x2"] = x * x
y = y * elements["x2"]
return {"y": y}
example_args = (torch.randn(3, 2), torch.tensor(4))
tags = {"python.data-structure"}
model = Dictionary()
| Dictionary |
python | apache__airflow | helm-tests/tests/helm_tests/airflow_aux/test_pod_launcher_role.py | {
"start": 914,
"end": 4719
} | class ____:
"""Tests pod launcher."""
@pytest.mark.parametrize(
("executor", "rbac", "allow", "expected_accounts"),
[
("CeleryKubernetesExecutor", True, True, ["scheduler", "worker"]),
("KubernetesExecutor", True, True, ["scheduler", "worker"]),
("CeleryExecutor", True, True, ["worker"]),
("LocalExecutor", True, True, ["scheduler"]),
("LocalExecutor", False, False, []),
("CeleryExecutor,KubernetesExecutor", True, True, ["scheduler", "worker"]),
],
)
def test_pod_launcher_role(self, executor, rbac, allow, expected_accounts):
docs = render_chart(
values={
"rbac": {"create": rbac},
"allowPodLaunching": allow,
"executor": executor,
},
show_only=["templates/rbac/pod-launcher-rolebinding.yaml"],
)
if expected_accounts:
for idx, suffix in enumerate(expected_accounts):
assert f"release-name-airflow-{suffix}" == jmespath.search(f"subjects[{idx}].name", docs[0])
else:
assert docs == []
@pytest.mark.parametrize(
("multiNamespaceMode", "namespace", "expectedRole", "expectedRoleBinding"),
[
(
True,
"namespace",
"namespace-release-name-pod-launcher-role",
"namespace-release-name-pod-launcher-rolebinding",
),
(
True,
"other-ns",
"other-ns-release-name-pod-launcher-role",
"other-ns-release-name-pod-launcher-rolebinding",
),
(False, "namespace", "release-name-pod-launcher-role", "release-name-pod-launcher-rolebinding"),
],
)
def test_pod_launcher_rolebinding_multi_namespace(
self, multiNamespaceMode, namespace, expectedRole, expectedRoleBinding
):
docs = render_chart(
namespace=namespace,
values={"webserver": {"allowPodLogReading": True}, "multiNamespaceMode": multiNamespaceMode},
show_only=["templates/rbac/pod-launcher-rolebinding.yaml"],
)
actualRoleBinding = jmespath.search("metadata.name", docs[0])
assert actualRoleBinding == expectedRoleBinding
actualRoleRef = jmespath.search("roleRef.name", docs[0])
assert actualRoleRef == expectedRole
actualKind = jmespath.search("kind", docs[0])
actualRoleRefKind = jmespath.search("roleRef.kind", docs[0])
if multiNamespaceMode:
assert actualKind == "ClusterRoleBinding"
assert actualRoleRefKind == "ClusterRole"
else:
assert actualKind == "RoleBinding"
assert actualRoleRefKind == "Role"
@pytest.mark.parametrize(
("multiNamespaceMode", "namespace", "expectedRole"),
[
(True, "namespace", "namespace-release-name-pod-launcher-role"),
(True, "other-ns", "other-ns-release-name-pod-launcher-role"),
(False, "namespace", "release-name-pod-launcher-role"),
],
)
def test_pod_launcher_role_multi_namespace(self, multiNamespaceMode, namespace, expectedRole):
docs = render_chart(
namespace=namespace,
values={"webserver": {"allowPodLogReading": True}, "multiNamespaceMode": multiNamespaceMode},
show_only=["templates/rbac/pod-launcher-role.yaml"],
)
actualRole = jmespath.search("metadata.name", docs[0])
assert actualRole == expectedRole
actualKind = jmespath.search("kind", docs[0])
if multiNamespaceMode:
assert actualKind == "ClusterRole"
else:
assert actualKind == "Role"
| TestPodLauncher |
python | pytorch__pytorch | benchmarks/tensorexpr/normalization.py | {
"start": 1796,
"end": 2157
} | class ____(NormalizationBench):
def forward(self):
y = self.layer_norm(self.data, [self.H, self.W])
return y
@staticmethod
def module():
return "layernorm"
benchmark.register_benchmark_class(BatchNormBench)
benchmark.register_benchmark_class(InstanceNormBench)
benchmark.register_benchmark_class(LayerNormBench)
| LayerNormBench |
python | dask__distributed | distributed/tests/test_actor.py | {
"start": 774,
"end": 913
} | class ____:
L: list = []
def __init__(self, dummy=None):
self.L = []
def append(self, x):
self.L.append(x)
| List |
python | pytorch__pytorch | torch/distributed/_tools/fsdp2_mem_tracker.py | {
"start": 3837,
"end": 3977
} | class ____(Enum):
PRE_FW = auto()
FW = auto()
POST_FW = auto()
PRE_BW = auto()
BW = auto()
POST_BW = auto()
| _FSDPState |
python | geekcomputers__Python | linear-algebra-python/src/lib.py | {
"start": 6916,
"end": 12479
} | class ____(object):
"""
class: Matrix
This class represents a arbitrary matrix.
Overview about the methods:
__str__() : returns a string representation
operator * : implements the matrix vector multiplication
implements the matrix-scalar multiplication.
changeComponent(x,y,value) : changes the specified component.
component(x,y) : returns the specified component.
width() : returns the width of the matrix
height() : returns the height of the matrix
operator + : implements the matrix-addition.
operator - _ implements the matrix-subtraction
"""
def __init__(self, matrix, w, h):
"""
simple constructor for initialzes
the matrix with components.
"""
self.__matrix = matrix
self.__width = w
self.__height = h
def __str__(self):
"""
returns a string representation of this
matrix.
"""
ans = ""
for i in range(self.__height):
ans += "|"
for j in range(self.__width):
if j < self.__width - 1:
ans += str(self.__matrix[i][j]) + ","
else:
ans += str(self.__matrix[i][j]) + "|\n"
return ans
def changeComponent(self, x, y, value):
"""
changes the x-y component of this matrix
"""
if x >= 0 and x < self.__height and y >= 0 and y < self.__width:
self.__matrix[x][y] = value
else:
raise Exception("changeComponent: indices out of bounds")
def component(self, x, y):
"""
returns the specified (x,y) component
"""
if x >= 0 and x < self.__height and y >= 0 and y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("changeComponent: indices out of bounds")
def width(self):
"""
getter for the width
"""
return self.__width
def height(self):
"""
getter for the height
"""
return self.__height
def __mul__(self, other):
"""
implements the matrix-vector multiplication.
implements the matrix-scalar multiplication
"""
if isinstance(other, Vector): # vector-matrix
if other.size() == self.__width:
ans = zeroVector(self.__height)
for i in range(self.__height):
summe = 0
for j in range(self.__width):
summe += other.component(j) * self.__matrix[i][j]
ans.changeComponent(i, summe)
summe = 0
return ans
else:
raise Exception(
"vector must have the same size as the "
+ "number of columns of the matrix!"
)
elif isinstance(other, int) or isinstance(other, float): # matrix-scalar
matrix = []
for i in range(self.__height):
row = []
for j in range(self.__width):
row.append(self.__matrix[i][j] * other)
matrix.append(row)
return Matrix(matrix, self.__width, self.__height)
def __add__(self, other):
"""
implements the matrix-addition.
"""
if self.__width == other.width() and self.__height == other.height():
matrix = []
for i in range(self.__height):
row = []
for j in range(self.__width):
row.append(self.__matrix[i][j] + other.component(i, j))
matrix.append(row)
return Matrix(matrix, self.__width, self.__height)
else:
raise Exception("matrix must have the same dimension!")
def __sub__(self, other):
"""
implements the matrix-subtraction.
"""
if self.__width == other.width() and self.__height == other.height():
matrix = []
for i in range(self.__height):
row = []
for j in range(self.__width):
row.append(self.__matrix[i][j] - other.component(i, j))
matrix.append(row)
return Matrix(matrix, self.__width, self.__height)
else:
raise Exception("matrix must have the same dimension!")
def __eq__(self, other):
"""
returns true if the matrices are equal otherwise false.
"""
ans = True
if self.__width == other.width() and self.__height == other.height():
for i in range(self.__height):
for j in range(self.__width):
if self.__matrix[i][j] != other.component(i, j):
ans = False
break
else:
ans = False
return ans
def squareZeroMatrix(N):
"""
returns a square zero-matrix of dimension NxN
"""
ans = []
for i in range(N):
row = []
for j in range(N):
row.append(0)
ans.append(row)
return Matrix(ans, N, N)
def randomMatrix(W, H, a, b):
"""
returns a random matrix WxH with integer components
between 'a' and 'b'
"""
matrix = []
random.seed(None)
for i in range(H):
row = []
for j in range(W):
row.append(random.randint(a, b))
matrix.append(row)
return Matrix(matrix, W, H)
| Matrix |
python | fluentpython__example-code | 21-class-metaprog/evaltime_meta.py | {
"start": 383,
"end": 591
} | class ____(metaclass=MetaAleph):
print('<[6]> ClassFive body')
def __init__(self):
print('<[7]> ClassFive.__init__')
def method_z(self):
print('<[8]> ClassFive.method_y')
| ClassFive |
python | google__jax | tests/pallas/tpu_sparsecore_pallas_test.py | {
"start": 1234,
"end": 2733
} | class ____(jtu.JaxTestCase):
COMPILER_OPTIONS = {"xla_tpu_use_tc_device_shape_on_sc": "false"}
def setUp(self):
if not jtu.is_device_tpu(5, "p") and not jtu.is_device_tpu_at_least(6):
self.skipTest("SparseCore only supported on TPU v5p+")
super().setUp()
@property
def sc_info(self):
return plsc.get_sparse_core_info()
def vector_subcore_kernel(self, **kwargs):
assert "compiler_params" not in kwargs
def wrapper(f):
f = pl.pallas_call(
f,
compiler_params=pltpu.CompilerParams(
kernel_type=pltpu.KernelType.SC_VECTOR_SUBCORE
),
**kwargs,
)
return jax.jit(f, compiler_options=self.COMPILER_OPTIONS)
return wrapper
def kernel(self, *args, jax_compiler_options=None, **kwargs):
if jax_compiler_options is None:
jax_compiler_options = self.COMPILER_OPTIONS
# We only implement the decorator version of pl.kernel for now.
def wrapper(f):
f = pl.kernel(f, *args, **kwargs)
return jax.jit(f, compiler_options=jax_compiler_options)
return wrapper
@property
def uses_tc_tiling(self):
return self.COMPILER_OPTIONS.get(
"xla_tpu_use_tc_device_shape_on_sc", "false"
) == "true"
def skip_if_tc_tiling(self, reason: str = ""):
use_tc_tiling = self.COMPILER_OPTIONS.get(
"xla_tpu_use_tc_device_shape_on_sc", "false"
)
if use_tc_tiling == "true":
self.skipTest(f"TC tiling is not supported. {reason}")
| PallasSCTest |
python | allegroai__clearml | clearml/backend_api/services/v2_13/events.py | {
"start": 98177,
"end": 101666
} | class ____(Request):
"""
Used to compare scalar stats histogram of multiple tasks
:param tasks: List of task Task IDs. Maximum amount of tasks is 10
:type tasks: Sequence[str]
:param samples: The amount of histogram points to return. Optional, the default
value is 6000
:type samples: int
:param key: Histogram x axis to use: iter - iteration number iso_time - event
time as ISO formatted string timestamp - event timestamp as milliseconds since
epoch
:type key: ScalarKeyEnum
"""
_service = "events"
_action = "multi_task_scalar_metrics_iter_histogram"
_version = "2.13"
_schema = {
"definitions": {
"scalar_key_enum": {
"enum": ["iter", "timestamp", "iso_time"],
"type": "string",
}
},
"properties": {
"key": {
"$ref": "#/definitions/scalar_key_enum",
"description": "\n Histogram x axis to use:\n iter - iteration number\n iso_time - event time as ISO formatted string\n timestamp - event timestamp as milliseconds since epoch\n ",
},
"samples": {
"description": "The amount of histogram points to return. Optional, the default value is 6000",
"type": "integer",
},
"tasks": {
"description": "List of task Task IDs. Maximum amount of tasks is 10",
"items": {"description": "List of task Task IDs", "type": "string"},
"type": "array",
},
},
"required": ["tasks"],
"type": "object",
}
def __init__(self, tasks: List[str], samples: Optional[int] = None, key: Any = None, **kwargs: Any) -> None:
super(MultiTaskScalarMetricsIterHistogramRequest, self).__init__(**kwargs)
self.tasks = tasks
self.samples = samples
self.key = key
@schema_property("tasks")
def tasks(self) -> List[str]:
return self._property_tasks
@tasks.setter
def tasks(self, value: List[str]) -> None:
if value is None:
self._property_tasks = None
return
self.assert_isinstance(value, "tasks", (list, tuple))
self.assert_isinstance(value, "tasks", six.string_types, is_array=True)
self._property_tasks = value
@schema_property("samples")
def samples(self) -> Optional[int]:
return self._property_samples
@samples.setter
def samples(self, value: Optional[int]) -> None:
if value is None:
self._property_samples = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "samples", six.integer_types)
self._property_samples = value
@schema_property("key")
def key(self) -> Any:
return self._property_key
@key.setter
def key(self, value: Any) -> None:
if value is None:
self._property_key = None
return
if isinstance(value, six.string_types):
try:
value = ScalarKeyEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "key", enum.Enum)
self._property_key = value
| MultiTaskScalarMetricsIterHistogramRequest |
python | django__django | tests/admin_views/models.py | {
"start": 8306,
"end": 8378
} | class ____(models.Model):
name = models.CharField(max_length=60)
| Media |
python | openai__gym | tests/vector/utils.py | {
"start": 1419,
"end": 2225
} | class ____(gym.Env):
def __init__(self, slow_reset=0.3):
super().__init__()
self.slow_reset = slow_reset
self.observation_space = Box(
low=0, high=255, shape=(HEIGHT, WIDTH, 3), dtype=np.uint8
)
self.action_space = Box(low=0.0, high=1.0, shape=(), dtype=np.float32)
def reset(self, *, seed: Optional[int] = None, options: Optional[dict] = None):
super().reset(seed=seed)
if self.slow_reset > 0:
time.sleep(self.slow_reset)
return self.observation_space.sample(), {}
def step(self, action):
time.sleep(action)
observation = self.observation_space.sample()
reward, terminated, truncated = 0.0, False, False
return observation, reward, terminated, truncated, {}
| UnittestSlowEnv |
python | getsentry__sentry | src/sentry/testutils/hybrid_cloud.py | {
"start": 5943,
"end": 6033
} | class ____(TypedDict):
transaction: str | None
queries: list[str]
| TransactionDetails |
python | spyder-ide__spyder | spyder/widgets/collectionseditor.py | {
"start": 25998,
"end": 59934
} | class ____(QTableView, SpyderWidgetMixin):
"""Base collection editor table view"""
CONF_SECTION = 'variable_explorer'
sig_files_dropped = Signal(list)
redirect_stdio = Signal(bool)
sig_free_memory_requested = Signal()
sig_editor_creation_started = Signal()
sig_editor_shown = Signal()
def __init__(self, parent):
super().__init__(parent=parent)
# Main attributes
self.array_filename = None
self.menu = None
self.empty_ws_menu = None
self.paste_action = None
self.copy_action = None
self.edit_action = None
self.plot_action = None
self.hist_action = None
self.imshow_action = None
self.save_array_action = None
self.insert_action = None
self.insert_action_above = None
self.insert_action_below = None
self.remove_action = None
self.minmax_action = None
self.rename_action = None
self.duplicate_action = None
self.view_action = None
self.resize_action = None
self.resize_columns_action = None
self.delegate = None
self.proxy_model = None
self.source_model = None
self.setAcceptDrops(True)
self.automatic_column_width = True
# Headder attributes
self.setHorizontalHeader(BaseHeaderView(parent=self))
self.horizontalHeader().sig_user_resized_section.connect(
self.user_resize_columns)
# There is no need for us to show this header because we're not using
# it to show any information on it.
self.verticalHeader().hide()
# To use mouseMoveEvent
self.setMouseTracking(True)
# Delay editing values for a bit so that when users do a double click
# (the default behavior for editing since Spyder was created; now they
# only have to do a single click), our editor dialogs are focused.
self.__index_clicked = None
self._edit_value_timer = QTimer(self)
self._edit_value_timer.setInterval(100)
self._edit_value_timer.setSingleShot(True)
self._edit_value_timer.timeout.connect(self._edit_value)
# To paint the select row button and check if we are over it
self.hovered_row = -1
self.over_select_row_button = False
def setup_table(self):
    """Setup table: header behavior and selection-change hooks."""
    self.horizontalHeader().setStretchLastSection(True)
    self.horizontalHeader().setSectionsMovable(True)
    self.adjust_columns()

    # Actions to take when the selection changes
    self.selectionModel().selectionChanged.connect(self.refresh_menu)
    self.selectionModel().selectionChanged.connect(
        # We need this because selected_rows is cached
        self.selected_rows.cache_clear
    )
def setup_menu(self):
    """
    Create this view's actions and context menus.

    Returns
    -------
    The context menu used when the editor has data. A second menu
    (``empty_ws_menu``) is also created for the empty-workspace case.
    """
    self.resize_action = self.create_action(
        name=CollectionsEditorActions.ResizeRows,
        text=_("Resize rows to contents"),
        icon=ima.icon('collapse_row'),
        triggered=self.resizeRowsToContents,
        register_action=False
    )
    self.resize_columns_action = self.create_action(
        name=CollectionsEditorActions.ResizeColumns,
        text=_("Resize columns to contents"),
        icon=ima.icon('collapse_column'),
        triggered=self.resize_column_contents,
        register_action=False
    )
    self.paste_action = self.create_action(
        # Fixed: this was CollectionsEditorActions.ResizeRows (copy-paste
        # slip), which gave this action the same name as resize_action.
        # NOTE(review): assumes the enum defines ``Paste`` — confirm
        # against CollectionsEditorActions.
        name=CollectionsEditorActions.Paste,
        text=_("Paste"),
        icon=ima.icon('editpaste'),
        triggered=self.paste,
        register_action=False
    )
    self.copy_action = self.create_action(
        name=CollectionsEditorActions.Copy,
        text=_("Copy"),
        icon=ima.icon('editcopy'),
        triggered=self.copy,
        register_action=False
    )
    self.edit_action = self.create_action(
        name=CollectionsEditorActions.Edit,
        text=_("Edit"),
        icon=ima.icon('edit'),
        triggered=self.edit_item,
        register_action=False
    )
    # Plot/hist/imshow/save start hidden; refresh_menu shows them only when
    # the selected value supports them.
    self.plot_action = self.create_action(
        name=CollectionsEditorActions.Plot,
        text=_("Plot"),
        icon=ima.icon('plot'),
        triggered=lambda: self.plot_item('plot'),
        register_action=False
    )
    self.plot_action.setVisible(False)
    self.hist_action = self.create_action(
        name=CollectionsEditorActions.Histogram,
        text=_("Histogram"),
        icon=ima.icon('hist'),
        triggered=lambda: self.plot_item('hist'),
        register_action=False
    )
    self.hist_action.setVisible(False)
    self.imshow_action = self.create_action(
        name=CollectionsEditorActions.ShowImage,
        text=_("Show image"),
        icon=ima.icon('imshow'),
        triggered=self.imshow_item,
        register_action=False
    )
    self.imshow_action.setVisible(False)
    self.save_array_action = self.create_action(
        name=CollectionsEditorActions.Save,
        text=_("Save"),
        icon=ima.icon('filesave'),
        triggered=self.save_array,
        register_action=False
    )
    self.save_array_action.setVisible(False)
    self.insert_action = self.create_action(
        name=CollectionsEditorActions.Insert,
        text=_("Insert"),
        icon=ima.icon('insert'),
        triggered=lambda: self.insert_item(below=False),
        register_action=False
    )
    self.insert_action_above = self.create_action(
        name=CollectionsEditorActions.InsertAbove,
        text=_("Insert above"),
        icon=ima.icon('insert_above'),
        triggered=lambda: self.insert_item(below=False),
        register_action=False
    )
    self.insert_action_below = self.create_action(
        name=CollectionsEditorActions.InsertBelow,
        text=_("Insert below"),
        icon=ima.icon('insert_below'),
        triggered=lambda: self.insert_item(below=True),
        register_action=False
    )
    self.remove_action = self.create_action(
        name=CollectionsEditorActions.Remove,
        text=_("Remove"),
        icon=ima.icon('editdelete'),
        triggered=self.remove_item,
        register_action=False
    )
    self.rename_action = self.create_action(
        name=CollectionsEditorActions.Rename,
        text=_("Rename"),
        icon=ima.icon('rename'),
        triggered=self.rename_item,
        register_action=False
    )
    self.duplicate_action = self.create_action(
        name=CollectionsEditorActions.Duplicate,
        text=_("Duplicate"),
        icon=ima.icon('edit_add'),
        triggered=self.duplicate_item,
        register_action=False
    )
    self.view_action = self.create_action(
        name=CollectionsEditorActions.ViewObject,
        text=_("View with the Object Explorer"),
        icon=ima.icon('outline_explorer'),
        triggered=self.view_item,
        register_action=False
    )

    # Context menu shown when the editor has data, grouped in sections.
    menu = self.create_menu(
        CollectionsEditorMenus.Context,
        register=False
    )
    for action in [self.copy_action, self.paste_action, self.rename_action,
                   self.edit_action, self.save_array_action]:
        self.add_item_to_menu(
            action,
            menu,
            section=CollectionsEditorContextMenuSections.Edit
        )
    for action in [self.insert_action, self.insert_action_above,
                   self.insert_action_below, self.duplicate_action,
                   self.remove_action]:
        self.add_item_to_menu(
            action,
            menu,
            section=CollectionsEditorContextMenuSections.AddRemove
        )
    for action in [self.view_action, self.plot_action,
                   self.hist_action, self.imshow_action]:
        self.add_item_to_menu(
            action,
            menu,
            section=CollectionsEditorContextMenuSections.View
        )

    # Reduced menu shown when the workspace/collection is empty.
    self.empty_ws_menu = self.create_menu(
        CollectionsEditorMenus.ContextIfEmpty,
        register=False
    )
    for action in [self.insert_action, self.paste_action]:
        self.add_item_to_menu(action, self.empty_ws_menu)

    return menu
# ------ Remote/local API -------------------------------------------------
# The methods below form the remote/local API: concrete subclasses (local
# dict editors or kernel-backed remote views) must override them. The base
# class only raises NotImplementedError.
def remove_values(self, keys):
    """Remove values from data"""
    raise NotImplementedError

def copy_value(self, orig_key, new_key):
    """Copy value"""
    raise NotImplementedError

def new_value(self, key, value):
    """Create new value in data"""
    raise NotImplementedError

def is_list(self, key):
    """Return True if variable is a list, a set or a tuple"""
    raise NotImplementedError

def get_len(self, key):
    """Return sequence length"""
    raise NotImplementedError

def is_data_frame(self, key):
    """Return True if variable is a pandas dataframe"""
    raise NotImplementedError

def is_array(self, key):
    """Return True if variable is a numpy array"""
    raise NotImplementedError

def is_image(self, key):
    """Return True if variable is a PIL.Image image"""
    raise NotImplementedError

def is_dict(self, key):
    """Return True if variable is a dictionary"""
    raise NotImplementedError

def get_array_shape(self, key):
    """Return array's shape"""
    raise NotImplementedError

def get_array_ndim(self, key):
    """Return array's ndim"""
    raise NotImplementedError

def oedit(self, key):
    """Edit item"""
    raise NotImplementedError

def plot(self, key, funcname):
    """Plot item"""
    raise NotImplementedError

def imshow(self, key):
    """Show item's image"""
    raise NotImplementedError

def show_image(self, key):
    """Show image (item is a PIL image)"""
    raise NotImplementedError
#--------------------------------------------------------------------------
def refresh_menu(self):
    """
    Refresh context menu: enable/disable and show/hide actions according
    to the current selection and the type of the edited collection.
    """
    index = self.currentIndex()
    data = self.source_model.get_data()

    is_list_instance = isinstance(data, list)
    is_dict_instance = isinstance(data, dict)

    def indexes_in_same_row():
        # True when the selection spans a single row (or is empty/single).
        indexes = self.selectedIndexes()
        if len(indexes) > 1:
            rows = [idx.row() for idx in indexes]
            return len(set(rows)) == 1
        else:
            return True

    # Enable/disable actions
    # Editing requires a mutable container (not tuple/set/frozenset), a
    # single-row selection and a writable view.
    # NOTE(review): self.readonly is not set in __init__ — presumably set
    # by subclasses; confirm.
    condition_edit = (
        (not isinstance(data, (tuple, set, frozenset))) and
        index.isValid() and
        (len(self.selectedIndexes()) > 0) and
        indexes_in_same_row() and
        not self.readonly
    )
    self.edit_action.setEnabled(condition_edit)
    self.insert_action_above.setEnabled(condition_edit)
    self.insert_action_below.setEnabled(condition_edit)
    self.duplicate_action.setEnabled(condition_edit)
    self.rename_action.setEnabled(condition_edit)
    self.plot_action.setEnabled(condition_edit)
    self.hist_action.setEnabled(condition_edit)
    self.imshow_action.setEnabled(condition_edit)
    self.save_array_action.setEnabled(condition_edit)

    condition_select = (
        index.isValid() and
        (len(self.selectedIndexes()) > 0)
    )
    self.view_action.setEnabled(
        condition_select and indexes_in_same_row())
    self.copy_action.setEnabled(condition_select)

    # Removal allows multi-row selections but still needs mutability.
    condition_remove = (
        (not isinstance(data, (tuple, set, frozenset))) and
        index.isValid() and
        (len(self.selectedIndexes()) > 0) and
        not self.readonly
    )
    self.remove_action.setEnabled(condition_remove)
    self.insert_action.setEnabled(
        is_dict_instance and not self.readonly)
    self.paste_action.setEnabled(
        is_dict_instance and not self.readonly)

    # Hide/show actions depending on the selected value's type/shape.
    if index.isValid():
        if self.proxy_model:
            key = self.proxy_model.get_key(index)
        else:
            key = self.source_model.get_key(index)
        is_list = self.is_list(key)
        is_array = self.is_array(key) and self.get_len(key) != 0
        is_dataframe = self.is_data_frame(key) and self.get_len(key) != 0
        # Plot: 1-D/2-D arrays or dataframes; hist: 1-D arrays; imshow:
        # 2-D plottables or PIL images.
        condition_plot = (
            is_array and len(self.get_array_shape(key)) <= 2
        ) or is_dataframe
        condition_hist = (is_array and self.get_array_ndim(key) == 1)
        condition_imshow = condition_plot and self.get_array_ndim(key) == 2
        condition_imshow = condition_imshow or self.is_image(key)
    else:
        is_array = condition_plot = condition_imshow = is_list \
            = condition_hist = False

    self.plot_action.setVisible(condition_plot or is_list)
    self.hist_action.setVisible(condition_hist or is_list)
    self.insert_action.setVisible(is_dict_instance)
    self.insert_action_above.setVisible(is_list_instance)
    self.insert_action_below.setVisible(is_list_instance)
    self.rename_action.setVisible(is_dict_instance)
    self.paste_action.setVisible(is_dict_instance)
    self.imshow_action.setVisible(condition_imshow)
    self.save_array_action.setVisible(is_array)
def resize_column_contents(self):
    """Resize columns to contents."""
    self.automatic_column_width = True
    self.adjust_columns()

def user_resize_columns(self, logical_index, old_size, new_size):
    """Handle the user resize action."""
    # Once the user resizes a section manually, stop auto-sizing columns.
    self.automatic_column_width = False

def adjust_columns(self):
    """Resize two first columns to contents"""
    # NOTE(review): docstring says "two first columns" but range(3)
    # resizes three — confirm which is intended.
    if self.automatic_column_width:
        for col in range(3):
            self.resizeColumnToContents(col)

def set_data(self, data):
    """Set table data"""
    # NOTE(review): self.dictfilter is not set in this base class —
    # presumably provided by subclasses; confirm.
    if data is not None:
        self.source_model.set_data(data, self.dictfilter)
        self.source_model.reset()
        # Sort table using current sort column and order
        self.setSortingEnabled(True)
def _edit_value(self):
    # Fired by _edit_value_timer (started in mousePressEvent) so that
    # editor dialogs are focused on double clicks; see __init__.
    self.edit(self.__index_clicked)

def _update_hovered_row(self, event):
    """Track which row the cursor is over, for the hover painting."""
    current_index = self.indexAt(event.pos())
    if current_index.isValid():
        self.hovered_row = current_index.row()
        self.viewport().update()
    else:
        self.hovered_row = -1
def mousePressEvent(self, event):
    """Reimplement Qt method: single left click selects and (after a short
    delay) edits the row's value."""
    # Non-left clicks and clicks on the select-row button keep the
    # default behavior.
    if event.button() != Qt.LeftButton or self.over_select_row_button:
        QTableView.mousePressEvent(self, event)
        return

    index_clicked = self.indexAt(event.pos())
    if index_clicked.isValid():
        if (
            index_clicked == self.currentIndex()
            and index_clicked in self.selectedIndexes()
        ):
            # Clicking an already-selected cell toggles the selection off.
            self.clearSelection()
        else:
            row = index_clicked.row()

            # TODO: Remove hard coded "Value" column number (3 here)
            self.__index_clicked = self.model().index(row, 3)

            # Wait for a bit to edit values so dialogs are focused on
            # double clicks. That will preserve the way things worked in
            # Spyder 5 for users that are accustomed to do double clicks.
            self._edit_value_timer.start()
    else:
        self.clearSelection()
        event.accept()

def mouseDoubleClickEvent(self, event):
    """Reimplement Qt method"""
    # Make this event do nothing because variables are now edited with a
    # single click.
    pass
def mouseMoveEvent(self, event):
    """Actions to take when the mouse moves over the widget."""
    self.over_select_row_button = False
    self._update_hovered_row(event)

    if self.rowAt(event.y()) != -1:
        # The +3 here is necessary to avoid mismatches when trying to click
        # the button in a position too close to its left border.
        select_row_button_width = SELECT_ROW_BUTTON_SIZE + 3

        # Include scrollbar width when computing the select row button
        # width
        if self.verticalScrollBar().isVisible():
            select_row_button_width += self.verticalScrollBar().width()

        # Decide if the cursor is on top of the select row button
        if (self.width() - event.x()) < select_row_button_width:
            self.over_select_row_button = True
            self.setCursor(Qt.ArrowCursor)
        else:
            self.setCursor(Qt.PointingHandCursor)
    else:
        self.setCursor(Qt.ArrowCursor)

def keyPressEvent(self, event):
    """Reimplement Qt methods: Delete removes, F2 renames, and the
    standard copy/paste shortcuts are honored."""
    if event.key() == Qt.Key_Delete:
        self.remove_item()
    elif event.key() == Qt.Key_F2:
        self.rename_item()
    elif event == QKeySequence.Copy:
        self.copy()
    elif event == QKeySequence.Paste:
        self.paste()
    else:
        QTableView.keyPressEvent(self, event)
def contextMenuEvent(self, event):
    """Reimplement Qt method: show the full menu when there is data,
    otherwise the reduced empty-workspace menu."""
    # NOTE(review): self.menu is None in this base class — presumably a
    # subclass assigns it (e.g. from setup_menu()'s return value); confirm.
    if self.source_model.showndata:
        self.refresh_menu()
        self.menu.popup(event.globalPos())
        event.accept()
    else:
        self.empty_ws_menu.popup(event.globalPos())
        event.accept()

def dragEnterEvent(self, event):
    """Allow user to drag files"""
    if mimedata2url(event.mimeData()):
        event.accept()
    else:
        event.ignore()

def dragMoveEvent(self, event):
    """Allow user to move files"""
    if mimedata2url(event.mimeData()):
        event.setDropAction(Qt.CopyAction)
        event.accept()
    else:
        event.ignore()

def dropEvent(self, event):
    """Allow user to drop supported files"""
    urls = mimedata2url(event.mimeData())
    if urls:
        event.setDropAction(Qt.CopyAction)
        event.accept()
        # Let the container decide what to do with the dropped files.
        self.sig_files_dropped.emit(urls)
    else:
        event.ignore()
def leaveEvent(self, event):
    """Actions to take when the mouse leaves the widget."""
    # Clear the hover highlight when the cursor exits the view.
    self.hovered_row = -1
    super().leaveEvent(event)

def wheelEvent(self, event):
    """Actions to take on mouse wheel."""
    # Scrolling changes which row is under the cursor.
    self._update_hovered_row(event)
    super().wheelEvent(event)

def showEvent(self, event):
    """Resize columns when the widget is shown."""
    # This is probably the best we can do to adjust the columns width to
    # their header contents at startup. However, it doesn't work for all
    # fonts and font sizes and perhaps it depends on the user's screen dpi
    # as well. See the discussion in
    # https://github.com/spyder-ide/spyder/pull/20933#issuecomment-1585474443
    # and the comments below for more details.
    self.adjust_columns()
    super().showEvent(event)
def _deselect_index(self, index):
    """
    Deselect index after any operation that adds or removes rows to/from
    the editor.

    Notes
    -----
    * This avoids showing the wrong buttons in the editor's toolbar when
      the operation is completed.
    * Also, if we leave something selected, then the next operation won't
      introduce the item in the expected row. That's why we need to force
      users to select a row again after this.
    """
    # Select-then-deselect forces the selection model to emit change
    # signals even if the index was already deselected.
    self.selectionModel().select(index, QItemSelectionModel.Select)
    self.selectionModel().select(index, QItemSelectionModel.Deselect)

@Slot()
def edit_item(self):
    """Edit item"""
    index = self.currentIndex()
    if not index.isValid():
        return

    # TODO: Remove hard coded "Value" column number (3 here)
    self.edit(self.model().index(index.row(), 3))
@Slot()
def remove_item(self, force=False):
    """Remove item.

    Parameters
    ----------
    force: bool
        When True, skip the confirmation dialog.
    """
    current_index = self.currentIndex()
    indexes = self.selectedIndexes()

    if not indexes:
        return

    for index in indexes:
        if not index.isValid():
            return

    if not force:
        if not self.get_conf('show_remove_message_collections'):
            result = QMessageBox.Yes
        else:
            one = _("Do you want to remove the selected item?")
            more = _("Do you want to remove all selected items?")
            answer = MessageCheckBox(
                icon=QMessageBox.Question, parent=self
            )
            answer.set_checkbox_text(_("Don't ask again."))
            answer.set_checked(False)
            answer.set_check_visible(True)
            answer.setText(one if len(indexes) == 1 else more)
            answer.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
            result = answer.exec_()

            # Persist the "Don't ask again" choice.
            check = answer.is_checked()
            if check:
                self.set_conf('show_remove_message_collections', False)

    # When force is True, `result` is never evaluated (short-circuit).
    if force or result == QMessageBox.Yes:
        if self.proxy_model:
            idx_rows = unsorted_unique(
                [self.proxy_model.mapToSource(idx).row()
                 for idx in indexes])
        else:
            idx_rows = unsorted_unique([idx.row() for idx in indexes])
        keys = [self.source_model.keys[idx_row] for idx_row in idx_rows]
        self.remove_values(keys)

    # This avoids a segfault in our tests that doesn't happen when
    # removing items manually.
    if not running_under_pytest():
        self._deselect_index(current_index)
def copy_item(self, erase_original=False, new_name=None):
    """Copy item.

    Backs both Duplicate (erase_original=False) and Rename
    (erase_original=True). Only single-row selections are handled.
    """
    current_index = self.currentIndex()
    indexes = self.selectedIndexes()

    if not indexes:
        return

    if self.proxy_model:
        idx_rows = unsorted_unique(
            [self.proxy_model.mapToSource(idx).row() for idx in indexes])
    else:
        idx_rows = unsorted_unique([idx.row() for idx in indexes])

    if len(idx_rows) > 1 or not indexes[0].isValid():
        return

    orig_key = self.source_model.keys[idx_rows[0]]
    if erase_original:
        # Rename only makes sense for string keys.
        if not isinstance(orig_key, str):
            QMessageBox.warning(
                self,
                _("Warning"),
                _("You can only rename keys that are strings")
            )
            return

        title = _('Rename')
        field_text = _('New variable name:')
    else:
        title = _('Duplicate')
        field_text = _('Variable name:')

    data = self.source_model.get_data()
    if isinstance(data, (list, set, frozenset)):
        # Sequences get the next positional index as key.
        new_key, valid = len(data), True
    elif new_name is not None:
        new_key, valid = new_name, True
    else:
        new_key, valid = QInputDialog.getText(self, title, field_text,
                                              QLineEdit.Normal, orig_key)

    if valid and str(new_key):
        new_key = try_to_eval(str(new_key))
        if new_key == orig_key:
            return
        self.copy_value(orig_key, new_key)
        if erase_original:
            self.remove_values([orig_key])

    self._deselect_index(current_index)

@Slot()
def duplicate_item(self):
    """Duplicate item"""
    self.copy_item()

@Slot()
def rename_item(self, new_name=None):
    """Rename item"""
    # When triggered from a QAction, new_name is the action's bool
    # "checked" state — ignore it.
    if isinstance(new_name, bool):
        new_name = None
    self.copy_item(erase_original=True, new_name=new_name)
@Slot()
def insert_item(self, below=True):
    """Insert item.

    Parameters
    ----------
    below: bool
        Insert below (True) or above (False) the current row.
    """
    index = self.currentIndex()
    if not index.isValid():
        # No selection: append at the end.
        row = self.source_model.rowCount()
    else:
        if self.proxy_model:
            if below:
                row = self.proxy_model.mapToSource(index).row() + 1
            else:
                row = self.proxy_model.mapToSource(index).row()
        else:
            if below:
                row = index.row() + 1
            else:
                row = index.row()

    data = self.source_model.get_data()

    if isinstance(data, list):
        key = row
        data.insert(row, '')
    elif isinstance(data, dict):
        key, valid = QInputDialog.getText(self, _('Insert'), _('Key:'),
                                          QLineEdit.Normal)
        if valid and str(key):
            key = try_to_eval(str(key))
        else:
            return
    else:
        # Other container types don't support insertion.
        return

    value, valid = QInputDialog.getText(self, _('Insert'), _('Value:'),
                                        QLineEdit.Normal)

    if valid and str(value):
        self.new_value(key, try_to_eval(str(value)))
@Slot()
def view_item(self):
    """View item with the Object Explorer."""
    index = self.currentIndex()
    if not index.isValid():
        return

    # TODO: Remove hard coded "Value" column number (3 here)
    index = index.model().index(index.row(), 3)
    self.delegate.createEditor(self, None, index, object_explorer=True)

def __prepare_plot(self):
    """Return True if a plotting backend (guiqwt or matplotlib) can be
    imported; otherwise warn the user and return None (falsy)."""
    try:
        import guiqwt.pyplot  # analysis:ignore
        return True
    except Exception:
        # Fixed: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt.
        try:
            if 'matplotlib' not in sys.modules:
                import matplotlib  # noqa
            return True
        except Exception:
            QMessageBox.warning(self, _("Import error"),
                                _("Please install <b>matplotlib</b>"
                                  " or <b>guiqwt</b>."))
def plot_item(self, funcname):
    """Plot item.

    Parameters
    ----------
    funcname: str
        Name of the plotting function to apply ('plot' or 'hist').
    """
    index = self.currentIndex()
    if self.__prepare_plot():
        if self.proxy_model:
            key = self.source_model.get_key(
                self.proxy_model.mapToSource(index))
        else:
            key = self.source_model.get_key(index)
        try:
            self.plot(key, funcname)
        except (ValueError, TypeError) as error:
            QMessageBox.critical(self, _( "Plot"),
                                 _("<b>Unable to plot data.</b>"
                                   "<br><br>Error message:<br>%s"
                                   ) % str(error))

@Slot()
def imshow_item(self):
    """Imshow item: show PIL images directly, other values via imshow."""
    index = self.currentIndex()
    if self.__prepare_plot():
        if self.proxy_model:
            key = self.source_model.get_key(
                self.proxy_model.mapToSource(index))
        else:
            key = self.source_model.get_key(index)
        try:
            if self.is_image(key):
                self.show_image(key)
            else:
                self.imshow(key)
        except (ValueError, TypeError) as error:
            QMessageBox.critical(self, _( "Plot"),
                                 _("<b>Unable to show image.</b>"
                                   "<br><br>Error message:<br>%s"
                                   ) % str(error))
@Slot()
def save_array(self):
    """Save the selected Numpy array to a .npy file chosen by the user."""
    title = _( "Save array")
    if self.array_filename is None:
        self.array_filename = getcwd_or_home()

    # Pause stdio redirection while the native file dialog is open.
    self.redirect_stdio.emit(False)
    filename, _selfilter = getsavefilename(self, title,
                                           self.array_filename,
                                           _("NumPy arrays") + " (*.npy)")
    self.redirect_stdio.emit(True)

    if filename:
        self.array_filename = filename
        data = self.delegate.get_value(self.currentIndex())
        try:
            import numpy as np
            np.save(self.array_filename, data)
        except Exception as error:
            QMessageBox.critical(self, title,
                                 _("<b>Unable to save array</b>"
                                   "<br><br>Error message:<br>%s"
                                   ) % str(error))
@Slot()
def copy(self):
    """
    Copy text representation of objects to clipboard.

    Notes
    -----
    For Numpy arrays and dataframes we try to get a better representation
    by using their `savetxt` and `to_csv` methods, respectively.
    """
    clipboard = QApplication.clipboard()
    clipl = []

    # Track failure modes so we can show one message per category after
    # processing the whole selection.
    retrieve_failed = False
    array_failed = False
    dataframe_failed = False

    for idx in self.selectedIndexes():
        if not idx.isValid():
            continue

        # Prevent error when it's not possible to get the object's value
        # Fixes spyder-ide/spyder#12913
        try:
            obj = self.delegate.get_value(idx)
        except Exception:
            retrieve_failed = True
            continue

        # Check if we are trying to copy a numpy array, and if so make sure
        # to copy the whole thing in a tab separated format
        # NOTE(review): on the failure paths below `output` is never
        # closed; harmless for in-memory buffers.
        if (isinstance(obj, (np.ndarray, np.ma.MaskedArray)) and
                np.ndarray is not FakeObject):
            output = io.BytesIO()
            try:
                np.savetxt(output, obj, delimiter='\t')
            except Exception:
                array_failed = True
                continue
            obj = output.getvalue().decode('utf-8')
            output.close()
        elif (isinstance(obj, (pd.DataFrame, pd.Series)) and
                pd.DataFrame is not FakeObject):
            output = io.StringIO()
            try:
                obj.to_csv(output, sep='\t', index=True, header=True)
            except Exception:
                dataframe_failed = True
                continue
            obj = output.getvalue()
            output.close()
        elif isinstance(obj, bytes):
            obj = str(obj, 'utf8')
        else:
            obj = str(obj)

        clipl.append(obj)

    # Copy to clipboard the final result
    clipboard.setText('\n'.join(clipl))

    # Show appropriate error messages after we tried to copy all objects
    # selected by users.
    if retrieve_failed:
        QMessageBox.warning(
            self.parent(),
            _("Warning"),
            _(
                "It was not possible to retrieve the value of one or more "
                "of the variables you selected in order to copy them."
            ),
        )

    if array_failed and dataframe_failed:
        QMessageBox.warning(
            self,
            _("Warning"),
            _(
                "It was not possible to copy one or more of the "
                "dataframes and Numpy arrays you selected"
            ),
        )
    elif array_failed:
        QMessageBox.warning(
            self,
            _("Warning"),
            _(
                "It was not possible to copy one or more of the "
                "Numpy arrays you selected"
            ),
        )
    elif dataframe_failed:
        QMessageBox.warning(
            self,
            _("Warning"),
            _(
                "It was not possible to copy one or more of the "
                "dataframes you selected"
            ),
        )
def import_from_string(self, text, title=None):
    """Import data from string via the Import Wizard (dict targets only)."""
    data = self.source_model.get_data()

    # Check if data is a dict
    if not hasattr(data, "keys"):
        return

    editor = ImportWizard(
        self, text, title=title, contents_title=_("Clipboard contents"),
        varname=fix_reference_name("data", blacklist=list(data.keys())))
    if editor.exec_():
        var_name, clip_data = editor.get_data()
        self.new_value(var_name, clip_data)

@Slot()
def paste(self):
    """Import text/data/code from clipboard"""
    clipboard = QApplication.clipboard()
    cliptext = ''
    if clipboard.mimeData().hasText():
        cliptext = str(clipboard.text())
    if cliptext.strip():
        self.import_from_string(cliptext, title=_("Import from clipboard"))
    else:
        QMessageBox.warning(self, _( "Empty clipboard"),
                            _("Nothing to be imported from clipboard."))

@lru_cache(maxsize=1)
def selected_rows(self):
    """
    Get the rows currently selected.

    Notes
    -----
    The result of this function is cached because it's called in the paint
    method of CollectionsDelegate. So, we need it to run as quickly as
    possible.
    """
    # NOTE(review): lru_cache on an instance method keys on `self` and can
    # keep instances alive (ruff B019); maxsize=1 plus the cache_clear
    # hook connected in setup_table keeps this bounded here.
    return {
        index.row() for index in self.selectionModel().selectedRows()
    }
| BaseTableView |
python | pytorch__pytorch | torch/_inductor/cudagraph_utils.py | {
"start": 1460,
"end": 6917
} | class ____:
"""
Represents a function that you want to record for CUDA graph replay,
with a little more metadata so we can identify if we have an applicable
CUDA graph in our CUDA graph tree for it.
"""

# The callable to record/replay under cudagraphs.
model: Callable[..., Any]
# Input positions whose mutation is allowed (treated as static in
# check_for_mutation) — presumably parameters/buffers; confirm.
static_input_idxs: Sequence[int]
# Identifier of this function (FunctionID defined elsewhere in the file).
id: FunctionID
# Tensors baked into the graph at compile time.
constants: tuple[torch.Tensor, ...]
# Per-placeholder metadata snapshots (see PlaceholderInfo).
placeholders: Sequence[PlaceholderInfo]
# Input positions this function mutates in place.
mutated_input_idxs: Sequence[int]
def get_mutating_use_stack_trace_from_node(
    placeholder_node: torch.fx.Node,
) -> Optional[str]:
    """Best-effort stack trace of the use that mutates *placeholder_node*.

    A reinplaced placeholder may have a single, non-copy_ use — in that
    case that use's trace is returned. Otherwise the first copy_ user
    carrying a stack trace wins. Returns None when nothing matches.
    """
    users = list(placeholder_node.users)
    if len(users) == 1:
        return users[0].meta.get("stack_trace", None)

    copy_op = torch.ops.aten.copy_.default
    for user in users:
        if user.target is copy_op:
            trace = user.meta.get("stack_trace", None)
            if trace:
                return trace
    return None
def get_mutating_use_stack_trace(placeholder_info: PlaceholderInfo) -> Optional[str]:
    """Read the pre-computed mutating-use stack trace off *placeholder_info*."""
    return placeholder_info.mutating_use_stack_trace
def to_placeholder_info(placeholder_node: torch.fx.Node) -> PlaceholderInfo:
    """Snapshot the metadata of *placeholder_node* into a PlaceholderInfo."""
    mutating_trace = None
    user_infos = []
    # Recurse only from actual placeholders; the users themselves have a
    # different op, so recursion stops after one level — we only need the
    # users' own stack traces.
    if placeholder_node.op == "placeholder":
        user_infos = [to_placeholder_info(u) for u in placeholder_node.users]
        mutating_trace = get_mutating_use_stack_trace_from_node(placeholder_node)
    return PlaceholderInfo(
        placeholder_node.name,
        placeholder_node.meta.get("stack_trace", None),
        user_infos,
        mutating_trace,
    )
def get_placeholder_info(graph: torch.fx.Graph) -> list[PlaceholderInfo]:
    """Collect a PlaceholderInfo for every placeholder node in *graph*."""
    infos = []
    for node in graph.nodes:
        if node.op == "placeholder":
            infos.append(to_placeholder_info(node))
    return infos
def format_default_skip_message(reason: str) -> str:
    """Build the standard 'skipping cudagraphs' message for *reason*."""
    return "skipping cudagraphs due to " + reason
def get_mutation_stack_trace(
    placeholders: Sequence[PlaceholderInfo],
    mutation_indices: Union[AbstractSet[int], Sequence[int]],
) -> str:
    """Build the skip message for mutated inputs, appending the first
    available mutating-use stack trace among *mutation_indices*."""
    found = None
    for idx in mutation_indices:
        found = get_mutating_use_stack_trace(placeholders[idx])
        if found:
            break

    msg = format_default_skip_message(
        f"mutated inputs ({len(mutation_indices)} instances)"
    )
    return f"{msg}. Found from : \n {found}" if found else msg
def check_for_mutation(
    func: WrappedFunction,
    inputs: list[InputType],
    is_cuda_graph_recorded_tensor: Callable[[torch.Tensor], bool],
) -> Optional[str]:
    """Return a human-readable skip reason if *func* mutates inputs that
    cudagraphs cannot handle, else None.

    With cudagraph trees, mutation of static inputs or of tensors already
    recorded in a previous cudagraph is allowed; otherwise any mutated
    input disqualifies the function.
    """
    # doesn't work for non-trees because the warmup run would apply mutation twice
    if torch._inductor.config.triton.cudagraph_trees:
        # checking if mutation is only on parameters/static inputs
        mutation_indices: Sequence[int] = [
            idx
            for idx in func.mutated_input_idxs
            if not (
                idx in func.static_input_idxs
                or is_cuda_graph_recorded_tensor(inputs[idx])  # type: ignore[arg-type]
            )
        ]
    else:
        mutation_indices = func.mutated_input_idxs

    static_inputs_log.debug(
        "check mutation static input indices: %s", func.static_input_idxs
    )
    static_inputs_log.debug("check mutation mutation indices: %s", mutation_indices)

    return (
        get_mutation_stack_trace(func.placeholders, mutation_indices)
        if mutation_indices
        else None
    )
def _get_use_stack_trace(node: torch.fx.Node) -> Optional[str]:
    """Return the first stack trace recorded on any user of *node*, or None."""
    for user in node.users:
        trace = user.meta.get("stack_trace", None)
        if trace:
            return trace
    return None
def check_multiple_devices_or_any_cpu_nodes(
    device_node_mapping: dict[torch.device, torch.fx.Node],
) -> Optional[str]:
    """Return a skip message unless all compute lives on a single CUDA
    device; None means cudagraphs are applicable.

    Note: mutates *device_node_mapping* by popping ignorable devices.
    """
    # meta tensors are supported since there is no compute
    device_node_mapping.pop(torch.device("meta"), None)

    # dynamo cudagraph does not support graph partition
    if is_using_cudagraph_partition():
        # graph partition supports splitting on cpu op. So we can ignore cpu
        # nodes. (This also makes the cpu-skip branch below a no-op in
        # partition mode, since the entry was just removed.)
        device_node_mapping.pop(torch.device("cpu"), None)

    if cpu_node := device_node_mapping.get(torch.device("cpu")):
        msg = f"cpu device ({cpu_node.name})"
        # Attach the user's stack trace when one is available.
        if stack_trace := _get_use_stack_trace(cpu_node):
            return format_default_skip_message(f"{msg}. Found from : \n {stack_trace}")

        return format_default_skip_message(msg)

    if (
        len(device_node_mapping) == 1
        and next(iter(device_node_mapping.keys())).type == "cuda"
    ):
        return None

    keys_repr = (repr(key) for key in device_node_mapping)
    return format_default_skip_message(f"multiple devices: {', '.join(keys_repr)}")
def check_lowering_disable_cudagraph(
    device_node_mapping: dict[torch.device, torch.fx.Node],
) -> Optional[str]:
    """Alias used at lowering time; applies the same device checks as
    check_multiple_devices_or_any_cpu_nodes."""
    return check_multiple_devices_or_any_cpu_nodes(device_node_mapping)
def log_cudagraph_skip_and_bump_counter(msg: str) -> None:
    """Record a cudagraph skip: warn, bump the inductor counter, and stash
    the reason in the metrics context.

    Raises RuntimeError instead when the ``cudagraph_or_error`` config
    demands hard failures.
    """
    perf_hint_log.warning(msg)
    counters["inductor"]["cudagraph_skips"] += 1

    if torch._inductor.config.triton.cudagraph_or_error:
        raise RuntimeError(msg)

    metrics_context = get_metrics_context()
    if metrics_context.in_progress():
        metrics_context.set("cudagraph_skip_reason", msg, overwrite=True)
@dataclasses.dataclass
| WrappedFunction |
python | kamyu104__LeetCode-Solutions | Python/subsequences-with-a-unique-middle-mode-i.py | {
"start": 90,
"end": 2692
} | class ____(object):
def subsequencesWithMiddleMode(self, nums):
    """
    :type nums: List[int]
    :rtype: int

    Counts length-5 subsequences whose middle element is the unique mode,
    by complementary counting: for each middle position, take all choices
    of 2 elements on each side, then subtract the arrangements where the
    middle value is NOT the unique mode. Runs in O(n) time and O(n) space.
    NOTE: Python 2 code (uses dict.itervalues).
    """
    def nC2(x):
        return x*(x-1)//2

    MOD = 10**9+7
    result = 0
    left = collections.defaultdict(int)
    right = collections.defaultdict(int)
    for x in nums:
        right[x] += 1
    # Running aggregates over all values x != v (v = current middle),
    # maintained incrementally as the split point moves right.
    left_x_sq = 0  # sum(left[x]^2 for x != v)
    right_x_sq = sum(v**2 for v in right.itervalues())  # sum(right[x]^2 for x != v)
    left_x_right_x = 0  # sum(left[x]*right[x] for x != v)
    left_x_sq_right_x = 0  # sum(left[x]^2*right[x] for x != v)
    left_x_right_x_sq = 0  # sum(left[x]*right[x]^2 for x != v)
    for i, v in enumerate(nums):
        # Temporarily exclude v's own contribution from the aggregates.
        left_x_sq -= left[v]**2
        right_x_sq -= right[v]**2
        left_x_right_x -= left[v]*right[v]
        left_x_sq_right_x -= left[v]**2*right[v]
        left_x_right_x_sq -= left[v]*right[v]**2
        right[v] -= 1
        l, r = i, len(nums)-(i+1)
        # all possibles
        result += nC2(l)*nC2(r)
        # only mid is a
        result -= nC2(l-left[v])*nC2(r-right[v])
        # bb/a/ac
        # sum((left[x]*(left[x]-1)//2)*right[v]*((r-right[v])-right[x]) for x != v)
        result -= ((left_x_sq-(l-left[v]))*(r-right[v])-(left_x_sq_right_x-left_x_right_x))*right[v]//2
        # ac/a/bb
        # sum(left[v]*((l-left[v])-left[x])*(right[x]*(right[x]-1)//2) for x != v)
        result -= ((right_x_sq-(r-right[v]))*(l-left[v])-(left_x_right_x_sq-left_x_right_x))*left[v]//2
        # ab/a/bc
        # sum(left[v]*left[x]*right[x]*((r-right[v])-right[x]) for x != v)
        result -= left[v]*left_x_right_x*(r-right[v])-left[v]*left_x_right_x_sq
        # bc/a/ab
        # sum(left[x]*((l-left[v])-left[x])*right[v]*right[x] for x != v)
        result -= right[v]*left_x_right_x*(l-left[v])-right[v]*left_x_sq_right_x
        # bb/a/ab
        # sum((left[x]*(left[x]-1)//2)*right[v]*right[x] for x != v)
        result -= right[v]*(left_x_sq_right_x-left_x_right_x)//2
        # ab/a/bb
        # sum((right[x]*(right[x]-1)//2)*left[v]*left[x] for x != v)
        result -= left[v]*(left_x_right_x_sq-left_x_right_x)//2
        # Re-include v's (updated) contribution.
        left[v] += 1
        left_x_sq += left[v]**2
        right_x_sq += right[v]**2
        left_x_right_x += left[v]*right[v]
        left_x_sq_right_x += left[v]**2*right[v]
        left_x_right_x_sq += left[v]*right[v]**2
    return result % MOD
| Solution |
python | pytorch__pytorch | test/quantization/ao_migration/test_quantization_fx.py | {
"start": 152,
"end": 5661
} | class ____(AOMigrationTestCase):
# Each test below verifies that the listed names remain importable from the
# legacy torch.quantization.* location after the AO migration to
# torch.ao.quantization.* (via the inherited _test_function_import helper).
def test_function_import_quantize_fx(self):
    """Legacy imports from quantize_fx."""
    function_list = [
        "_check_is_graph_module",
        "_swap_ff_with_fxff",
        "_fuse_fx",
        "QuantizationTracer",
        "_prepare_fx",
        "_prepare_standalone_module_fx",
        "fuse_fx",
        "Scope",
        "ScopeContextManager",
        "prepare_fx",
        "prepare_qat_fx",
        "_convert_fx",
        "convert_fx",
        "_convert_standalone_module_fx",
    ]
    self._test_function_import("quantize_fx", function_list)

def test_function_import_fx(self):
    """Legacy imports from the fx package root."""
    function_list = [
        "prepare",
        "convert",
        "fuse",
    ]
    self._test_function_import("fx", function_list)

def test_function_import_fx_graph_module(self):
    """Legacy imports from fx.graph_module."""
    function_list = [
        "FusedGraphModule",
        "ObservedGraphModule",
        "_is_observed_module",
        "ObservedStandaloneGraphModule",
        "_is_observed_standalone_module",
        "QuantizedGraphModule",
    ]
    self._test_function_import("fx.graph_module", function_list)

def test_function_import_fx_pattern_utils(self):
    """Legacy imports from fx.pattern_utils."""
    function_list = [
        "QuantizeHandler",
        "_register_fusion_pattern",
        "get_default_fusion_patterns",
        "_register_quant_pattern",
        "get_default_quant_patterns",
        "get_default_output_activation_post_process_map",
    ]
    self._test_function_import("fx.pattern_utils", function_list)

def test_function_import_fx_equalize(self):
    """Legacy imports from fx._equalize."""
    function_list = [
        "reshape_scale",
        "_InputEqualizationObserver",
        "_WeightEqualizationObserver",
        "calculate_equalization_scale",
        "EqualizationQConfig",
        "input_equalization_observer",
        "weight_equalization_observer",
        "default_equalization_qconfig",
        "fused_module_supports_equalization",
        "nn_module_supports_equalization",
        "node_supports_equalization",
        "is_equalization_observer",
        "get_op_node_and_weight_eq_obs",
        "maybe_get_weight_eq_obs_node",
        "maybe_get_next_input_eq_obs",
        "maybe_get_next_equalization_scale",
        "scale_input_observer",
        "scale_weight_node",
        "scale_weight_functional",
        "clear_weight_quant_obs_node",
        "remove_node",
        "update_obs_for_equalization",
        "convert_eq_obs",
        "_convert_equalization_ref",
        "get_layer_sqnr_dict",
        "get_equalization_qconfig_dict",
    ]
    self._test_function_import("fx._equalize", function_list)

def test_function_import_fx_quantization_patterns(self):
    """fx.quantization_patterns was renamed to fx.quantize_handler."""
    function_list = [
        "QuantizeHandler",
        "BinaryOpQuantizeHandler",
        "CatQuantizeHandler",
        "ConvReluQuantizeHandler",
        "LinearReLUQuantizeHandler",
        "BatchNormQuantizeHandler",
        "EmbeddingQuantizeHandler",
        "RNNDynamicQuantizeHandler",
        "DefaultNodeQuantizeHandler",
        "FixedQParamsOpQuantizeHandler",
        "CopyNodeQuantizeHandler",
        "CustomModuleQuantizeHandler",
        "GeneralTensorShapeOpQuantizeHandler",
        "StandaloneModuleQuantizeHandler",
    ]
    self._test_function_import(
        "fx.quantization_patterns",
        function_list,
        new_package_name="fx.quantize_handler",
    )

def test_function_import_fx_match_utils(self):
    """Legacy imports from fx.match_utils."""
    function_list = ["_MatchResult", "MatchAllNode", "_is_match", "_find_matches"]
    self._test_function_import("fx.match_utils", function_list)

def test_function_import_fx_prepare(self):
    """Legacy imports from fx.prepare."""
    function_list = ["prepare"]
    self._test_function_import("fx.prepare", function_list)

def test_function_import_fx_convert(self):
    """Legacy imports from fx.convert."""
    function_list = ["convert"]
    self._test_function_import("fx.convert", function_list)

def test_function_import_fx_fuse(self):
    """Legacy imports from fx.fuse."""
    function_list = ["fuse"]
    self._test_function_import("fx.fuse", function_list)

def test_function_import_fx_fusion_patterns(self):
    """fx.fusion_patterns was renamed to fx.fuse_handler."""
    function_list = ["FuseHandler", "DefaultFuseHandler"]
    self._test_function_import(
        "fx.fusion_patterns",
        function_list,
        new_package_name="fx.fuse_handler",
    )

# we removed matching test for torch.quantization.fx.quantization_types
# old: torch.quantization.fx.quantization_types
# new: torch.ao.quantization.utils
# both are valid, but we'll deprecate the old path in the future
def test_function_import_fx_utils(self):
    """Legacy imports from fx.utils."""
    function_list = [
        "get_custom_module_class_keys",
        "get_linear_prepack_op_for_dtype",
        "get_qconv_prepack_op",
        "get_new_attr_name_with_prefix",
        "graph_module_from_producer_nodes",
        "assert_and_get_unique_device",
        "create_getattr_from_value",
        "all_node_args_have_no_tensors",
        "get_non_observable_arg_indexes_and_types",
        "maybe_get_next_module",
    ]
    self._test_function_import("fx.utils", function_list)
if __name__ == "__main__":
raise_on_run_directly("test/test_quantization.py")
| TestAOMigrationQuantizationFx |
python | dask__distributed | distributed/deploy/tests/test_slow_adaptive.py | {
"start": 264,
"end": 2965
} | class ____:
def __init__(self, *args, delay=0, **kwargs):
self.worker = Worker(*args, **kwargs)
self.delay = delay
self.status = None
@property
def address(self):
return self.worker.address
def __await__(self):
async def now():
if self.status is not Status.running:
self.worker.loop.call_later(self.delay, self.worker.start)
self.status = Status.running
return self
return now().__await__()
async def close(self):
await self.worker.close()
self.status = Status.closed
scheduler = {"cls": Scheduler, "options": {"dashboard_address": ":0"}}
@gen_test()
async def test_startup():
start = time()
async with SpecCluster(
scheduler=scheduler,
workers={
0: {"cls": Worker, "options": {}},
1: {"cls": SlowWorker, "options": {"delay": 120}},
2: {"cls": SlowWorker, "options": {"delay": 0}},
},
asynchronous=True,
) as cluster:
assert len(cluster.workers) == len(cluster.worker_spec) == 3
assert time() < start + 60
assert 0 <= len(cluster.scheduler_info["workers"]) <= 2
async with Client(cluster, asynchronous=True) as client:
await client.wait_for_workers(n_workers=2)
@pytest.mark.flaky(reruns=10, reruns_delay=5)
@gen_test()
async def test_scale_up_down():
start = time()
async with SpecCluster(
scheduler=scheduler,
workers={
"slow": {"cls": SlowWorker, "options": {"delay": 5}},
"fast": {"cls": Worker, "options": {}},
},
asynchronous=True,
) as cluster:
cluster.scale(1) # remove a worker, hopefully the one we don't have
await cluster
assert list(cluster.worker_spec) == ["fast"]
cluster.scale(0)
await cluster
assert not cluster.worker_spec
@gen_test()
async def test_adaptive():
start = time()
async with SpecCluster(
scheduler=scheduler,
workers={"fast": {"cls": Worker, "options": {}}},
worker={"cls": SlowWorker, "options": {"delay": 5}},
asynchronous=True,
) as cluster:
cluster.adapt(minimum=1, maximum=4, target_duration="1s", interval="20ms")
async with Client(cluster, asynchronous=True) as client:
futures = client.map(slowinc, range(200), delay=0.1)
while len(cluster.worker_spec) <= 1:
await asyncio.sleep(0.05)
del futures
while len(cluster.worker_spec) > 1:
await asyncio.sleep(0.05)
assert list(cluster.worker_spec) == ["fast"]
| SlowWorker |
python | ansible__ansible | lib/ansible/executor/powershell/module_manifest.py | {
"start": 846,
"end": 1147
} | class ____:
scripts: dict[str, _ScriptInfo] = dataclasses.field(default_factory=dict)
actions: list[_ManifestAction] = dataclasses.field(default_factory=list)
signed_hashlist: list[str] = dataclasses.field(default_factory=list)
@dataclasses.dataclass(frozen=True, kw_only=True)
| _ExecManifest |
python | pennersr__django-allauth | allauth/headless/account/inputs.py | {
"start": 7114,
"end": 8214
} | class ____(inputs.Input):
email = inputs.CharField()
def __init__(self, *args, **kwargs):
self.user = kwargs.pop("user")
super().__init__(*args, **kwargs)
def clean_email(self):
self.process = None
email = self.cleaned_data["email"]
validate_email(email)
# Select a database backed email...
try:
return EmailAddress.objects.get_for_user(user=self.user, email=email)
except EmailAddress.DoesNotExist:
pass
# Or, if email verification by code is active, try the pending email
request = context.request
self.process = flows.email_verification_by_code.EmailVerificationProcess.resume(
request
)
if self.process:
email_address = self.process.email_address
if email_address.email.lower() == email.lower() and (
request.user.is_anonymous or email_address.user_id == request.user.pk
):
return email_address
raise get_adapter().validation_error("unknown_email")
| SelectEmailInput |
python | doocs__leetcode | lcci/02.06.Palindrome Linked List/Solution.py | {
"start": 134,
"end": 757
} | class ____:
def isPalindrome(self, head: ListNode) -> bool:
if head is None:
return True
slow, fast = head, head.next
while fast and fast.next:
slow = slow.next
fast = fast.next.next
p = slow.next
slow.next = None
dummy = ListNode()
while p:
next = p.next
p.next = dummy.next
dummy.next = p
p = next
p = dummy.next
while p:
if head.val != p.val:
return False
head = head.next
p = p.next
return True
| Solution |
python | apache__airflow | providers/apache/kafka/tests/unit/apache/kafka/hooks/test_base.py | {
"start": 1228,
"end": 1395
} | class ____(KafkaBaseHook):
def _get_client(self, config):
return config
@pytest.fixture
def hook():
return SomeKafkaHook()
TIMEOUT = 10
| SomeKafkaHook |
python | uqfoundation__dill | dill/_objects.py | {
"start": 2028,
"end": 2226
} | class ____(object):
def _method(self):
pass
# @classmethod
# def _clsmethod(cls): #XXX: test me
# pass
# @staticmethod
# def _static(self): #XXX: test me
# pass
| _newclass |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 267346,
"end": 269424
} | class ____(ExternKernel):
"""
Computes the output size of a slice call, handling the correct semantics in codegen.
We do this for flexible handling for unbacked indices (to not data-dependent error).
Slicing has 4 semantics for indices, i.e. x[start:] could be:
1) start < -x.size(0) -> x[0:] # negative out-of-bounds
2) start in [-x.size(0), 0) -> x[x.size(0) + start:] # negative slicing
3) start in [0, x.size(0)) -> x[start:] # standard slicing
4) start >= x.size(0) -> empty slice # positive out-of-bounds
If the appropriate semantics are known beforehand, the output size is computed based on
the start & end indices. If not (with unbacked indices), a new unbacked symbol is created
to represent the output size, and codegen handles computing the correct case.
"""
def get_reads(self) -> OrderedSet[Dep]:
return OrderedSet()
def should_allocate(self) -> bool:
return False
def __init__(
self,
unbacked_size_symbol: sympy.Symbol,
start: Union[sympy.Symbol, int],
end: Union[sympy.Symbol, int],
step: Union[sympy.Symbol, int],
size: Union[sympy.Symbol, int],
):
super().__init__(None, NoneLayout(device=torch.device("cpu")), [])
# This node codegen
self.unbacked_size_symbol = unbacked_size_symbol
self.start = start
self.end = end
self.step = step
self.size = size
def get_unbacked_symbol_defs(self) -> OrderedSet[sympy.Symbol]:
return OrderedSet([self.unbacked_size_symbol])
@cache_on_self_and_args("DynamicSliceSize")
def get_free_symbol_uses(
self, unbacked_only: bool = False
) -> OrderedSet[sympy.Symbol]:
return get_free_symbols(self.start, unbacked_only).union(
get_free_symbols(self.end, unbacked_only)
)
def codegen(self, wrapper: PythonWrapperCodegen) -> None:
wrapper.codegen_dynamic_slice_size(self)
| DynamicSliceSize |
python | numba__numba | numba/cuda/cudadecl.py | {
"start": 6038,
"end": 6632
} | class ____(ConcreteTemplate):
"""
Supported types from `llvm.ctlz`
[here](http://docs.nvidia.com/cuda/nvvm-ir-spec/index.html#bit-manipulations-intrinics)
"""
key = cuda.clz
cases = [
signature(types.int8, types.int8),
signature(types.int16, types.int16),
signature(types.int32, types.int32),
signature(types.int64, types.int64),
signature(types.uint8, types.uint8),
signature(types.uint16, types.uint16),
signature(types.uint32, types.uint32),
signature(types.uint64, types.uint64),
]
@register
| Cuda_clz |
python | huggingface__transformers | src/transformers/testing_utils.py | {
"start": 57358,
"end": 58918
} | class ____:
"""Create a temporary Hub repository and return its `RepoUrl` object. This is similar to
`tempfile.TemporaryDirectory` and can be used as a context manager. For example:
with TemporaryHubRepo(token=self._token) as temp_repo:
...
Upon exiting the context, the repository and everything contained in it are removed.
Example:
```python
with TemporaryHubRepo(token=self._token) as temp_repo:
model.push_to_hub(tmp_repo.repo_id, token=self._token)
```
"""
def __init__(self, namespace: str | None = None, token: str | None = None) -> None:
self.token = token
with tempfile.TemporaryDirectory() as tmp_dir:
repo_id = Path(tmp_dir).name
if namespace is not None:
repo_id = f"{namespace}/{repo_id}"
self.repo_url = create_repo(repo_id, token=self.token)
def __enter__(self):
return self.repo_url
def __exit__(self, exc, value, tb):
delete_repo(repo_id=self.repo_url.repo_id, token=self.token, missing_ok=True)
@contextlib.contextmanager
# adapted from https://stackoverflow.com/a/64789046/9201239
def ExtendSysPath(path: str | os.PathLike) -> Iterator[None]:
"""
Temporary add given path to `sys.path`.
Usage :
```python
with ExtendSysPath("/path/to/dir"):
mymodule = importlib.import_module("mymodule")
```
"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
| TemporaryHubRepo |
python | doocs__leetcode | solution/0300-0399/0389.Find the Difference/Solution.py | {
"start": 0,
"end": 193
} | class ____:
def findTheDifference(self, s: str, t: str) -> str:
cnt = Counter(s)
for c in t:
cnt[c] -= 1
if cnt[c] < 0:
return c
| Solution |
python | tornadoweb__tornado | tornado/test/httpserver_test.py | {
"start": 11434,
"end": 13908
} | class ____(AsyncHTTPTestCase):
def get_app(self):
return Application(
[
("/echo", EchoHandler),
("/typecheck", TypeCheckHandler),
("//doubleslash", EchoHandler),
("/post_utf8", PostEchoHandler),
("/post_gbk", PostEchoGBKHandler),
]
)
def test_query_string_encoding(self):
response = self.fetch("/echo?foo=%C3%A9")
data = json_decode(response.body)
self.assertEqual(data, {"foo": ["\u00e9"]})
def test_empty_query_string(self):
response = self.fetch("/echo?foo=&foo=")
data = json_decode(response.body)
self.assertEqual(data, {"foo": ["", ""]})
def test_empty_post_parameters(self):
response = self.fetch("/echo", method="POST", body="foo=&bar=")
data = json_decode(response.body)
self.assertEqual(data, {"foo": [""], "bar": [""]})
def test_types(self):
headers = {"Cookie": "foo=bar"}
response = self.fetch("/typecheck?foo=bar", headers=headers)
data = json_decode(response.body)
self.assertEqual(data, {})
response = self.fetch(
"/typecheck", method="POST", body="foo=bar", headers=headers
)
data = json_decode(response.body)
self.assertEqual(data, {})
def test_double_slash(self):
# urlparse.urlsplit (which tornado.httpserver used to use
# incorrectly) would parse paths beginning with "//" as
# protocol-relative urls.
response = self.fetch("//doubleslash")
self.assertEqual(200, response.code)
self.assertEqual(json_decode(response.body), {})
def test_post_encodings(self):
headers = {"Content-Type": "application/x-www-form-urlencoded"}
uni_text = "chinese: \u5f20\u4e09"
for enc in ("utf8", "gbk"):
for quote in (True, False):
with self.subTest(enc=enc, quote=quote):
bin_text = uni_text.encode(enc)
if quote:
bin_text = urllib.parse.quote(bin_text).encode("ascii")
response = self.fetch(
"/post_" + enc,
method="POST",
headers=headers,
body=(b"data=" + bin_text),
)
self.assertEqual(json_decode(response.body), {"echo": uni_text})
| HTTPServerTest |
python | astropy__astropy | astropy/io/fits/hdu/image.py | {
"start": 41118,
"end": 44856
} | class ____(_ImageBaseHDU):
"""
FITS primary HDU class.
"""
_default_name = "PRIMARY"
def __init__(
self,
data=None,
header=None,
do_not_scale_image_data=False,
ignore_blank=False,
uint=True,
scale_back=None,
):
"""
Construct a primary HDU.
Parameters
----------
data : array or ``astropy.io.fits.hdu.base.DELAYED``, optional
The data in the HDU.
header : `~astropy.io.fits.Header`, optional
The header to be used (as a template). If ``header`` is `None`, a
minimal header will be provided.
do_not_scale_image_data : bool, optional
If `True`, image data is not scaled using BSCALE/BZERO values
when read. (default: False)
ignore_blank : bool, optional
If `True`, the BLANK header keyword will be ignored if present.
Otherwise, pixels equal to this value will be replaced with
NaNs. (default: False)
uint : bool, optional
Interpret signed integer data where ``BZERO`` is the
central value and ``BSCALE == 1`` as unsigned integer
data. For example, ``int16`` data with ``BZERO = 32768``
and ``BSCALE = 1`` would be treated as ``uint16`` data.
(default: True)
scale_back : bool, optional
If `True`, when saving changes to a file that contained scaled
image data, restore the data to the original type and reapply the
original BSCALE/BZERO values. This could lead to loss of accuracy
if scaling back to integer values after performing floating point
operations on the data. Pseudo-unsigned integers are automatically
rescaled unless scale_back is explicitly set to `False`.
(default: None)
"""
super().__init__(
data=data,
header=header,
do_not_scale_image_data=do_not_scale_image_data,
uint=uint,
ignore_blank=ignore_blank,
scale_back=scale_back,
)
# insert the keywords EXTEND
if header is None:
dim = self._header["NAXIS"]
if dim == 0:
dim = ""
self._header.set("EXTEND", True, after="NAXIS" + str(dim))
@classmethod
def match_header(cls, header):
card = header.cards[0]
# Due to problems discussed in #5808, we cannot assume the 'GROUPS'
# keyword to be True/False, have to check the value
return (
card.keyword == "SIMPLE"
and ("GROUPS" not in header or header["GROUPS"] is not True)
and card.value
)
def update_header(self):
super().update_header()
# Update the position of the EXTEND keyword if it already exists
if "EXTEND" in self._header:
if len(self._axes):
after = "NAXIS" + str(len(self._axes))
else:
after = "NAXIS"
self._header.set("EXTEND", after=after)
def _verify(self, option="warn"):
errs = super()._verify(option=option)
# Verify location and value of mandatory keywords.
# The EXTEND keyword is only mandatory if the HDU has extensions; this
# condition is checked by the HDUList object. However, if we already
# have an EXTEND keyword check that its position is correct
if "EXTEND" in self._header:
naxis = self._header.get("NAXIS", 0)
self.req_cards(
"EXTEND", naxis + 3, lambda v: isinstance(v, bool), True, option, errs
)
return errs
| PrimaryHDU |
python | kamyu104__LeetCode-Solutions | Python/find-minimum-in-rotated-sorted-array.py | {
"start": 436,
"end": 851
} | class ____(object):
def findMin(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
left, right = 0, len(nums) - 1
while left < right and nums[left] >= nums[right]:
mid = left + (right - left) / 2
if nums[mid] < nums[left]:
right = mid
else:
left = mid + 1
return nums[left]
| Solution2 |
python | great-expectations__great_expectations | contrib/time_series_expectations/time_series_expectations/generator/monthly_time_series_generator.py | {
"start": 255,
"end": 3268
} | class ____(DailyTimeSeriesGenerator):
"""Generate a monthly time series with trend, seasonality, and outliers."""
def generate_df(
self,
size: Optional[int] = 365 * 3,
start_date: Optional[str] = "2018-01-01",
trend_params: Optional[List[TrendParams]] = None,
weekday_dummy_params: Optional[List[float]] = None,
annual_seasonality_params: Optional[List[Tuple[float, float]]] = None,
holiday_alpha: float = 3.5,
outlier_alpha: float = 2.5,
noise_scale: float = 1.0,
) -> pd.DataFrame:
"""Generate a time series as a pandas dataframe.
Keyword Args:
size: The number of days in the time series.
start_date: The start date of the time series.
trend_params: A list of trend parameters corresponding to cutpoints in the time series.
weekday_dummy_params: A list of weekday dummy parameters. Should be a list of length 7, with each day corresponding to the average difference in the time series on that day.
annual_seasonality_params: A list of annual seasonality parameters used to create a cyclic component in the time series.
holiday_alpha: The alpha parameter for the pareto distribution used to generate holiday effects.
outlier_alpha: The alpha parameter for the pareto distribution used to generate outlier effects.
noise_scale: The scale parameter for the standard deviation of the normal distribution used to generate noise.
Returns:
A pandas dataframe with a date column and a time series column.
Notes:
* Holiday and outlier effects are generated using a pareto distribution. The alpha parameter controls the shape of the distribution. A higher alpha value will result in more extreme holiday and outlier effects.
* Holidays don't correspond to actual holidays. Instead, they are generated by randomly selecting days in the time series.
* Annual seasonality is generated by Fourier series. The number of fourier terms is determined by the length of the annual_seasonality_params list. The first element of each tuple in the list is the amplitude of the sine term, and the second element is the amplitude of the cosine term.
"""
# Start with a daily time series that includes all dates in the target range
df = pd.DataFrame(
{
"ds": pd.date_range(start_date, periods=size * 31, freq="D"),
"y": self._generate_daily_time_series(
size * 31,
trend_params,
weekday_dummy_params,
annual_seasonality_params,
holiday_alpha,
outlier_alpha,
noise_scale,
),
}
)
# Limit to the first of each month
df_sub = df[df.ds.map(lambda x: x.day == 1)]
return df_sub[:size]
| MonthlyTimeSeriesGenerator |
python | pytorch__pytorch | benchmarks/gpt_fast/mixtral_moe_model.py | {
"start": 9179,
"end": 10532
} | class ____(nn.Module):
def __init__(self, dim: int, eps: float = 1e-5):
super().__init__()
self.eps = eps
self.weight = nn.Parameter(torch.ones(dim))
def _norm(self, x):
return x * torch.rsqrt(torch.mean(x * x, dim=-1, keepdim=True) + self.eps)
def forward(self, x: Tensor) -> Tensor:
output = self._norm(x.float()).type_as(x)
return output * self.weight
def precompute_freqs_cis(seq_len: int, n_elem: int, base: int = 10000) -> Tensor:
freqs = 1.0 / (
base ** (torch.arange(0, n_elem, 2)[: (n_elem // 2)].float() / n_elem)
)
t = torch.arange(seq_len, device=freqs.device)
freqs = torch.outer(t, freqs)
freqs_cis = torch.polar(torch.ones_like(freqs), freqs)
cache = torch.stack([freqs_cis.real, freqs_cis.imag], dim=-1)
return cache.to(dtype=torch.bfloat16)
def apply_rotary_emb(x: Tensor, freqs_cis: Tensor) -> Tensor:
xshaped = x.float().reshape(*x.shape[:-1], -1, 2)
freqs_cis = freqs_cis.view(1, xshaped.size(1), 1, xshaped.size(3), 2)
x_out2 = torch.stack(
[
xshaped[..., 0] * freqs_cis[..., 0] - xshaped[..., 1] * freqs_cis[..., 1],
xshaped[..., 1] * freqs_cis[..., 0] + xshaped[..., 0] * freqs_cis[..., 1],
],
-1,
)
x_out2 = x_out2.flatten(3)
return x_out2.type_as(x)
| RMSNorm |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/_typing.py | {
"start": 3030,
"end": 4966
} | class ____(Protocol):
def __call__(
self, state: InstanceState[Any], passive: PassiveFlag
) -> Any: ...
def is_orm_option(
opt: ExecutableOption,
) -> TypeGuard[ORMOption]:
return not opt._is_core
def is_user_defined_option(
opt: ExecutableOption,
) -> TypeGuard[UserDefinedOption]:
return not opt._is_core and opt._is_user_defined # type: ignore
def is_composite_class(obj: Any) -> bool:
# inlining is_dataclass(obj)
return hasattr(obj, "__composite_values__") or hasattr(
obj, "__dataclass_fields__"
)
if TYPE_CHECKING:
def insp_is_mapper_property(
obj: Any,
) -> TypeGuard[MapperProperty[Any]]: ...
def insp_is_mapper(obj: Any) -> TypeGuard[Mapper[Any]]: ...
def insp_is_aliased_class(obj: Any) -> TypeGuard[AliasedInsp[Any]]: ...
def insp_is_attribute(
obj: InspectionAttr,
) -> TypeGuard[QueryableAttribute[Any]]: ...
def attr_is_internal_proxy(
obj: InspectionAttr,
) -> TypeGuard[QueryableAttribute[Any]]: ...
def prop_is_relationship(
prop: MapperProperty[Any],
) -> TypeGuard[RelationshipProperty[Any]]: ...
def is_collection_impl(
impl: _AttributeImpl,
) -> TypeGuard[_CollectionAttributeImpl]: ...
def is_has_collection_adapter(
impl: _AttributeImpl,
) -> TypeGuard[_HasCollectionAdapter]: ...
else:
insp_is_mapper_property = operator.attrgetter("is_property")
insp_is_mapper = operator.attrgetter("is_mapper")
insp_is_aliased_class = operator.attrgetter("is_aliased_class")
insp_is_attribute = operator.attrgetter("is_attribute")
attr_is_internal_proxy = operator.attrgetter("_is_internal_proxy")
is_collection_impl = operator.attrgetter("collection")
prop_is_relationship = operator.attrgetter("_is_relationship")
is_has_collection_adapter = operator.attrgetter(
"_is_has_collection_adapter"
)
| _LoaderCallable |
python | ipython__ipython | IPython/lib/pretty.py | {
"start": 16242,
"end": 17043
} | class ____:
def __init__(self, *groups):
self.queue = []
for group in groups:
self.enq(group)
def enq(self, group):
depth = group.depth
while depth > len(self.queue) - 1:
self.queue.append([])
self.queue[depth].append(group)
def deq(self):
for stack in self.queue:
for idx, group in enumerate(reversed(stack)):
if group.breakables:
del stack[idx]
group.want_break = True
return group
for group in stack:
group.want_break = True
del stack[:]
def remove(self, group):
try:
self.queue[group.depth].remove(group)
except ValueError:
pass
| GroupQueue |
python | sqlalchemy__sqlalchemy | test/sql/test_types.py | {
"start": 42520,
"end": 51182
} | class ____(fixtures.TablesTest):
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
class MyType(types.TypeDecorator):
impl = String(50)
cache_ok = True
def process_bind_param(self, value, dialect):
return "BIND_IN" + str(value)
def process_result_value(self, value, dialect):
return value + "BIND_OUT"
cls.MyType = MyType
Table("t", metadata, Column("data", String(50)))
def test_insert_round_trip_cast(self, connection):
self._test_insert_round_trip(cast, connection)
def test_insert_round_trip_type_coerce(self, connection):
self._test_insert_round_trip(type_coerce, connection)
def _test_insert_round_trip(self, coerce_fn, conn):
MyType = self.MyType
t = self.tables.t
conn.execute(t.insert().values(data=coerce_fn("d1", MyType)))
eq_(
conn.execute(select(coerce_fn(t.c.data, MyType))).fetchall(),
[("BIND_INd1BIND_OUT",)],
)
def test_coerce_from_nulltype_cast(self, connection):
self._test_coerce_from_nulltype(cast, connection)
def test_coerce_from_nulltype_type_coerce(self, connection):
self._test_coerce_from_nulltype(type_coerce, connection)
def _test_coerce_from_nulltype(self, coerce_fn, conn):
MyType = self.MyType
# test coerce from nulltype - e.g. use an object that
# doesn't match to a known type
class MyObj:
def __str__(self):
return "THISISMYOBJ"
t = self.tables.t
conn.execute(t.insert().values(data=coerce_fn(MyObj(), MyType)))
eq_(
conn.execute(select(coerce_fn(t.c.data, MyType))).fetchall(),
[("BIND_INTHISISMYOBJBIND_OUT",)],
)
def test_vs_non_coerced_cast(self, connection):
self._test_vs_non_coerced(cast, connection)
def test_vs_non_coerced_type_coerce(self, connection):
self._test_vs_non_coerced(type_coerce, connection)
def _test_vs_non_coerced(self, coerce_fn, conn):
MyType = self.MyType
t = self.tables.t
conn.execute(t.insert().values(data=coerce_fn("d1", MyType)))
eq_(
conn.execute(
select(t.c.data, coerce_fn(t.c.data, MyType))
).fetchall(),
[("BIND_INd1", "BIND_INd1BIND_OUT")],
)
def test_vs_non_coerced_alias_cast(self, connection):
self._test_vs_non_coerced_alias(cast, connection)
def test_vs_non_coerced_alias_type_coerce(self, connection):
self._test_vs_non_coerced_alias(type_coerce, connection)
def _test_vs_non_coerced_alias(self, coerce_fn, conn):
MyType = self.MyType
t = self.tables.t
conn.execute(t.insert().values(data=coerce_fn("d1", MyType)))
eq_(
conn.execute(
select(t.c.data.label("x"), coerce_fn(t.c.data, MyType))
.alias()
.select()
).fetchall(),
[("BIND_INd1", "BIND_INd1BIND_OUT")],
)
def test_vs_non_coerced_where_cast(self, connection):
self._test_vs_non_coerced_where(cast, connection)
def test_vs_non_coerced_where_type_coerce(self, connection):
self._test_vs_non_coerced_where(type_coerce, connection)
def _test_vs_non_coerced_where(self, coerce_fn, conn):
MyType = self.MyType
t = self.tables.t
conn.execute(t.insert().values(data=coerce_fn("d1", MyType)))
# coerce on left side
eq_(
conn.execute(
select(t.c.data, coerce_fn(t.c.data, MyType)).where(
coerce_fn(t.c.data, MyType) == "d1"
)
).fetchall(),
[("BIND_INd1", "BIND_INd1BIND_OUT")],
)
# coerce on right side
eq_(
conn.execute(
select(t.c.data, coerce_fn(t.c.data, MyType)).where(
t.c.data == coerce_fn("d1", MyType)
)
).fetchall(),
[("BIND_INd1", "BIND_INd1BIND_OUT")],
)
def test_coerce_none_cast(self, connection):
self._test_coerce_none(cast, connection)
def test_coerce_none_type_coerce(self, connection):
self._test_coerce_none(type_coerce, connection)
def _test_coerce_none(self, coerce_fn, conn):
MyType = self.MyType
t = self.tables.t
conn.execute(t.insert().values(data=coerce_fn("d1", MyType)))
eq_(
conn.execute(
select(t.c.data, coerce_fn(t.c.data, MyType)).where(
t.c.data == coerce_fn(None, MyType)
)
).fetchall(),
[],
)
eq_(
conn.execute(
select(t.c.data, coerce_fn(t.c.data, MyType)).where(
coerce_fn(t.c.data, MyType) == None
)
).fetchall(), # noqa
[],
)
def test_resolve_clause_element_cast(self, connection):
self._test_resolve_clause_element(cast, connection)
def test_resolve_clause_element_type_coerce(self, connection):
self._test_resolve_clause_element(type_coerce, connection)
def _test_resolve_clause_element(self, coerce_fn, conn):
MyType = self.MyType
t = self.tables.t
conn.execute(t.insert().values(data=coerce_fn("d1", MyType)))
class MyFoob:
def __clause_element__(self):
return t.c.data
eq_(
conn.execute(
select(t.c.data, coerce_fn(MyFoob(), MyType))
).fetchall(),
[("BIND_INd1", "BIND_INd1BIND_OUT")],
)
def test_cast_replace_col_w_bind(self, connection):
self._test_replace_col_w_bind(cast, connection)
def test_type_coerce_replace_col_w_bind(self, connection):
self._test_replace_col_w_bind(type_coerce, connection)
def _test_replace_col_w_bind(self, coerce_fn, conn):
MyType = self.MyType
t = self.tables.t
conn.execute(t.insert().values(data=coerce_fn("d1", MyType)))
stmt = select(t.c.data, coerce_fn(t.c.data, MyType))
def col_to_bind(col):
if col is t.c.data:
return bindparam(None, "x", type_=col.type, unique=True)
return None
# ensure we evaluate the expression so that we can see
# the clone resets this info
stmt.compile()
new_stmt = visitors.replacement_traverse(stmt, {}, col_to_bind)
# original statement
eq_(
conn.execute(stmt).fetchall(),
[("BIND_INd1", "BIND_INd1BIND_OUT")],
)
# replaced with binds; CAST can't affect the bound parameter
# on the way in here
eq_(
conn.execute(new_stmt).fetchall(),
(
[("x", "BIND_INxBIND_OUT")]
if coerce_fn is type_coerce
else [("x", "xBIND_OUT")]
),
)
def test_cast_bind(self, connection):
self._test_bind(cast, connection)
def test_type_bind(self, connection):
self._test_bind(type_coerce, connection)
def _test_bind(self, coerce_fn, conn):
MyType = self.MyType
t = self.tables.t
conn.execute(t.insert().values(data=coerce_fn("d1", MyType)))
stmt = select(
bindparam(None, "x", String(50), unique=True),
coerce_fn(bindparam(None, "x", String(50), unique=True), MyType),
)
eq_(
conn.execute(stmt).fetchall(),
(
[("x", "BIND_INxBIND_OUT")]
if coerce_fn is type_coerce
else [("x", "xBIND_OUT")]
),
)
def test_cast_existing_typed(self, connection):
MyType = self.MyType
coerce_fn = cast
# when cast() is given an already typed value,
# the type does not take effect on the value itself.
eq_(
connection.scalar(select(coerce_fn(literal("d1"), MyType))),
"d1BIND_OUT",
)
def test_type_coerce_existing_typed(self, connection):
MyType = self.MyType
coerce_fn = type_coerce
t = self.tables.t
# type_coerce does upgrade the given expression to the
# given type.
connection.execute(
t.insert().values(data=coerce_fn(literal("d1"), MyType))
)
eq_(
connection.execute(select(coerce_fn(t.c.data, MyType))).fetchall(),
[("BIND_INd1BIND_OUT",)],
)
| TypeCoerceCastTest |
python | python__mypy | mypy/find_sources.py | {
"start": 445,
"end": 3030
} | class ____(Exception):
"""Exception indicating a problem in the list of sources given to mypy."""
def create_source_list(
paths: Sequence[str],
options: Options,
fscache: FileSystemCache | None = None,
allow_empty_dir: bool = False,
) -> list[BuildSource]:
"""From a list of source files/directories, makes a list of BuildSources.
Raises InvalidSourceList on errors.
"""
fscache = fscache or FileSystemCache()
finder = SourceFinder(fscache, options)
sources = []
for path in paths:
path = os.path.normpath(path)
if path.endswith(PY_EXTENSIONS):
# Can raise InvalidSourceList if a directory doesn't have a valid module name.
name, base_dir = finder.crawl_up(path)
sources.append(BuildSource(path, name, None, base_dir))
elif fscache.isdir(path):
sub_sources = finder.find_sources_in_dir(path)
if not sub_sources and not allow_empty_dir:
raise InvalidSourceList(f"There are no .py[i] files in directory '{path}'")
sources.extend(sub_sources)
else:
mod = os.path.basename(path) if options.scripts_are_modules else None
sources.append(BuildSource(path, mod, None))
return sources
def keyfunc(name: str) -> tuple[bool, int, str]:
"""Determines sort order for directory listing.
The desirable properties are:
1) foo < foo.pyi < foo.py
2) __init__.py[i] < foo
"""
base, suffix = os.path.splitext(name)
for i, ext in enumerate(PY_EXTENSIONS):
if suffix == ext:
return (base != "__init__", i, base)
return (base != "__init__", -1, name)
def normalise_package_base(root: str) -> str:
if not root:
root = os.curdir
root = os.path.abspath(root)
if root.endswith(os.sep):
root = root[:-1]
return root
def get_explicit_package_bases(options: Options) -> list[str] | None:
"""Returns explicit package bases to use if the option is enabled, or None if disabled.
We currently use MYPYPATH and the current directory as the package bases. In the future,
when --namespace-packages is the default could also use the values passed with the
--package-root flag, see #9632.
Values returned are normalised so we can use simple string comparisons in
SourceFinder.is_explicit_package_base
"""
if not options.explicit_package_bases:
return None
roots = mypy_path() + options.mypy_path + [os.getcwd()]
return [normalise_package_base(root) for root in roots]
| InvalidSourceList |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.