language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ansible__ansible | lib/ansible/plugins/connection/winrm.py | {
"start": 9078,
"end": 38390
} | class ____(ConnectionBase):
"""WinRM connections over HTTP/HTTPS."""
transport = 'winrm'
module_implementation_preferences = ('.ps1', '.exe', '')
allow_executable = False
has_pipelining = True
allow_extras = True
def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
self.always_pipeline_modules = True
self.has_native_async = True
self.protocol: winrm.Protocol | None = None
self.shell_id: str | None = None
self.delegate = None
self._shell: PowerShellBase
self._shell_type = 'powershell'
super(Connection, self).__init__(*args, **kwargs)
if not C.DEFAULT_DEBUG:
logging.getLogger('requests_credssp').setLevel(logging.INFO)
logging.getLogger('requests_kerberos').setLevel(logging.INFO)
logging.getLogger('urllib3').setLevel(logging.INFO)
def _build_winrm_kwargs(self) -> None:
# this used to be in set_options, as win_reboot needs to be able to
# override the conn timeout, we need to be able to build the args
# after setting individual options. This is called by _connect before
# starting the WinRM connection
self._winrm_host = self.get_option('remote_addr')
self._winrm_user = self.get_option('remote_user')
self._winrm_pass = self.get_option('remote_password')
self._winrm_port = self.get_option('port')
self._winrm_scheme = self.get_option('scheme')
# old behaviour, scheme should default to http if not set and the port
# is 5985 otherwise https
if self._winrm_scheme is None:
self._winrm_scheme = 'http' if self._winrm_port == 5985 else 'https'
self._winrm_path = self.get_option('path')
self._kinit_cmd = self.get_option('kerberos_command')
self._winrm_transport = self.get_option('transport')
self._winrm_connection_timeout = self.get_option('connection_timeout')
if hasattr(winrm, 'FEATURE_SUPPORTED_AUTHTYPES'):
self._winrm_supported_authtypes = set(winrm.FEATURE_SUPPORTED_AUTHTYPES)
else:
# for legacy versions of pywinrm, use the values we know are supported
self._winrm_supported_authtypes = set(['plaintext', 'ssl', 'kerberos'])
# calculate transport if needed
if self._winrm_transport is None or self._winrm_transport[0] is None:
# TODO: figure out what we want to do with auto-transport selection in the face of NTLM/Kerb/CredSSP/Cert/Basic
transport_selector = ['ssl'] if self._winrm_scheme == 'https' else ['plaintext']
if HAVE_KERBEROS and ((self._winrm_user and '@' in self._winrm_user)):
self._winrm_transport = ['kerberos'] + transport_selector
else:
self._winrm_transport = transport_selector
unsupported_transports = set(self._winrm_transport).difference(self._winrm_supported_authtypes)
if unsupported_transports:
raise AnsibleError('The installed version of WinRM does not support transport(s) %s' %
to_native(list(unsupported_transports), nonstring='simplerepr'))
# if kerberos is among our transports and there's a password specified, we're managing the tickets
kinit_mode = self.get_option('kerberos_mode')
if kinit_mode is None:
# HACK: ideally, remove multi-transport stuff
self._kerb_managed = "kerberos" in self._winrm_transport and (self._winrm_pass is not None and self._winrm_pass != "")
elif kinit_mode == "managed":
self._kerb_managed = True
elif kinit_mode == "manual":
self._kerb_managed = False
# arg names we're going passing directly
internal_kwarg_mask = {'self', 'endpoint', 'transport', 'username', 'password', 'scheme', 'path', 'kinit_mode', 'kinit_cmd'}
self._winrm_kwargs = dict(username=self._winrm_user, password=self._winrm_pass)
argspec = getfullargspec(Protocol.__init__)
supported_winrm_args = set(argspec.args)
supported_winrm_args.update(internal_kwarg_mask)
passed_winrm_args = {v.replace('ansible_winrm_', '') for v in self.get_option('_extras')}
unsupported_args = passed_winrm_args.difference(supported_winrm_args)
# warn for kwargs unsupported by the installed version of pywinrm
for arg in unsupported_args:
display.warning("ansible_winrm_{0} unsupported by pywinrm (is an up-to-date version of pywinrm installed?)".format(arg))
# pass through matching extras, excluding the list we want to treat specially
for arg in passed_winrm_args.difference(internal_kwarg_mask).intersection(supported_winrm_args):
self._winrm_kwargs[arg] = self.get_option('_extras')['ansible_winrm_%s' % arg]
# Until pykerberos has enough goodies to implement a rudimentary kinit/klist, simplest way is to let each connection
# auth itself with a private CCACHE.
def _kerb_auth(self, principal: str, password: str) -> None:
if password is None:
password = ""
b_password = to_bytes(password, encoding='utf-8', errors='surrogate_or_strict')
self._kerb_ccache = tempfile.NamedTemporaryFile()
display.vvvvv("creating Kerberos CC at %s" % self._kerb_ccache.name)
krb5ccname = "FILE:%s" % self._kerb_ccache.name
os.environ["KRB5CCNAME"] = krb5ccname
krb5env = dict(PATH=os.environ["PATH"], KRB5CCNAME=krb5ccname)
# Add any explicit environment vars into the krb5env block
kinit_env_vars = self.get_option('kinit_env_vars')
for var in kinit_env_vars:
if var not in krb5env and var in os.environ:
krb5env[var] = os.environ[var]
# Stores various flags to call with kinit, these could be explicit args set by 'ansible_winrm_kinit_args' OR
# '-f' if kerberos delegation is requested (ansible_winrm_kerberos_delegation).
kinit_cmdline = [self._kinit_cmd]
kinit_args = self.get_option('kinit_args')
if kinit_args:
kinit_args = [to_text(a) for a in shlex.split(kinit_args) if a.strip()]
kinit_cmdline.extend(kinit_args)
elif boolean(self.get_option('_extras').get('ansible_winrm_kerberos_delegation', False)):
kinit_cmdline.append('-f')
kinit_cmdline.append(principal)
display.vvvv(f"calling kinit for principal {principal}")
# It is important to use start_new_session which spawns the process
# with setsid() to avoid it inheriting the current tty. On macOS it
# will force it to read from stdin rather than the tty.
try:
p = subprocess.Popen(
kinit_cmdline,
start_new_session=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=krb5env,
)
except OSError as err:
err_msg = "Kerberos auth failure when calling kinit cmd " \
"'%s': %s" % (self._kinit_cmd, to_native(err))
raise AnsibleConnectionFailure(err_msg)
stdout, stderr = p.communicate(b_password + b'\n')
rc = p.returncode
if rc != 0:
# one last attempt at making sure the password does not exist
# in the output
exp_msg = to_native(stderr.strip())
exp_msg = exp_msg.replace(to_native(password), "<redacted>")
err_msg = f"Kerberos auth failure for principal {principal}: {exp_msg}"
raise AnsibleConnectionFailure(err_msg)
display.vvvvv("kinit succeeded for principal %s" % principal)
def _winrm_connect(self) -> winrm.Protocol:
"""
Establish a WinRM connection over HTTP/HTTPS.
"""
display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" %
(self._winrm_user, self._winrm_port, self._winrm_host), host=self._winrm_host)
winrm_host = self._winrm_host
if HAS_IPADDRESS:
display.debug("checking if winrm_host %s is an IPv6 address" % winrm_host)
try:
ipaddress.IPv6Address(winrm_host)
except ipaddress.AddressValueError:
pass
else:
winrm_host = "[%s]" % winrm_host
netloc = '%s:%d' % (winrm_host, self._winrm_port)
endpoint = urlunsplit((self._winrm_scheme, netloc, self._winrm_path, '', ''))
errors = []
for transport in self._winrm_transport:
if transport == 'kerberos':
if not HAVE_KERBEROS:
errors.append('kerberos: the python kerberos library is not installed')
continue
if self._kerb_managed:
self._kerb_auth(self._winrm_user, self._winrm_pass)
display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._winrm_host)
try:
winrm_kwargs = self._winrm_kwargs.copy()
if self._winrm_connection_timeout:
winrm_kwargs['operation_timeout_sec'] = self._winrm_connection_timeout
winrm_kwargs['read_timeout_sec'] = self._winrm_connection_timeout + 10
protocol = Protocol(endpoint, transport=transport, **winrm_kwargs)
# open the shell from connect so we know we're able to talk to the server
if not self.shell_id:
self.shell_id = protocol.open_shell(codepage=65001) # UTF-8
display.vvvvv('WINRM OPEN SHELL: %s' % self.shell_id, host=self._winrm_host)
return protocol
except Exception as e:
err_msg = to_text(e).strip()
if re.search(to_text(r'Operation\s+?timed\s+?out'), err_msg, re.I):
raise AnsibleError('the connection attempt timed out')
m = re.search(to_text(r'Code\s+?(\d{3})'), err_msg)
if m:
code = int(m.groups()[0])
if code == 401:
err_msg = 'the specified credentials were rejected by the server'
elif code == 411:
return protocol
errors.append(u'%s: %s' % (transport, err_msg))
display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' % (err_msg, to_text(traceback.format_exc())), host=self._winrm_host)
if errors:
raise AnsibleConnectionFailure(', '.join(map(to_native, errors)))
else:
raise AnsibleError('No transport found for WinRM connection')
def _winrm_write_stdin(self, command_id: str, stdin_iterator: t.Iterable[tuple[bytes, bool]]) -> None:
for (data, is_last) in stdin_iterator:
for attempt in range(1, 4):
try:
self._winrm_send_input(self.protocol, self.shell_id, command_id, data, eof=is_last)
except WinRMOperationTimeoutError:
# A WSMan OperationTimeout can be received for a Send
# operation when the server is under severe load. On manual
# testing the input is still processed and it's safe to
# continue. As the calling method still tries to wait for
# the proc to end if this failed it shouldn't hurt to just
# treat this as a warning.
display.warning(
"WSMan OperationTimeout during send input, attempting to continue. "
"If this continues to occur, try increasing the connection_timeout "
"value for this host."
)
if not is_last:
time.sleep(5)
except WinRMError as e:
# Error 170 == ERROR_BUSY. This could be the result of a
# timed out Send from above still being processed on the
# server. Add a 5 second delay and try up to 3 times before
# fully giving up.
# pywinrm does not expose the internal WSMan fault details
# through an actual object but embeds it as a repr.
if attempt == 3 or "'wsmanfault_code': '170'" not in str(e):
raise
display.warning(f"WSMan send failed on attempt {attempt} as the command is busy, trying to send data again")
time.sleep(5)
continue
break
def _winrm_send_input(self, protocol: winrm.Protocol, shell_id: str, command_id: str, stdin: bytes, eof: bool = False) -> None:
rq = {'env:Envelope': protocol._get_soap_header(
resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd',
action='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Send',
shell_id=shell_id)}
stream = rq['env:Envelope'].setdefault('env:Body', {}).setdefault('rsp:Send', {})\
.setdefault('rsp:Stream', {})
stream['@Name'] = 'stdin'
stream['@CommandId'] = command_id
stream['#text'] = base64.b64encode(to_bytes(stdin))
if eof:
stream['@End'] = 'true'
protocol.send_message(xmltodict.unparse(rq))
def _winrm_get_raw_command_output(
self,
protocol: winrm.Protocol,
shell_id: str,
command_id: str,
) -> tuple[bytes, bytes, int, bool]:
rq = {'env:Envelope': protocol._get_soap_header(
resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd',
action='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Receive',
shell_id=shell_id)}
stream = rq['env:Envelope'].setdefault('env:Body', {}).setdefault('rsp:Receive', {})\
.setdefault('rsp:DesiredStream', {})
stream['@CommandId'] = command_id
stream['#text'] = 'stdout stderr'
res = protocol.send_message(xmltodict.unparse(rq))
root = ET.fromstring(res)
stream_nodes = [
node for node in root.findall('.//*')
if node.tag.endswith('Stream')]
stdout = []
stderr = []
return_code = -1
for stream_node in stream_nodes:
if not stream_node.text:
continue
if stream_node.attrib['Name'] == 'stdout':
stdout.append(base64.b64decode(stream_node.text.encode('ascii')))
elif stream_node.attrib['Name'] == 'stderr':
stderr.append(base64.b64decode(stream_node.text.encode('ascii')))
command_done = len([
node for node in root.findall('.//*')
if node.get('State', '').endswith('CommandState/Done')]) == 1
if command_done:
return_code = int(
next(node for node in root.findall('.//*')
if node.tag.endswith('ExitCode')).text)
return b"".join(stdout), b"".join(stderr), return_code, command_done
def _winrm_get_command_output(
self,
protocol: winrm.Protocol,
shell_id: str,
command_id: str,
try_once: bool = False,
) -> tuple[bytes, bytes, int]:
stdout_buffer, stderr_buffer = [], []
command_done = False
return_code = -1
while not command_done:
try:
stdout, stderr, return_code, command_done = \
self._winrm_get_raw_command_output(protocol, shell_id, command_id)
stdout_buffer.append(stdout)
stderr_buffer.append(stderr)
# If we were able to get output at least once then we should be
# able to get the rest.
try_once = False
except WinRMOperationTimeoutError:
# This is an expected error when waiting for a long-running process,
# just silently retry if we haven't been set to do one attempt.
if try_once:
break
continue
return b''.join(stdout_buffer), b''.join(stderr_buffer), return_code
def _winrm_exec(
self,
command: str,
args: t.Iterable[bytes | str] = (),
from_exec: bool = False,
stdin_iterator: t.Iterable[tuple[bytes, bool]] = None,
) -> tuple[int, bytes, bytes]:
if not self.protocol:
self.protocol = self._winrm_connect()
self._connected = True
if from_exec:
display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
else:
display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
command_id = None
try:
stdin_push_failed = False
command_id = self._winrm_run_command(
to_bytes(command),
tuple(map(to_bytes, args)),
console_mode_stdin=(stdin_iterator is None),
)
try:
if stdin_iterator:
self._winrm_write_stdin(command_id, stdin_iterator)
except Exception as ex:
display.error_as_warning("ERROR DURING WINRM SEND INPUT. Attempting to recover.", ex)
stdin_push_failed = True
# Even on a failure above we try at least once to get the output
# in case the stdin was actually written and it an normally.
b_stdout, b_stderr, rc = self._winrm_get_command_output(
self.protocol,
self.shell_id,
command_id,
try_once=stdin_push_failed,
)
stdout = to_text(b_stdout)
stderr = to_text(b_stderr)
if from_exec:
display.vvvvv('WINRM RESULT <Response code %d, out %r, err %r>' % (rc, stdout, stderr), host=self._winrm_host)
display.vvvvvv('WINRM RC %d' % rc, host=self._winrm_host)
display.vvvvvv('WINRM STDOUT %s' % stdout, host=self._winrm_host)
display.vvvvvv('WINRM STDERR %s' % stderr, host=self._winrm_host)
# This is done after logging so we can still see the raw stderr for
# debugging purposes.
if b_stderr.startswith(b"#< CLIXML"):
b_stderr = _parse_clixml(b_stderr)
stderr = to_text(stderr)
if stdin_push_failed:
# There are cases where the stdin input failed but the WinRM service still processed it. We attempt to
# see if stdout contains a valid json return value so we can ignore this error
try:
filtered_output, dummy = _filter_non_json_lines(stdout)
json.loads(filtered_output)
except ValueError:
# stdout does not contain a return response, stdin input was a fatal error
raise AnsibleError(f'winrm send_input failed; \nstdout: {stdout}\nstderr {stderr}')
return rc, b_stdout, b_stderr
except requests.exceptions.Timeout as exc:
raise AnsibleConnectionFailure('winrm connection error: %s' % to_native(exc))
finally:
if command_id:
# Due to a bug in how pywinrm works with message encryption we
# ignore a 400 error which can occur when a task timeout is
# set and the code tries to clean up the command. This happens
# as the cleanup msg is sent over a new socket but still uses
# the already encrypted payload bound to the other socket
# causing the server to reply with 400 Bad Request.
try:
self.protocol.cleanup_command(self.shell_id, command_id)
except WinRMTransportError as e:
if e.code != 400:
raise
display.warning("Failed to cleanup running WinRM command, resources might still be in use on the target server")
def _winrm_run_command(
self,
command: bytes,
args: tuple[bytes, ...],
console_mode_stdin: bool = False,
) -> str:
"""Starts a command with handling when the WSMan quota is exceeded."""
try:
return self.protocol.run_command(
self.shell_id,
command,
args,
console_mode_stdin=console_mode_stdin,
)
except WSManFaultError as fault_error:
if fault_error.wmierror_code != 0x803381A6:
raise
# 0x803381A6 == ERROR_WSMAN_QUOTA_MAX_OPERATIONS
# WinRS does not decrement the operation count for commands,
# only way to avoid this is to re-create the shell. This is
# important for action plugins that might be running multiple
# processes in the same connection.
display.vvvvv("Shell operation quota exceeded, re-creating shell", host=self._winrm_host)
self.close()
self._connect()
return self.protocol.run_command(
self.shell_id,
command,
args,
console_mode_stdin=console_mode_stdin,
)
def _connect(self) -> Connection:
if not HAS_WINRM:
raise AnsibleError("winrm or requests is not installed: %s" % to_native(WINRM_IMPORT_ERR))
elif not HAS_XMLTODICT:
raise AnsibleError("xmltodict is not installed: %s" % to_native(XMLTODICT_IMPORT_ERR))
super(Connection, self)._connect()
if not self.protocol:
self._build_winrm_kwargs() # build the kwargs from the options set
self.protocol = self._winrm_connect()
self._connected = True
return self
def reset(self) -> None:
if not self._connected:
return
self.protocol = None
self.shell_id = None
self._connect()
def _wrapper_payload_stream(self, payload: bytes, buffer_size: int = 200000) -> t.Iterable[tuple[bytes, bool]]:
payload_bytes = to_bytes(payload)
byte_count = len(payload_bytes)
for i in range(0, byte_count, buffer_size):
yield payload_bytes[i:i + buffer_size], i + buffer_size >= byte_count
def exec_command(self, cmd: str, in_data: bytes | None = None, sudoable: bool = True) -> tuple[int, bytes, bytes]:
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
encoded_prefix = self._shell._encode_script('', as_list=False, strict_mode=False, preserve_rc=False)
if cmd.startswith(encoded_prefix) or cmd.startswith("type "):
# Avoid double encoding the script, the first means we are already
# running the standard PowerShell command, the latter is used for
# the no pipeline case where it uses type to pipe the script into
# powershell which is known to work without re-encoding as pwsh.
cmd_parts = cmd.split(" ")
else:
cmd_parts = self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False)
# TODO: display something meaningful here
display.vvv("EXEC (via pipeline wrapper)")
stdin_iterator = None
if in_data:
stdin_iterator = self._wrapper_payload_stream(in_data)
return self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True, stdin_iterator=stdin_iterator)
# FUTURE: determine buffer size at runtime via remote winrm config?
def _put_file_stdin_iterator(
self,
initial_stdin: bytes,
in_path: str,
out_path: str,
buffer_size: int = 250000,
) -> t.Iterable[tuple[bytes, bool]]:
yield initial_stdin, False
in_size = os.path.getsize(to_bytes(in_path, errors='surrogate_or_strict'))
offset = 0
with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
for out_data in iter((lambda: in_file.read(buffer_size)), b''):
offset += len(out_data)
self._display.vvvvv('WINRM PUT "%s" to "%s" (offset=%d size=%d)' % (in_path, out_path, offset, len(out_data)), host=self._winrm_host)
# yes, we're double-encoding over the wire in this case- we want to ensure that the data shipped to the end PS pipeline is still b64-encoded
b64_data = base64.b64encode(out_data) + b'\r\n'
# cough up the data, as well as an indicator if this is the last chunk so winrm_send knows to set the End signal
yield b64_data, (in_file.tell() == in_size)
if offset == 0: # empty file, return an empty buffer + eof to close it
yield b"", True
def put_file(self, in_path: str, out_path: str) -> None:
super(Connection, self).put_file(in_path, out_path)
display.vvv('PUT "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound('file or module does not exist: "%s"' % to_native(in_path))
copy_script, copy_script_stdin = _bootstrap_powershell_script('winrm_put_file.ps1', {
'Path': out_path,
}, has_input=True)
cmd_parts = self._shell._encode_script(copy_script, as_list=True, strict_mode=False, preserve_rc=False)
status_code, b_stdout, b_stderr = self._winrm_exec(
cmd_parts[0],
cmd_parts[1:],
stdin_iterator=self._put_file_stdin_iterator(copy_script_stdin, in_path, out_path),
)
stdout = to_text(b_stdout)
stderr = to_text(b_stderr)
if status_code != 0:
raise AnsibleError(stderr)
try:
put_output = json.loads(stdout)
except ValueError:
# stdout does not contain a valid response
raise AnsibleError('winrm put_file failed; \nstdout: %s\nstderr %s' % (stdout, stderr))
remote_sha1 = put_output.get("sha1")
if not remote_sha1:
raise AnsibleError("Remote sha1 was not returned")
local_sha1 = secure_hash(in_path)
if not remote_sha1 == local_sha1:
raise AnsibleError("Remote sha1 hash {0} does not match local hash {1}".format(to_native(remote_sha1), to_native(local_sha1)))
def fetch_file(self, in_path: str, out_path: str) -> None:
super(Connection, self).fetch_file(in_path, out_path)
out_path = out_path.replace('\\', '/')
# consistent with other connection plugins, we assume the caller has created the target dir
display.vvv('FETCH "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
buffer_size = 2**19 # 0.5MB chunks
out_file = None
try:
offset = 0
while True:
try:
script, in_data = _bootstrap_powershell_script('winrm_fetch_file.ps1', {
'Path': in_path,
'BufferSize': buffer_size,
'Offset': offset,
})
display.vvvvv('WINRM FETCH "%s" to "%s" (offset=%d)' % (in_path, out_path, offset), host=self._winrm_host)
cmd_parts = self._shell._encode_script(script, as_list=True, preserve_rc=False)
status_code, b_stdout, b_stderr = self._winrm_exec(cmd_parts[0], cmd_parts[1:], stdin_iterator=self._wrapper_payload_stream(in_data))
stdout = to_text(b_stdout)
stderr = to_text(b_stderr)
if status_code != 0:
raise OSError(stderr)
if stdout.strip() == '[DIR]':
data = None
else:
data = base64.b64decode(stdout.strip())
if data is None:
break
else:
if not out_file:
# If out_path is a directory and we're expecting a file, bail out now.
if os.path.isdir(to_bytes(out_path, errors='surrogate_or_strict')):
break
out_file = open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb')
out_file.write(data)
if len(data) < buffer_size:
break
offset += len(data)
except Exception:
traceback.print_exc()
raise AnsibleError('failed to transfer file to "%s"' % to_native(out_path))
finally:
if out_file:
out_file.close()
def close(self) -> None:
if self.protocol and self.shell_id:
display.vvvvv('WINRM CLOSE SHELL: %s' % self.shell_id, host=self._winrm_host)
self.protocol.close_shell(self.shell_id)
self.shell_id = None
self.protocol = None
self._connected = False
| Connection |
python | spyder-ide__spyder | spyder/plugins/editor/widgets/status.py | {
"start": 848,
"end": 1287
} | class ____(StatusBarWidget):
"""Status bar widget for the current file end of line."""
ID = "eol_status"
def update_eol(self, os_name):
"""Update end of line status."""
os_name = str(os_name)
value = {"nt": "CRLF", "posix": "LF"}.get(os_name, "CR")
self.set_value(value)
def get_tooltip(self):
"""Return localized tool tip for widget."""
return _("File EOL Status")
| EOLStatus |
python | scrapy__scrapy | tests/test_utils_iterators.py | {
"start": 10448,
"end": 13486
} | class ____(TestXmliterBase):
def xmliter(
self, obj: Response | str | bytes, nodename: str, *args: Any
) -> Iterator[Selector]:
return xmliter_lxml(obj, nodename, *args)
def test_xmliter_iterate_namespace(self):
body = b"""
<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns="http://base.google.com/ns/1.0">
<channel>
<title>My Dummy Company</title>
<link>http://www.mydummycompany.com</link>
<description>This is a dummy company. We do nothing.</description>
<item>
<title>Item 1</title>
<description>This is item 1</description>
<link>http://www.mydummycompany.com/items/1</link>
<image_link>http://www.mydummycompany.com/images/item1.jpg</image_link>
<image_link>http://www.mydummycompany.com/images/item2.jpg</image_link>
</item>
</channel>
</rss>
"""
response = XmlResponse(url="http://mydummycompany.com", body=body)
no_namespace_iter = self.xmliter(response, "image_link")
assert len(list(no_namespace_iter)) == 0
namespace_iter = self.xmliter(
response, "image_link", "http://base.google.com/ns/1.0"
)
node = next(namespace_iter)
assert node.xpath("text()").getall() == [
"http://www.mydummycompany.com/images/item1.jpg"
]
node = next(namespace_iter)
assert node.xpath("text()").getall() == [
"http://www.mydummycompany.com/images/item2.jpg"
]
def test_xmliter_namespaces_prefix(self):
body = b"""
<?xml version="1.0" encoding="UTF-8"?>
<root>
<h:table xmlns:h="http://www.w3.org/TR/html4/">
<h:tr>
<h:td>Apples</h:td>
<h:td>Bananas</h:td>
</h:tr>
</h:table>
<f:table xmlns:f="http://www.w3schools.com/furniture">
<f:name>African Coffee Table</f:name>
<f:width>80</f:width>
<f:length>120</f:length>
</f:table>
</root>
"""
response = XmlResponse(url="http://mydummycompany.com", body=body)
my_iter = self.xmliter(response, "table", "http://www.w3.org/TR/html4/", "h")
node = next(my_iter)
assert len(node.xpath("h:tr/h:td").getall()) == 2
assert node.xpath("h:tr/h:td[1]/text()").getall() == ["Apples"]
assert node.xpath("h:tr/h:td[2]/text()").getall() == ["Bananas"]
my_iter = self.xmliter(
response, "table", "http://www.w3schools.com/furniture", "f"
)
node = next(my_iter)
assert node.xpath("f:name/text()").getall() == ["African Coffee Table"]
def test_xmliter_objtype_exception(self):
i = self.xmliter(42, "product")
with pytest.raises(TypeError):
next(i)
| TestLxmlXmliter |
python | apache__airflow | providers/google/tests/unit/google/cloud/transfers/test_bigquery_to_mssql.py | {
"start": 1694,
"end": 8018
} | class ____:
@mock.patch("airflow.providers.google.cloud.transfers.bigquery_to_mssql.BigQueryTableLink")
@mock.patch("airflow.providers.google.cloud.transfers.bigquery_to_sql.BigQueryHook")
def test_execute_good_request_to_bq(self, mock_hook, mock_link):
destination_table = "table"
operator = BigQueryToMsSqlOperator(
task_id=TASK_ID,
source_project_dataset_table=f"{TEST_PROJECT}.{TEST_DATASET}.{TEST_TABLE_ID}",
target_table_name=destination_table,
replace=False,
)
operator.execute(None)
mock_hook.return_value.list_rows.assert_called_once_with(
dataset_id=TEST_DATASET,
table_id=TEST_TABLE_ID,
max_results=1000,
selected_fields=None,
start_index=0,
)
@mock.patch("airflow.providers.google.cloud.transfers.bigquery_to_mssql.MsSqlHook")
def test_get_sql_hook(self, mock_hook):
hook_expected = mock_hook.return_value
destination_table = "table"
operator = BigQueryToMsSqlOperator(
task_id=TASK_ID,
source_project_dataset_table=f"{TEST_PROJECT}.{TEST_DATASET}.{TEST_TABLE_ID}",
target_table_name=destination_table,
replace=False,
)
hook_actual = operator.get_sql_hook()
assert hook_actual == hook_expected
mock_hook.assert_called_once_with(schema=operator.database, mssql_conn_id=operator.mssql_conn_id)
@mock.patch("airflow.providers.google.cloud.transfers.bigquery_to_mssql.BigQueryTableLink")
def test_persist_links(self, mock_link):
mock_context = mock.MagicMock()
destination_table = "table"
operator = BigQueryToMsSqlOperator(
task_id=TASK_ID,
source_project_dataset_table=f"{TEST_PROJECT}.{TEST_DATASET}.{TEST_TABLE_ID}",
target_table_name=destination_table,
replace=False,
)
operator.persist_links(context=mock_context)
mock_link.persist.assert_called_once_with(
context=mock_context,
dataset_id=TEST_DATASET,
project_id=TEST_PROJECT,
table_id=TEST_TABLE_ID,
)
@mock.patch("airflow.providers.google.cloud.transfers.bigquery_to_mssql.MsSqlHook")
@mock.patch("airflow.providers.google.cloud.transfers.bigquery_to_sql.BigQueryHook")
def test_get_openlineage_facets_on_complete_no_selected_fields(self, mock_bq_hook, mock_mssql_hook):
mock_bq_client = MagicMock()
table_obj = _make_bq_table(["id", "name", "value"])
mock_bq_client.get_table.return_value = table_obj
mock_bq_hook.get_client.return_value = mock_bq_client
mock_bq_hook.return_value = mock_bq_hook
db_info = MagicMock(scheme="mssql", authority="localhost:1433", database="mydb")
mock_mssql_hook.get_openlineage_database_info.return_value = db_info
mock_mssql_hook.get_openlineage_default_schema.return_value = "dbo"
mock_mssql_hook.return_value = mock_mssql_hook
op = BigQueryToMsSqlOperator(
task_id="test",
source_project_dataset_table="proj.dataset.table",
target_table_name="dbo.destination",
selected_fields=None,
database="mydb",
)
op.bigquery_hook = mock_bq_hook
op.mssql_hook = mock_mssql_hook
context = mock.MagicMock()
op.execute(context=context)
result = op.get_openlineage_facets_on_complete(task_instance=MagicMock())
assert len(result.inputs) == 1
assert len(result.outputs) == 1
input_ds = result.inputs[0]
assert input_ds.namespace == "bigquery"
assert input_ds.name == "proj.dataset.table"
assert "schema" in input_ds.facets
schema_fields = [f.name for f in input_ds.facets["schema"].fields]
assert set(schema_fields) == {"id", "name", "value"}
output_ds = result.outputs[0]
assert output_ds.namespace == "mssql://localhost:1433"
assert output_ds.name == "mydb.dbo.destination"
assert "columnLineage" in output_ds.facets
col_lineage = output_ds.facets["columnLineage"]
assert set(col_lineage.fields.keys()) == {"id", "name", "value"}
@mock.patch("airflow.providers.google.cloud.transfers.bigquery_to_mssql.MsSqlHook")
@mock.patch("airflow.providers.google.cloud.transfers.bigquery_to_sql.BigQueryHook")
def test_get_openlineage_facets_on_complete_selected_fields(self, mock_bq_hook, mock_mssql_hook):
mock_bq_client = MagicMock()
table_obj = _make_bq_table(["id", "name", "value"])
mock_bq_client.get_table.return_value = table_obj
mock_bq_hook.get_client.return_value = mock_bq_client
mock_bq_hook.return_value = mock_bq_hook
db_info = MagicMock(scheme="mssql", authority="server.example:1433", database="mydb")
mock_mssql_hook.get_openlineage_database_info.return_value = db_info
mock_mssql_hook.get_openlineage_default_schema.return_value = "dbo"
mock_mssql_hook.return_value = mock_mssql_hook
op = BigQueryToMsSqlOperator(
task_id="test",
source_project_dataset_table="proj.dataset.table",
target_table_name="dbo.destination",
selected_fields=["id", "name"],
database="mydb",
)
op.bigquery_hook = mock_bq_hook
op.mssql_hook = mock_mssql_hook
context = mock.MagicMock()
op.execute(context=context)
result = op.get_openlineage_facets_on_complete(task_instance=MagicMock())
assert len(result.inputs) == 1
assert len(result.outputs) == 1
input_ds = result.inputs[0]
assert input_ds.namespace == "bigquery"
assert "schema" in input_ds.facets
schema_fields = [f.name for f in input_ds.facets["schema"].fields]
assert set(schema_fields) == {"id", "name"}
output_ds = result.outputs[0]
assert output_ds.namespace == "mssql://server.example:1433"
assert output_ds.name == "mydb.dbo.destination"
assert "columnLineage" in output_ds.facets
col_lineage = output_ds.facets["columnLineage"]
assert set(col_lineage.fields.keys()) == {"id", "name"}
| TestBigQueryToMsSqlOperator |
python | ray-project__ray | doc/source/ray-core/doc_code/cgraph_nccl.py | {
"start": 1171,
"end": 1971
} | class ____:
def recv(self, tensor: torch.Tensor):
assert tensor.device.type == "cuda"
return tensor.shape
sender = GPUSender.remote()
receiver = GPUReceiver.remote()
# __cgraph_nccl_setup_end__
# __cgraph_nccl_exec_start__
with ray.dag.InputNode() as inp:
dag = sender.send.bind(inp)
# Add a type hint that the return value of `send` should use NCCL.
dag = dag.with_tensor_transport("nccl")
# NOTE: With ray<2.42, use `with_type_hint()` instead.
# dag = dag.with_type_hint(TorchTensorType(transport="nccl"))
dag = receiver.recv.bind(dag)
# Compile API prepares the NCCL communicator across all workers and schedule operations
# accordingly.
dag = dag.experimental_compile()
assert ray.get(dag.execute((10,))) == (10,)
# __cgraph_nccl_exec_end__
| GPUReceiver |
python | PrefectHQ__prefect | src/prefect/cli/transfer/_dag.py | {
"start": 1372,
"end": 13115
} | class ____:
"""
Execution DAG for managing resource transfer dependencies.
Uses Kahn's algorithm for topological sorting and concurrent execution.
See: https://en.wikipedia.org/wiki/Topological_sorting#Kahn%27s_algorithm
The DAG ensures resources are transferred in dependency order while
maximizing parallelism for independent resources.
"""
def __init__(self):
self.nodes: dict[uuid.UUID, MigratableProtocol] = {}
self._dependencies: dict[uuid.UUID, set[uuid.UUID]] = defaultdict(set)
self._dependents: dict[uuid.UUID, set[uuid.UUID]] = defaultdict(set)
self._status: dict[uuid.UUID, NodeStatus] = {}
self._lock = asyncio.Lock()
def add_node(self, node: MigratableProtocol) -> uuid.UUID:
"""
Add a node to the graph, deduplicating by source ID.
Args:
node: Resource to add to the graph
Returns:
The node's source UUID
"""
if node.source_id not in self.nodes:
self.nodes[node.source_id] = node
self._status[node.source_id] = NodeStatus(node)
return node.source_id
def add_edge(self, dependent_id: uuid.UUID, dependency_id: uuid.UUID) -> None:
"""
Add a dependency edge where dependent depends on dependency.
Args:
dependent_id: ID of the resource that has a dependency
dependency_id: ID of the resource being depended upon
"""
if dependency_id in self._dependencies[dependent_id]:
return
self._dependencies[dependent_id].add(dependency_id)
self._dependents[dependency_id].add(dependent_id)
self._status[dependent_id].dependencies.add(dependency_id)
self._status[dependency_id].dependents.add(dependent_id)
async def build_from_roots(self, roots: Sequence[MigratableProtocol]) -> None:
"""
Build the graph from root resources by recursively discovering dependencies.
Args:
roots: Collection of root resources to start discovery from
"""
visited: set[uuid.UUID] = set()
async def visit(resource: MigratableProtocol):
if resource.source_id in visited:
return
visited.add(resource.source_id)
rid = self.add_node(resource)
visit_coroutines: list[Coroutine[Any, Any, None]] = []
for dep in await resource.get_dependencies():
did = self.add_node(dep)
self.add_edge(rid, did)
visit_coroutines.append(visit(dep))
await asyncio.gather(*visit_coroutines)
visit_coroutines = [visit(r) for r in roots]
await asyncio.gather(*visit_coroutines)
def has_cycles(self) -> bool:
"""
Check if the graph has cycles using three-color DFS.
Uses the classic three-color algorithm where:
- WHITE (0): Unvisited node
- GRAY (1): Currently being explored (in DFS stack)
- BLACK (2): Fully explored
A cycle exists if we encounter a GRAY node during traversal (back edge).
See: https://en.wikipedia.org/wiki/Depth-first_search#Vertex_orderings
Returns:
True if the graph contains cycles, False otherwise
"""
WHITE, GRAY, BLACK = 0, 1, 2
color = {node_id: WHITE for node_id in self.nodes}
def visit(node_id: uuid.UUID) -> bool:
if color[node_id] == GRAY:
return True # Back edge found - cycle detected
if color[node_id] == BLACK:
return False # Already fully explored
color[node_id] = GRAY
for dep_id in self._dependencies[node_id]:
if visit(dep_id):
return True
color[node_id] = BLACK
return False
for node_id in self.nodes:
if color[node_id] == WHITE:
if visit(node_id):
return True
return False
def get_execution_layers(
self, *, _assume_acyclic: bool = False
) -> list[list[MigratableProtocol]]:
"""
Get execution layers using Kahn's algorithm.
Each layer contains nodes that can be executed in parallel.
Kahn's algorithm repeatedly removes nodes with no dependencies,
forming layers of concurrent work.
See: https://en.wikipedia.org/wiki/Topological_sorting#Kahn%27s_algorithm
Args:
_assume_acyclic: Skip cycle check if caller already verified
Returns:
List of layers, each containing nodes that can run in parallel
Raises:
ValueError: If the graph contains cycles
"""
if not _assume_acyclic and self.has_cycles():
raise ValueError("Cannot sort DAG with cycles")
in_degree = {n: len(self._dependencies[n]) for n in self.nodes}
layers: list[list[MigratableProtocol]] = []
cur = [n for n in self.nodes if in_degree[n] == 0]
while cur:
layers.append([self.nodes[n] for n in cur])
nxt: list[uuid.UUID] = []
for n in cur:
for d in self._dependents[n]:
in_degree[d] -= 1
if in_degree[d] == 0:
nxt.append(d)
cur = nxt
return layers
async def execute_concurrent(
self,
process_node: Callable[[MigratableProtocol], Awaitable[Any]],
max_workers: int = 10,
skip_on_failure: bool = True,
) -> dict[uuid.UUID, Any]:
"""
Execute the DAG concurrently using Kahn's algorithm.
Processes nodes in topological order while maximizing parallelism.
When a node completes, its dependents are checked to see if they're
ready to execute (all dependencies satisfied).
Args:
process_node: Async function to process each node
max_workers: Maximum number of concurrent workers
skip_on_failure: Whether to skip descendants when a node fails
Returns:
Dictionary mapping node IDs to their results (or exceptions)
Raises:
ValueError: If the graph contains cycles
"""
if self.has_cycles():
raise ValueError("Cannot execute DAG with cycles")
layers = self.get_execution_layers(_assume_acyclic=True)
logger.debug(f"Execution plan has {len(layers)} layers")
for i, layer in enumerate(layers):
# Count each type in the layer
type_counts: dict[str, int] = {}
for node in layer:
node_type = type(node).__name__
type_counts[node_type] = type_counts.get(node_type, 0) + 1
type_summary = ", ".join(
[f"{count} {type_name}" for type_name, count in type_counts.items()]
)
logger.debug(f"Layer {i}: ({type_summary})")
# Initialize with nodes that have no dependencies
ready_queue: list[uuid.UUID] = []
for nid in self.nodes:
if not self._dependencies[nid]:
ready_queue.append(nid)
self._status[nid].state = NodeState.READY
results: dict[uuid.UUID, Any] = {}
limiter = anyio.CapacityLimiter(max_workers)
processing: set[uuid.UUID] = set()
async def worker(nid: uuid.UUID, tg: TaskGroup):
"""Process a single node."""
node = self.nodes[nid]
# Check if node was skipped after being queued
if self._status[nid].state != NodeState.READY:
logger.debug(f"Node {node} was skipped before execution")
return
async with limiter:
try:
self._status[nid].state = NodeState.IN_PROGRESS
logger.debug(f"Processing {node}")
res = await process_node(node)
results[nid] = res
self._status[nid].state = NodeState.COMPLETED
logger.debug(f"Completed {node}")
# Mark dependents as ready if all their dependencies are satisfied
async with self._lock:
for did in self._status[nid].dependents:
dst = self._status[did]
if dst.state == NodeState.PENDING:
if all(
self._status[d].state == NodeState.COMPLETED
for d in dst.dependencies
):
dst.state = NodeState.READY
# Start the newly ready task immediately
if did not in processing:
processing.add(did)
tg.start_soon(worker, did, tg)
except TransferSkipped as e:
results[nid] = e
self._status[nid].state = NodeState.SKIPPED
self._status[nid].error = e
logger.debug(f"Skipped {node}: {e}")
except Exception as e:
results[nid] = e
self._status[nid].state = NodeState.FAILED
self._status[nid].error = e
logger.debug(f"Failed to process {node}: {e}")
if skip_on_failure:
# Skip all descendants of the failed node
to_skip = deque([nid])
seen_failed: set[uuid.UUID] = set()
while to_skip:
cur = to_skip.popleft()
if cur in seen_failed:
continue
seen_failed.add(cur)
for did in self._status[cur].dependents:
st = self._status[did]
# Skip nodes that haven't started yet
if st.state in {NodeState.PENDING, NodeState.READY}:
st.state = NodeState.SKIPPED
results[did] = TransferSkipped(
"Skipped due to upstream resource failure"
)
logger.debug(
f"Skipped {self.nodes[did]} due to upstream failure"
)
to_skip.append(did)
finally:
processing.discard(nid)
async with create_task_group() as tg:
# Start processing all initially ready nodes
for nid in ready_queue:
if self._status[nid].state == NodeState.READY:
processing.add(nid)
tg.start_soon(worker, nid, tg)
return results
def get_statistics(self) -> dict[str, Any]:
"""
Get statistics about the DAG structure.
Returns:
Dictionary with node counts, edge counts, and cycle detection
"""
deps = self._dependencies
return {
"total_nodes": len(self.nodes),
"total_edges": sum(len(v) for v in deps.values()),
"max_in_degree": max((len(deps[n]) for n in self.nodes), default=0),
"max_out_degree": max(
(len(self._dependents[n]) for n in self.nodes), default=0
),
"has_cycles": self.has_cycles(),
}
| TransferDAG |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1410633,
"end": 1462412
} | class ____(
sgqlc.types.Type, Node, ProjectV2Recent, ProjectOwner, PackageOwner, Subscribable, Starrable, UniformResourceLocatable, RepositoryInfo
):
"""A repository contains the content for a project."""
__schema__ = github_schema
__field_names__ = (
"allow_update_branch",
"assignable_users",
"auto_merge_allowed",
"branch_protection_rules",
"code_of_conduct",
"codeowners",
"collaborators",
"commit_comments",
"contact_links",
"database_id",
"default_branch_ref",
"delete_branch_on_merge",
"deploy_keys",
"deployments",
"discussion",
"discussion_categories",
"discussion_category",
"discussions",
"disk_usage",
"environment",
"environments",
"forking_allowed",
"forks",
"funding_links",
"has_vulnerability_alerts_enabled",
"interaction_ability",
"is_blank_issues_enabled",
"is_disabled",
"is_empty",
"is_security_policy_enabled",
"is_user_configuration_repository",
"issue",
"issue_or_pull_request",
"issue_templates",
"issues",
"label",
"labels",
"languages",
"latest_release",
"mentionable_users",
"merge_commit_allowed",
"merge_commit_message",
"merge_commit_title",
"merge_queue",
"milestone",
"milestones",
"object",
"parent",
"pinned_discussions",
"pinned_issues",
"primary_language",
"project_v2",
"projects_v2",
"pull_request",
"pull_request_templates",
"pull_requests",
"rebase_merge_allowed",
"ref",
"refs",
"release",
"releases",
"repository_topics",
"ruleset",
"rulesets",
"security_policy_url",
"squash_merge_allowed",
"squash_merge_commit_message",
"squash_merge_commit_title",
"ssh_url",
"submodules",
"temp_clone_token",
"template_repository",
"viewer_can_administer",
"viewer_can_update_topics",
"viewer_default_commit_email",
"viewer_default_merge_method",
"viewer_permission",
"viewer_possible_commit_emails",
"vulnerability_alert",
"vulnerability_alerts",
"watchers",
"web_commit_signoff_required",
)
allow_update_branch = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="allowUpdateBranch")
"""Whether or not a pull request head branch that is behind its base
branch can always be updated even if it is not required to be up
to date before merging.
"""
assignable_users = sgqlc.types.Field(
sgqlc.types.non_null(UserConnection),
graphql_name="assignableUsers",
args=sgqlc.types.ArgDict(
(
("query", sgqlc.types.Arg(String, graphql_name="query", default=None)),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of users that can be assigned to issues in this repository.
Arguments:
* `query` (`String`): Filters users with query on user name and
login
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
auto_merge_allowed = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="autoMergeAllowed")
"""Whether or not Auto-merge can be enabled on pull requests in this
repository.
"""
branch_protection_rules = sgqlc.types.Field(
sgqlc.types.non_null(BranchProtectionRuleConnection),
graphql_name="branchProtectionRules",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of branch protection rules for this repository.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
code_of_conduct = sgqlc.types.Field(CodeOfConduct, graphql_name="codeOfConduct")
"""Returns the code of conduct for this repository"""
codeowners = sgqlc.types.Field(
RepositoryCodeowners,
graphql_name="codeowners",
args=sgqlc.types.ArgDict((("ref_name", sgqlc.types.Arg(String, graphql_name="refName", default=None)),)),
)
"""Information extracted from the repository's `CODEOWNERS` file.
Arguments:
* `ref_name` (`String`): The ref name used to return the
associated `CODEOWNERS` file.
"""
collaborators = sgqlc.types.Field(
RepositoryCollaboratorConnection,
graphql_name="collaborators",
args=sgqlc.types.ArgDict(
(
("affiliation", sgqlc.types.Arg(CollaboratorAffiliation, graphql_name="affiliation", default=None)),
("login", sgqlc.types.Arg(String, graphql_name="login", default=None)),
("query", sgqlc.types.Arg(String, graphql_name="query", default=None)),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of collaborators associated with the repository.
Arguments:
* `affiliation` (`CollaboratorAffiliation`): Collaborators
affiliation level with a repository.
* `login` (`String`): The login of one specific collaborator.
* `query` (`String`): Filters users with query on user name and
login
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
commit_comments = sgqlc.types.Field(
sgqlc.types.non_null(CommitCommentConnection),
graphql_name="commitComments",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of commit comments associated with the repository.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
contact_links = sgqlc.types.Field(sgqlc.types.list_of(sgqlc.types.non_null(RepositoryContactLink)), graphql_name="contactLinks")
"""Returns a list of contact links associated to the repository"""
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
"""Identifies the primary key from the database."""
default_branch_ref = sgqlc.types.Field(Ref, graphql_name="defaultBranchRef")
"""The Ref associated with the repository's default branch."""
delete_branch_on_merge = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="deleteBranchOnMerge")
"""Whether or not branches are automatically deleted when merged in
this repository.
"""
deploy_keys = sgqlc.types.Field(
sgqlc.types.non_null(DeployKeyConnection),
graphql_name="deployKeys",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of deploy keys that are on this repository.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
deployments = sgqlc.types.Field(
sgqlc.types.non_null(DeploymentConnection),
graphql_name="deployments",
args=sgqlc.types.ArgDict(
(
(
"environments",
sgqlc.types.Arg(sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="environments", default=None),
),
("order_by", sgqlc.types.Arg(DeploymentOrder, graphql_name="orderBy", default={"field": "CREATED_AT", "direction": "ASC"})),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""Deployments associated with the repository
Arguments:
* `environments` (`[String!]`): Environments to list deployments
for
* `order_by` (`DeploymentOrder`): Ordering options for deployments
returned from the connection. (default: `{field: CREATED_AT,
direction: ASC}`)
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
discussion = sgqlc.types.Field(
Discussion,
graphql_name="discussion",
args=sgqlc.types.ArgDict((("number", sgqlc.types.Arg(sgqlc.types.non_null(Int), graphql_name="number", default=None)),)),
)
"""Returns a single discussion from the current repository by number.
Arguments:
* `number` (`Int!`): The number for the discussion to be returned.
"""
discussion_categories = sgqlc.types.Field(
sgqlc.types.non_null(DiscussionCategoryConnection),
graphql_name="discussionCategories",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("filter_by_assignable", sgqlc.types.Arg(Boolean, graphql_name="filterByAssignable", default=False)),
)
),
)
"""A list of discussion categories that are available in the
repository.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `filter_by_assignable` (`Boolean`): Filter by categories that
are assignable by the viewer. (default: `false`)
"""
discussion_category = sgqlc.types.Field(
DiscussionCategory,
graphql_name="discussionCategory",
args=sgqlc.types.ArgDict((("slug", sgqlc.types.Arg(sgqlc.types.non_null(String), graphql_name="slug", default=None)),)),
)
"""A discussion category by slug.
Arguments:
* `slug` (`String!`): The slug of the discussion category to be
returned.
"""
discussions = sgqlc.types.Field(
sgqlc.types.non_null(DiscussionConnection),
graphql_name="discussions",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("category_id", sgqlc.types.Arg(ID, graphql_name="categoryId", default=None)),
("states", sgqlc.types.Arg(sgqlc.types.list_of(sgqlc.types.non_null(DiscussionState)), graphql_name="states", default=())),
(
"order_by",
sgqlc.types.Arg(DiscussionOrder, graphql_name="orderBy", default={"field": "UPDATED_AT", "direction": "DESC"}),
),
)
),
)
"""A list of discussions that have been opened in the repository.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `category_id` (`ID`): Only include discussions that belong to
the category with this ID. (default: `null`)
* `states` (`[DiscussionState!]`): A list of states to filter the
discussions by. (default: `[]`)
* `order_by` (`DiscussionOrder`): Ordering options for discussions
returned from the connection. (default: `{field: UPDATED_AT,
direction: DESC}`)
"""
disk_usage = sgqlc.types.Field(Int, graphql_name="diskUsage")
"""The number of kilobytes this repository occupies on disk."""
environment = sgqlc.types.Field(
Environment,
graphql_name="environment",
args=sgqlc.types.ArgDict((("name", sgqlc.types.Arg(sgqlc.types.non_null(String), graphql_name="name", default=None)),)),
)
"""Returns a single active environment from the current repository by
name.
Arguments:
* `name` (`String!`): The name of the environment to be returned.
"""
environments = sgqlc.types.Field(
sgqlc.types.non_null(EnvironmentConnection),
graphql_name="environments",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of environments that are in this repository.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
forking_allowed = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="forkingAllowed")
"""Whether this repository allows forks."""
forks = sgqlc.types.Field(
sgqlc.types.non_null(RepositoryConnection),
graphql_name="forks",
args=sgqlc.types.ArgDict(
(
("privacy", sgqlc.types.Arg(RepositoryPrivacy, graphql_name="privacy", default=None)),
("order_by", sgqlc.types.Arg(RepositoryOrder, graphql_name="orderBy", default=None)),
("affiliations", sgqlc.types.Arg(sgqlc.types.list_of(RepositoryAffiliation), graphql_name="affiliations", default=None)),
(
"owner_affiliations",
sgqlc.types.Arg(
sgqlc.types.list_of(RepositoryAffiliation), graphql_name="ownerAffiliations", default=("OWNER", "COLLABORATOR")
),
),
("is_locked", sgqlc.types.Arg(Boolean, graphql_name="isLocked", default=None)),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of direct forked repositories.
Arguments:
* `privacy` (`RepositoryPrivacy`): If non-null, filters
repositories according to privacy
* `order_by` (`RepositoryOrder`): Ordering options for
repositories returned from the connection
* `affiliations` (`[RepositoryAffiliation]`): Array of viewer's
affiliation options for repositories returned from the
connection. For example, OWNER will include only repositories
that the current viewer owns.
* `owner_affiliations` (`[RepositoryAffiliation]`): Array of
owner's affiliation options for repositories returned from the
connection. For example, OWNER will include only repositories
that the organization or user being viewed owns. (default:
`[OWNER, COLLABORATOR]`)
* `is_locked` (`Boolean`): If non-null, filters repositories
according to whether they have been locked
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
funding_links = sgqlc.types.Field(
sgqlc.types.non_null(sgqlc.types.list_of(sgqlc.types.non_null(FundingLink))), graphql_name="fundingLinks"
)
"""The funding links for this repository"""
has_vulnerability_alerts_enabled = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="hasVulnerabilityAlertsEnabled")
"""Whether vulnerability alerts are enabled for the repository."""
interaction_ability = sgqlc.types.Field(RepositoryInteractionAbility, graphql_name="interactionAbility")
"""The interaction ability settings for this repository."""
is_blank_issues_enabled = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isBlankIssuesEnabled")
"""Returns true if blank issue creation is allowed"""
is_disabled = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isDisabled")
"""Returns whether or not this repository disabled."""
is_empty = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isEmpty")
"""Returns whether or not this repository is empty."""
is_security_policy_enabled = sgqlc.types.Field(Boolean, graphql_name="isSecurityPolicyEnabled")
"""Returns true if this repository has a security policy"""
is_user_configuration_repository = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isUserConfigurationRepository")
"""Is this repository a user configuration repository?"""
issue = sgqlc.types.Field(
Issue,
graphql_name="issue",
args=sgqlc.types.ArgDict((("number", sgqlc.types.Arg(sgqlc.types.non_null(Int), graphql_name="number", default=None)),)),
)
"""Returns a single issue from the current repository by number.
Arguments:
* `number` (`Int!`): The number for the issue to be returned.
"""
issue_or_pull_request = sgqlc.types.Field(
"IssueOrPullRequest",
graphql_name="issueOrPullRequest",
args=sgqlc.types.ArgDict((("number", sgqlc.types.Arg(sgqlc.types.non_null(Int), graphql_name="number", default=None)),)),
)
"""Returns a single issue-like object from the current repository by
number.
Arguments:
* `number` (`Int!`): The number for the issue to be returned.
"""
issue_templates = sgqlc.types.Field(sgqlc.types.list_of(sgqlc.types.non_null(IssueTemplate)), graphql_name="issueTemplates")
"""Returns a list of issue templates associated to the repository"""
issues = sgqlc.types.Field(
sgqlc.types.non_null(IssueConnection),
graphql_name="issues",
args=sgqlc.types.ArgDict(
(
("order_by", sgqlc.types.Arg(IssueOrder, graphql_name="orderBy", default=None)),
("labels", sgqlc.types.Arg(sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="labels", default=None)),
("states", sgqlc.types.Arg(sgqlc.types.list_of(sgqlc.types.non_null(IssueState)), graphql_name="states", default=None)),
("filter_by", sgqlc.types.Arg(IssueFilters, graphql_name="filterBy", default=None)),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of issues that have been opened in the repository.
Arguments:
* `order_by` (`IssueOrder`): Ordering options for issues returned
from the connection.
* `labels` (`[String!]`): A list of label names to filter the pull
requests by.
* `states` (`[IssueState!]`): A list of states to filter the
issues by.
* `filter_by` (`IssueFilters`): Filtering options for issues
returned from the connection.
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
label = sgqlc.types.Field(
Label,
graphql_name="label",
args=sgqlc.types.ArgDict((("name", sgqlc.types.Arg(sgqlc.types.non_null(String), graphql_name="name", default=None)),)),
)
"""Returns a single label by name
Arguments:
* `name` (`String!`): Label name
"""
labels = sgqlc.types.Field(
LabelConnection,
graphql_name="labels",
args=sgqlc.types.ArgDict(
(
("order_by", sgqlc.types.Arg(LabelOrder, graphql_name="orderBy", default={"field": "CREATED_AT", "direction": "ASC"})),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("query", sgqlc.types.Arg(String, graphql_name="query", default=None)),
)
),
)
"""A list of labels associated with the repository.
Arguments:
* `order_by` (`LabelOrder`): Ordering options for labels returned
from the connection. (default: `{field: CREATED_AT, direction:
ASC}`)
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `query` (`String`): If provided, searches labels by name and
description.
"""
languages = sgqlc.types.Field(
LanguageConnection,
graphql_name="languages",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("order_by", sgqlc.types.Arg(LanguageOrder, graphql_name="orderBy", default=None)),
)
),
)
"""A list containing a breakdown of the language composition of the
repository.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `order_by` (`LanguageOrder`): Order for connection
"""
latest_release = sgqlc.types.Field(Release, graphql_name="latestRelease")
"""Get the latest release for the repository if one exists."""
mentionable_users = sgqlc.types.Field(
sgqlc.types.non_null(UserConnection),
graphql_name="mentionableUsers",
args=sgqlc.types.ArgDict(
(
("query", sgqlc.types.Arg(String, graphql_name="query", default=None)),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of Users that can be mentioned in the context of the
repository.
Arguments:
* `query` (`String`): Filters users with query on user name and
login
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
merge_commit_allowed = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="mergeCommitAllowed")
"""Whether or not PRs are merged with a merge commit on this
repository.
"""
merge_commit_message = sgqlc.types.Field(sgqlc.types.non_null(MergeCommitMessage), graphql_name="mergeCommitMessage")
"""How the default commit message will be generated when merging a
pull request.
"""
merge_commit_title = sgqlc.types.Field(sgqlc.types.non_null(MergeCommitTitle), graphql_name="mergeCommitTitle")
"""How the default commit title will be generated when merging a pull
request.
"""
merge_queue = sgqlc.types.Field(
MergeQueue,
graphql_name="mergeQueue",
args=sgqlc.types.ArgDict((("branch", sgqlc.types.Arg(String, graphql_name="branch", default=None)),)),
)
"""The merge queue for a specified branch, otherwise the default
branch if not provided.
Arguments:
* `branch` (`String`): The name of the branch to get the merge
queue for. Case sensitive.
"""
milestone = sgqlc.types.Field(
Milestone,
graphql_name="milestone",
args=sgqlc.types.ArgDict((("number", sgqlc.types.Arg(sgqlc.types.non_null(Int), graphql_name="number", default=None)),)),
)
"""Returns a single milestone from the current repository by number.
Arguments:
* `number` (`Int!`): The number for the milestone to be returned.
"""
milestones = sgqlc.types.Field(
MilestoneConnection,
graphql_name="milestones",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("states", sgqlc.types.Arg(sgqlc.types.list_of(sgqlc.types.non_null(MilestoneState)), graphql_name="states", default=None)),
("order_by", sgqlc.types.Arg(MilestoneOrder, graphql_name="orderBy", default=None)),
("query", sgqlc.types.Arg(String, graphql_name="query", default=None)),
)
),
)
"""A list of milestones associated with the repository.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `states` (`[MilestoneState!]`): Filter by the state of the
milestones.
* `order_by` (`MilestoneOrder`): Ordering options for milestones.
* `query` (`String`): Filters milestones with a query on the title
"""
object = sgqlc.types.Field(
GitObject,
graphql_name="object",
args=sgqlc.types.ArgDict(
(
("oid", sgqlc.types.Arg(GitObjectID, graphql_name="oid", default=None)),
("expression", sgqlc.types.Arg(String, graphql_name="expression", default=None)),
)
),
)
"""A Git object in the repository
Arguments:
* `oid` (`GitObjectID`): The Git object ID
* `expression` (`String`): A Git revision expression suitable for
rev-parse
"""
parent = sgqlc.types.Field("Repository", graphql_name="parent")
"""The repository parent, if this is a fork."""
pinned_discussions = sgqlc.types.Field(
sgqlc.types.non_null(PinnedDiscussionConnection),
graphql_name="pinnedDiscussions",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of discussions that have been pinned in this repository.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
pinned_issues = sgqlc.types.Field(
PinnedIssueConnection,
graphql_name="pinnedIssues",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of pinned issues for this repository.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
primary_language = sgqlc.types.Field(Language, graphql_name="primaryLanguage")
"""The primary language of the repository's code."""
project_v2 = sgqlc.types.Field(
ProjectV2,
graphql_name="projectV2",
args=sgqlc.types.ArgDict((("number", sgqlc.types.Arg(sgqlc.types.non_null(Int), graphql_name="number", default=None)),)),
)
"""Finds and returns the Project according to the provided Project
number.
Arguments:
* `number` (`Int!`): The Project number.
"""
projects_v2 = sgqlc.types.Field(
sgqlc.types.non_null(ProjectV2Connection),
graphql_name="projectsV2",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("query", sgqlc.types.Arg(String, graphql_name="query", default=None)),
("order_by", sgqlc.types.Arg(ProjectV2Order, graphql_name="orderBy", default={"field": "NUMBER", "direction": "DESC"})),
)
),
)
"""List of projects linked to this repository.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `query` (`String`): A project to search for linked to the repo.
* `order_by` (`ProjectV2Order`): How to order the returned
projects. (default: `{field: NUMBER, direction: DESC}`)
"""
pull_request = sgqlc.types.Field(
PullRequest,
graphql_name="pullRequest",
args=sgqlc.types.ArgDict((("number", sgqlc.types.Arg(sgqlc.types.non_null(Int), graphql_name="number", default=None)),)),
)
"""Returns a single pull request from the current repository by
number.
Arguments:
* `number` (`Int!`): The number for the pull request to be
returned.
"""
pull_request_templates = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null(PullRequestTemplate)), graphql_name="pullRequestTemplates"
)
"""Returns a list of pull request templates associated to the
repository
"""
pull_requests = sgqlc.types.Field(
sgqlc.types.non_null(PullRequestConnection),
graphql_name="pullRequests",
args=sgqlc.types.ArgDict(
(
(
"states",
sgqlc.types.Arg(sgqlc.types.list_of(sgqlc.types.non_null(PullRequestState)), graphql_name="states", default=None),
),
("labels", sgqlc.types.Arg(sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="labels", default=None)),
("head_ref_name", sgqlc.types.Arg(String, graphql_name="headRefName", default=None)),
("base_ref_name", sgqlc.types.Arg(String, graphql_name="baseRefName", default=None)),
("order_by", sgqlc.types.Arg(IssueOrder, graphql_name="orderBy", default=None)),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of pull requests that have been opened in the repository.
Arguments:
* `states` (`[PullRequestState!]`): A list of states to filter the
pull requests by.
* `labels` (`[String!]`): A list of label names to filter the pull
requests by.
* `head_ref_name` (`String`): The head ref name to filter the pull
requests by.
* `base_ref_name` (`String`): The base ref name to filter the pull
requests by.
* `order_by` (`IssueOrder`): Ordering options for pull requests
returned from the connection.
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
rebase_merge_allowed = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="rebaseMergeAllowed")
"""Whether or not rebase-merging is enabled on this repository."""
ref = sgqlc.types.Field(
Ref,
graphql_name="ref",
args=sgqlc.types.ArgDict(
(("qualified_name", sgqlc.types.Arg(sgqlc.types.non_null(String), graphql_name="qualifiedName", default=None)),)
),
)
"""Fetch a given ref from the repository
Arguments:
* `qualified_name` (`String!`): The ref to retrieve. Fully
qualified matches are checked in order (`refs/heads/master`)
before falling back onto checks for short name matches
(`master`).
"""
refs = sgqlc.types.Field(
RefConnection,
graphql_name="refs",
args=sgqlc.types.ArgDict(
(
("query", sgqlc.types.Arg(String, graphql_name="query", default=None)),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("ref_prefix", sgqlc.types.Arg(sgqlc.types.non_null(String), graphql_name="refPrefix", default=None)),
("direction", sgqlc.types.Arg(OrderDirection, graphql_name="direction", default=None)),
("order_by", sgqlc.types.Arg(RefOrder, graphql_name="orderBy", default=None)),
)
),
)
"""Fetch a list of refs from the repository
Arguments:
* `query` (`String`): Filters refs with query on name
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `ref_prefix` (`String!`): A ref name prefix like `refs/heads/`,
`refs/tags/`, etc.
* `direction` (`OrderDirection`): DEPRECATED: use orderBy. The
ordering direction.
* `order_by` (`RefOrder`): Ordering options for refs returned from
the connection.
"""
release = sgqlc.types.Field(
Release,
graphql_name="release",
args=sgqlc.types.ArgDict((("tag_name", sgqlc.types.Arg(sgqlc.types.non_null(String), graphql_name="tagName", default=None)),)),
)
"""Lookup a single release given various criteria.
Arguments:
* `tag_name` (`String!`): The name of the Tag the Release was
created from
"""
releases = sgqlc.types.Field(
sgqlc.types.non_null(ReleaseConnection),
graphql_name="releases",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("order_by", sgqlc.types.Arg(ReleaseOrder, graphql_name="orderBy", default=None)),
)
),
)
"""List of releases which are dependent on this repository.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `order_by` (`ReleaseOrder`): Order for connection
"""
repository_topics = sgqlc.types.Field(
sgqlc.types.non_null(RepositoryTopicConnection),
graphql_name="repositoryTopics",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of applied repository-topic associations for this
repository.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
ruleset = sgqlc.types.Field(
"RepositoryRuleset",
graphql_name="ruleset",
args=sgqlc.types.ArgDict((("database_id", sgqlc.types.Arg(sgqlc.types.non_null(Int), graphql_name="databaseId", default=None)),)),
)
"""Returns a single ruleset from the current repository by ID.
Arguments:
* `database_id` (`Int!`): The ID of the ruleset to be returned.
"""
rulesets = sgqlc.types.Field(
RepositoryRulesetConnection,
graphql_name="rulesets",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
("include_parents", sgqlc.types.Arg(Boolean, graphql_name="includeParents", default=False)),
)
),
)
"""A list of rulesets for this repository.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `include_parents` (`Boolean`): Return rulesets configured at
higher levels that apply to this repository (default: `false`)
"""
security_policy_url = sgqlc.types.Field(URI, graphql_name="securityPolicyUrl")
"""The security policy URL."""
squash_merge_allowed = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="squashMergeAllowed")
"""Whether or not squash-merging is enabled on this repository."""
squash_merge_commit_message = sgqlc.types.Field(sgqlc.types.non_null(SquashMergeCommitMessage), graphql_name="squashMergeCommitMessage")
"""How the default commit message will be generated when squash
merging a pull request.
"""
squash_merge_commit_title = sgqlc.types.Field(sgqlc.types.non_null(SquashMergeCommitTitle), graphql_name="squashMergeCommitTitle")
"""How the default commit title will be generated when squash merging
a pull request.
"""
ssh_url = sgqlc.types.Field(sgqlc.types.non_null(GitSSHRemote), graphql_name="sshUrl")
"""The SSH URL to clone this repository"""
submodules = sgqlc.types.Field(
sgqlc.types.non_null(SubmoduleConnection),
graphql_name="submodules",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""Returns a list of all submodules in this repository parsed from
the .gitmodules file as of the default branch's HEAD commit.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
temp_clone_token = sgqlc.types.Field(String, graphql_name="tempCloneToken")
"""Temporary authentication token for cloning this repository."""
template_repository = sgqlc.types.Field("Repository", graphql_name="templateRepository")
"""The repository from which this repository was generated, if any."""
viewer_can_administer = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanAdminister")
"""Indicates whether the viewer has admin permissions on this
repository.
"""
viewer_can_update_topics = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanUpdateTopics")
"""Indicates whether the viewer can update the topics of this
repository.
"""
viewer_default_commit_email = sgqlc.types.Field(String, graphql_name="viewerDefaultCommitEmail")
"""The last commit email for the viewer."""
viewer_default_merge_method = sgqlc.types.Field(sgqlc.types.non_null(PullRequestMergeMethod), graphql_name="viewerDefaultMergeMethod")
"""The last used merge method by the viewer or the default for the
repository.
"""
viewer_permission = sgqlc.types.Field(RepositoryPermission, graphql_name="viewerPermission")
"""The users permission level on the repository. Will return null if
authenticated as an GitHub App.
"""
viewer_possible_commit_emails = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="viewerPossibleCommitEmails"
)
"""A list of emails this viewer can commit with."""
vulnerability_alert = sgqlc.types.Field(
"RepositoryVulnerabilityAlert",
graphql_name="vulnerabilityAlert",
args=sgqlc.types.ArgDict((("number", sgqlc.types.Arg(sgqlc.types.non_null(Int), graphql_name="number", default=None)),)),
)
"""Returns a single vulnerability alert from the current repository
by number.
Arguments:
* `number` (`Int!`): The number for the vulnerability alert to be
returned.
"""
vulnerability_alerts = sgqlc.types.Field(
RepositoryVulnerabilityAlertConnection,
graphql_name="vulnerabilityAlerts",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"states",
sgqlc.types.Arg(
sgqlc.types.list_of(sgqlc.types.non_null(RepositoryVulnerabilityAlertState)), graphql_name="states", default=None
),
),
(
"dependency_scopes",
sgqlc.types.Arg(
sgqlc.types.list_of(sgqlc.types.non_null(RepositoryVulnerabilityAlertDependencyScope)),
graphql_name="dependencyScopes",
default=None,
),
),
)
),
)
"""A list of vulnerability alerts that are on this repository.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
* `states` (`[RepositoryVulnerabilityAlertState!]`): Filter by the
state of the alert
* `dependency_scopes`
(`[RepositoryVulnerabilityAlertDependencyScope!]`): Filter by
the scope of the alert's dependency
"""
watchers = sgqlc.types.Field(
sgqlc.types.non_null(UserConnection),
graphql_name="watchers",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""A list of users watching the repository.
Arguments:
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
web_commit_signoff_required = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="webCommitSignoffRequired")
"""Whether contributors are required to sign off on web-based commits
in this repository.
"""
| Repository |
python | python__mypy | mypyc/ir/ops.py | {
"start": 12031,
"end": 12734
} | class ____(ControlOp):
"""Unconditional jump."""
error_kind = ERR_NEVER
def __init__(self, label: BasicBlock, line: int = -1) -> None:
super().__init__(line)
self.label = label
def targets(self) -> Sequence[BasicBlock]:
return (self.label,)
def set_target(self, i: int, new: BasicBlock) -> None:
assert i == 0
self.label = new
def __repr__(self) -> str:
return "<Goto %s>" % self.label.label
def sources(self) -> list[Value]:
return []
def set_sources(self, new: list[Value]) -> None:
assert not new
def accept(self, visitor: OpVisitor[T]) -> T:
return visitor.visit_goto(self)
@final
| Goto |
python | google__jax | jax/_src/errors.py | {
"start": 23837,
"end": 24777
} | class ____(JAXTypeError):
"""
This error occurs when a PRNG key is reused in an unsafe manner.
Key reuse is checked only when `jax_debug_key_reuse` is
set to `True`.
Here is a simple example of code that would lead to such an error::
>>> with jax.debug_key_reuse(True): # doctest: +SKIP
... key = jax.random.key(0)
... value = jax.random.uniform(key)
... new_value = jax.random.uniform(key)
...
---------------------------------------------------------------------------
KeyReuseError Traceback (most recent call last)
...
KeyReuseError: Previously-consumed key passed to jit-compiled function at index 0
This sort of key reuse is problematic because the JAX PRNG is stateless, and keys
must be manually split; For more information on this see `the Pseudorandom Numbers
tutorial <https://docs.jax.dev/en/latest/random-numbers.html>`_.
"""
| KeyReuseError |
python | huggingface__transformers | src/transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py | {
"start": 51627,
"end": 61288
} | class ____(MMGroundingDinoPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* deformable attention layers. Each layer is a
[`MMGroundingDinoEncoderLayer`].
The encoder updates the flattened multi-scale feature maps through multiple deformable attention layers.
Args:
config: MMGroundingDinoConfig
"""
def __init__(self, config: MMGroundingDinoConfig):
super().__init__(config)
self.dropout = config.dropout
self.layers = nn.ModuleList([MMGroundingDinoEncoderLayer(config) for _ in range(config.encoder_layers)])
# Initialize weights and apply final processing
self.post_init()
@staticmethod
def get_reference_points(spatial_shapes, valid_ratios, device):
"""
Get reference points for each feature map.
Args:
spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`):
Spatial shapes of each feature map.
valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):
Valid ratios of each feature map.
device (`torch.device`):
Device on which to create the tensors.
Returns:
`torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)`
"""
reference_points_list = []
for level, (height, width) in enumerate(spatial_shapes):
ref_y, ref_x = meshgrid(
torch.linspace(0.5, height - 0.5, height, dtype=torch.float32, device=device),
torch.linspace(0.5, width - 0.5, width, dtype=torch.float32, device=device),
indexing="ij",
)
# TODO: valid_ratios could be useless here. check https://github.com/fundamentalvision/Deformable-DETR/issues/36
ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, level, 1] * height)
ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, level, 0] * width)
ref = torch.stack((ref_x, ref_y), -1)
reference_points_list.append(ref)
reference_points = torch.cat(reference_points_list, 1)
reference_points = reference_points[:, :, None] * valid_ratios[:, None]
return reference_points
def forward(
self,
vision_features: Tensor,
vision_attention_mask: Tensor,
vision_position_embedding: Tensor,
spatial_shapes: Tensor,
spatial_shapes_list: list[tuple[int, int]],
level_start_index: Tensor,
valid_ratios=None,
text_features: Optional[Tensor] = None,
text_attention_mask: Optional[Tensor] = None,
text_position_embedding: Optional[Tensor] = None,
text_self_attention_masks: Optional[Tensor] = None,
text_position_ids: Optional[Tensor] = None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
vision_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Flattened feature map (output of the backbone + projection layer) that is passed to the encoder.
vision_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`:
- 0 for pixel features that are real (i.e. **not masked**),
- 1 for pixel features that are padding (i.e. **masked**).
[What are attention masks?](../glossary#attention-mask)
vision_position_embedding (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Position embeddings that are added to the queries and keys in each self-attention layer.
spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`):
Spatial shapes of each feature map.
spatial_shapes_list (`list[tuple[int, int]]`):
Spatial shapes of each feature map (but as list for export compatibility).
level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`):
Starting index of each feature map.
valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):
Ratio of valid area in each feature level.
text_features (`torch.FloatTensor` of shape `(batch_size, text_seq_len, hidden_size)`):
Flattened text features that are passed to the encoder.
text_attention_mask (`torch.Tensor` of shape `(batch_size, text_seq_len)`, *optional*):
Mask to avoid performing attention on padding text features. Mask values selected in `[0, 1]`:
- 0 for text features that are real (i.e. **not masked**),
- 1 for text features that are padding (i.e. **masked**).
[What are attention masks?](../glossary#attention-mask)
text_position_embedding (`torch.FloatTensor` of shape `(batch_size, text_seq_len)`):
Position embeddings that are added to the queries and keys in each self-attention layer.
text_self_attention_masks (`torch.BoolTensor` of shape `(batch_size, text_seq_len, text_seq_len)`):
Masks to avoid performing attention between padding text features. Mask values selected in `[0, 1]`:
- 1 for text features that are real (i.e. **not masked**),
- 0 for text features that are padding (i.e. **masked**).
text_position_ids (`torch.LongTensor` of shape `(batch_size, num_queries)`):
Position ids for text features.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=vision_features.device)
encoder_vision_states = () if output_hidden_states else None
encoder_text_states = () if output_hidden_states else None
all_attns = () if output_attentions else None
all_attn_fused_text = () if output_attentions else None
all_attn_fused_vision = () if output_attentions else None
all_attn_enhanced_text = () if output_attentions else None
all_attn_deformable = () if output_attentions else None
for i, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_vision_states += (vision_features,)
encoder_text_states += (text_features,)
(vision_features, text_features), attentions = encoder_layer(
vision_features=vision_features,
vision_position_embedding=vision_position_embedding,
spatial_shapes=spatial_shapes,
spatial_shapes_list=spatial_shapes_list,
level_start_index=level_start_index,
key_padding_mask=vision_attention_mask,
reference_points=reference_points,
text_features=text_features,
text_attention_mask=text_attention_mask,
text_position_embedding=text_position_embedding,
text_self_attention_masks=text_self_attention_masks,
text_position_ids=text_position_ids,
)
if output_attentions:
all_attn_fused_vision += (attentions[0],)
all_attn_fused_text += (attentions[1],)
all_attn_enhanced_text += (attentions[2],)
all_attn_deformable += (attentions[3],)
if output_hidden_states:
encoder_vision_states += (vision_features,)
encoder_text_states += (text_features,)
if output_attentions:
all_attns = (all_attn_fused_vision, all_attn_fused_text, all_attn_enhanced_text, all_attn_deformable)
if not return_dict:
enc_outputs = [vision_features, text_features, encoder_vision_states, encoder_text_states, all_attns]
return tuple(v for v in enc_outputs if v is not None)
return MMGroundingDinoEncoderOutput(
last_hidden_state_vision=vision_features,
last_hidden_state_text=text_features,
vision_hidden_states=encoder_vision_states,
text_hidden_states=encoder_text_states,
attentions=all_attns,
)
@dataclass
@auto_docstring(
custom_intro="""
Base class for outputs of the MMGroundingDinoDecoder. This class adds two attributes to
BaseModelOutputWithCrossAttentions, namely:
- a stacked tensor of intermediate decoder hidden states (i.e. the output of each decoder layer)
- a stacked tensor of intermediate reference points.
"""
)
| MMGroundingDinoEncoder |
python | Textualize__textual | src/textual/widgets/_tooltip.py | {
"start": 73,
"end": 450
} | class ____(Static, inherit_css=False):
DEFAULT_CSS = """
Tooltip {
layer: _tooltips;
margin: 1 0;
padding: 1 2;
background: $panel;
width: auto;
height: auto;
constrain: inside inflect;
max-width: 40;
display: none;
offset-x: -50%;
}
"""
DEFAULT_CLASSES = "-textual-system"
| Tooltip |
python | apache__airflow | providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/operators/custom_object_launcher.py | {
"start": 8202,
"end": 15447
} | class ____(LoggingMixin):
"""Launches PODS."""
def __init__(
self,
name: str | None,
namespace: str | None,
kube_client: CoreV1Api,
custom_obj_api: CustomObjectsApi,
template_body: str | None = None,
):
"""
Create custom object launcher(sparkapplications crd).
:param kube_client: kubernetes client.
"""
super().__init__()
self.name = name
self.namespace = namespace
self.template_body = template_body
self.body: dict = self.get_body()
self.kind = self.body["kind"]
self.plural = f"{self.kind.lower()}s"
if self.body.get("apiVersion"):
self.api_group, self.api_version = self.body["apiVersion"].split("/")
else:
self.api_group = self.body["apiGroup"]
self.api_version = self.body["version"]
self._client = kube_client
self.custom_obj_api = custom_obj_api
self.spark_obj_spec: dict = {}
self.pod_spec: k8s.V1Pod | None = None
@cached_property
def pod_manager(self) -> PodManager:
return PodManager(kube_client=self._client)
def get_body(self):
self.body: dict = SparkJobSpec(**self.template_body["spark"])
if not hasattr(self.body, "metadata") or not isinstance(self.body.metadata, dict):
self.body.metadata = {}
self.body.metadata.update({"name": self.name, "namespace": self.namespace})
if self.template_body.get("kubernetes"):
k8s_spec: dict = KubernetesSpec(**self.template_body["kubernetes"])
self.body.spec["volumes"] = k8s_spec.volumes
if k8s_spec.image_pull_secrets:
self.body.spec["imagePullSecrets"] = k8s_spec.image_pull_secrets
for item in ["driver", "executor"]:
# Env List
self.body.spec[item]["env"] = k8s_spec.env_vars
self.body.spec[item]["envFrom"] = k8s_spec.env_from
# Volumes
self.body.spec[item]["volumeMounts"] = k8s_spec.volume_mounts
# Add affinity
self.body.spec[item]["affinity"] = k8s_spec.affinity
self.body.spec[item]["tolerations"] = k8s_spec.tolerations
self.body.spec[item]["nodeSelector"] = k8s_spec.node_selector
# Labels
self.body.spec[item]["labels"] = self.body.spec["labels"]
return self.body.__dict__
@tenacity.retry(
stop=tenacity.stop_after_attempt(3),
wait=tenacity.wait_random_exponential(),
reraise=True,
retry=tenacity.retry_if_exception(should_retry_start_spark_job),
)
def start_spark_job(self, image=None, code_path=None, startup_timeout: int = 600):
"""
Launch the pod synchronously and waits for completion.
:param image: image name
:param code_path: path to the .py file for python and jar file for scala
:param startup_timeout: Timeout for startup of the pod (if pod is pending for too long, fails task)
:return:
"""
try:
if image:
self.body["spec"]["image"] = image
if code_path:
self.body["spec"]["mainApplicationFile"] = code_path
self.log.debug("Spark Job Creation Request Submitted")
self.spark_obj_spec = self.custom_obj_api.create_namespaced_custom_object(
group=self.api_group,
version=self.api_version,
namespace=self.namespace,
plural=self.plural,
body=self.body,
)
self.log.debug("Spark Job Creation Response: %s", self.spark_obj_spec)
# Wait for the driver pod to come alive
self.pod_spec = k8s.V1Pod(
metadata=k8s.V1ObjectMeta(
labels=self.spark_obj_spec["spec"]["driver"].get("labels"),
name=self.spark_obj_spec["metadata"]["name"] + "-driver",
namespace=self.namespace,
)
)
curr_time = dt.now()
while self.spark_job_not_running(self.spark_obj_spec):
self.log.warning(
"Spark job submitted but not yet started. job_id: %s",
self.spark_obj_spec["metadata"]["name"],
)
self.check_pod_start_failure()
delta = dt.now() - curr_time
if delta.total_seconds() >= startup_timeout:
pod_status = self.pod_manager.read_pod(self.pod_spec).status.container_statuses
raise AirflowException(f"Job took too long to start. pod status: {pod_status}")
time.sleep(10)
except Exception as e:
self.log.exception("Exception when attempting to create spark job")
raise e
return self.pod_spec, self.spark_obj_spec
def spark_job_not_running(self, spark_obj_spec):
"""Test if spark_obj_spec has not started."""
spark_job_info = self.custom_obj_api.get_namespaced_custom_object_status(
group=self.api_group,
version=self.api_version,
namespace=self.namespace,
name=spark_obj_spec["metadata"]["name"],
plural=self.plural,
)
driver_state = spark_job_info.get("status", {}).get("applicationState", {}).get("state", "SUBMITTED")
if driver_state == CustomObjectStatus.FAILED:
err = spark_job_info.get("status", {}).get("applicationState", {}).get("errorMessage", "N/A")
with contextlib.suppress(Exception):
self.pod_manager.fetch_container_logs(
pod=self.pod_spec, container_name="spark-kubernetes-driver"
)
raise AirflowException(f"Spark Job Failed. Error stack: {err}")
return driver_state == CustomObjectStatus.SUBMITTED
def check_pod_start_failure(self):
try:
waiting_status = (
self.pod_manager.read_pod(self.pod_spec).status.container_statuses[0].state.waiting
)
waiting_reason = waiting_status.reason
waiting_message = waiting_status.message
except Exception:
return
if waiting_reason not in ("ContainerCreating", "PodInitializing"):
raise AirflowException(f"Spark Job Failed. Status: {waiting_reason}, Error: {waiting_message}")
def delete_spark_job(self, spark_job_name=None):
"""Delete spark job."""
spark_job_name = spark_job_name or self.spark_obj_spec.get("metadata", {}).get("name")
if not spark_job_name:
self.log.warning("Spark job not found: %s", spark_job_name)
return
try:
self.custom_obj_api.delete_namespaced_custom_object(
group=self.api_group,
version=self.api_version,
namespace=self.namespace,
plural=self.plural,
name=spark_job_name,
)
except ApiException as e:
# If the pod is already deleted
if str(e.status) != "404":
raise
| CustomObjectLauncher |
python | django__django | tests/one_to_one/models.py | {
"start": 1630,
"end": 1897
} | class ____(models.Model):
link1 = models.OneToOneField(Place, models.CASCADE)
link2 = models.OneToOneField(ManualPrimaryKey, models.CASCADE)
name = models.CharField(max_length=50)
def __str__(self):
return "Multimodel %s" % self.name
| MultiModel |
python | walkccc__LeetCode | solutions/870. Advantage Shuffle/870.py | {
"start": 42,
"end": 323
} | class ____:
def advantageCount(self, nums1: list[int], nums2: list[int]) -> list[int]:
sl = SortedList(nums1)
for i, num in enumerate(nums2):
index = 0 if sl[-1] <= num else sl.bisect_right(num)
nums1[i] = sl[index]
del sl[index]
return nums1
| Solution |
python | pennersr__django-allauth | allauth/socialaccount/providers/strava/views.py | {
"start": 181,
"end": 938
} | class ____(OAuth2Adapter):
provider_id = "strava"
access_token_url = "https://www.strava.com/oauth/token" # nosec
authorize_url = "https://www.strava.com/oauth/authorize"
profile_url = "https://www.strava.com/api/v3/athlete"
def complete_login(self, request, app, token, **kwargs):
headers = {"Authorization": "Bearer {0}".format(token.token)}
resp = (
get_adapter().get_requests_session().get(self.profile_url, headers=headers)
)
extra_data = resp.json()
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(StravaOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(StravaOAuth2Adapter)
| StravaOAuth2Adapter |
python | google__pytype | pytype/tests/test_operators1.py | {
"start": 110,
"end": 6830
} | class ____(test_base.BaseTest, test_utils.OperatorsTestMixin):
"""Tests for operators on concrete values (no unknowns)."""
def test_add(self):
self.check_expr("x + y", ["x=1", "y=2"], "int")
self.check_expr("x + y", ["x=1.0", "y=2"], "float")
self.check_expr("x + y", ["x=1", "y=2.0"], "float")
self.check_expr("x + y", ["x=1.1", "y=2.1"], "float")
def test_add2(self):
# split out from test_add for better sharding
self.check_expr("x + y", ["x=1", "y=2j"], "complex")
self.check_expr("x + y", ["x=1.0", "y=2j"], "complex")
self.check_expr("x + y", ["x=2j", "y=1"], "complex")
self.check_expr("x + y", ["x=3+2j", "y=1.0"], "complex")
self.check_expr("x + y", ["x=1j", "y=2j"], "complex")
def test_add3(self):
# split out from test_add for better sharding
self.check_expr("x + y", ["x='1'", "y='2'"], "str")
self.check_expr("x + y", ["x=[1]", "y=[2]"], "list[int]")
self.check_expr("x + y", ["a=1", "x=[a,a,a]", "y=[a,a,a]"], "list[int]")
self.check_expr("x + y", ["a=1", "x=[a,a,a]", "y=[]"], "list[int]")
self.check_expr("x + y", ["a=1", "x=[]", "y=[a,a,a]"], "list[int]")
def test_add4(self):
# split out from test_add for better sharding
self.check_expr("x + y", ["x=[]", "y=[]"], "list[nothing]")
self.check_expr("x + y", ["x=[1]", "y=['abc']"], "list[int | str]")
self.check_expr("x + y", ["x=(1,)", "y=(2,)"], "tuple[int, int]")
self.check_expr("x + y", ["x=(1,)", "y=(2.0,)"], "tuple[int, float]")
def test_and(self):
self.check_expr("x & y", ["x=3", "y=5"], "int")
self.check_expr("x & y", ["x={1}", "y={1, 2}"], "set[int]")
self.check_expr("x & y", ["x={1}", "y={1.2}"], "set[int]")
self.check_expr("x & y", ["x={1, 2}", "y=set([1])"], "set[int]")
self.check_expr("x & y", ["x=1", "y=2"], "int")
def test_frozenset_ops(self):
self.check_expr(
"x & y", ["x=frozenset()", "y=frozenset()"], "frozenset[nothing]"
)
self.check_expr(
"x - y", ["x=frozenset()", "y=frozenset()"], "frozenset[nothing]"
)
self.check_expr(
"x | y",
["x=frozenset([1.0])", "y=frozenset([2.2])"],
"frozenset[float]",
)
def test_contains(self):
self.check_expr("x in y", ["x=[1]", "y=[1, 2]"], "bool")
self.check_expr("x in y", ["x='ab'", "y='abcd'"], "bool")
self.check_expr("x in y", ["x='ab'", "y=['abcd']"], "bool")
def test_div(self):
self.check_expr("x / y", ["x=1.0", "y=2"], "float")
self.check_expr("x / y", ["x=1", "y=2.0"], "float")
self.check_expr("x / y", ["x=1.1", "y=2.1"], "float")
self.check_expr("x / y", ["x=1j", "y=2j"], "complex")
def test_div2(self):
# split out from test_div for better sharding
self.check_expr("x / y", ["x=1", "y=2j"], "complex")
self.check_expr("x / y", ["x=1.0", "y=2j"], "complex")
self.check_expr("x / y", ["x=2j", "y=1j"], "complex")
self.check_expr("x / y", ["x=2j", "y=1"], "complex")
self.check_expr("x / y", ["x=3+2j", "y=1.0"], "complex")
def test_floordiv(self):
self.check_expr("x // y", ["x=1", "y=2"], "int")
self.check_expr("x // y", ["x=1.0", "y=2"], "float")
self.check_expr("x // y", ["x=1", "y=2.0"], "float")
self.check_expr("x // y", ["x=1.1", "y=2.1"], "float")
self.check_expr("x // y", ["x=1j", "y=2j"], "complex")
def test_floordiv2(self):
# split out from test_floordiv for better sharding
self.check_expr("x // y", ["x=1", "y=2j"], "complex")
self.check_expr("x // y", ["x=1.0", "y=2j"], "complex")
self.check_expr("x // y", ["x=2j", "y=1j"], "complex")
self.check_expr("x // y", ["x=2j", "y=1"], "complex")
self.check_expr("x // y", ["x=3+2j", "y=1.0"], "complex")
def test_invert(self):
self.check_expr("~x", ["x=3"], "int")
self.check_expr("~x", ["x=False"], "int")
def test_lshift(self):
self.check_expr("x << y", ["x=1", "y=2"], "int")
def test_rshift(self):
self.check_expr("x >> y", ["x=1", "y=2"], "int")
def test_sub(self):
self.check_expr("x - y", ["x=1", "y=2"], "int")
self.check_expr("x - y", ["x=1.0", "y=2"], "float")
self.check_expr("x - y", ["x=1", "y=2.0"], "float")
self.check_expr("x - y", ["x=1.1", "y=2.1"], "float")
def test_sub2(self):
# split out from test_sub for better sharding
self.check_expr("x - y", ["x=1j", "y=2j"], "complex")
self.check_expr("x - y", ["x={1}", "y={1, 2}"], "set[int]")
self.check_expr("x - y", ["x={1}", "y={1.2}"], "set[int]")
self.check_expr("x - y", ["x={1, 2}", "y=set([1])"], "set[int]")
def test_sub_frozenset(self):
self.check_expr("x - y", ["x={1, 2}", "y=frozenset([1.0])"], "set[int]")
def test_mod(self):
self.check_expr("x % y", ["x=1", "y=2"], "int")
self.check_expr("x % y", ["x=1.5", "y=2.5"], "float")
self.check_expr("x % y", ["x='%r'", "y=set()"], "str")
def test_mul(self):
self.check_expr("x * y", ["x=1", "y=2"], "int")
self.check_expr("x * y", ["x=1", "y=2.1"], "float")
self.check_expr("x * y", ["x=1+2j", "y=2.1+3.4j"], "complex")
self.check_expr("x * y", ["x='x'", "y=3"], "str")
self.check_expr("x * y", ["x=3", "y='x'"], "str")
def test_mul2(self):
# split out from test_mul for better sharding
self.check_expr("x * y", ["x=[1, 2]", "y=3"], "list[int]")
self.check_expr("x * y", ["x=99", "y=[1.0, 2]"], "list[int | float]")
self.check_expr("x * y", ["x=(1, 2)", "y=3"], "tuple[int, ...]")
self.check_expr("x * y", ["x=0", "y=(1, 2.0)"], "tuple[int | float, ...]")
def test_neg(self):
self.check_expr("-x", ["x=1"], "int")
self.check_expr("-x", ["x=1.5"], "float")
self.check_expr("-x", ["x=1j"], "complex")
def test_or(self):
self.check_expr("x | y", ["x=1", "y=2"], "int")
self.check_expr("x | y", ["x={1}", "y={2}"], "set[int]")
def test_pos(self):
self.check_expr("+x", ["x=1"], "int")
self.check_expr("+x", ["x=1.5"], "float")
self.check_expr("+x", ["x=2 + 3.1j"], "complex")
def test_pow(self):
self.check_expr("x ** y", ["x=1", "y=2"], "int | float")
self.check_expr("x ** y", ["x=1", "y=-2"], "int | float")
self.check_expr("x ** y", ["x=1.0", "y=2"], "float")
self.check_expr("x ** y", ["x=1", "y=2.0"], "float")
self.check_expr("x ** y", ["x=1.1", "y=2.1"], "float")
self.check_expr("x ** y", ["x=1j", "y=2j"], "complex")
def test_xor(self):
self.check_expr("x ^ y", ["x=1", "y=2"], "int")
self.check_expr("x ^ y", ["x={1}", "y={2}"], "set[int]")
def test_add_type_parameter_instance(self):
self.Check("""
from typing import Union
v = None # type: Union[str]
d = {v: 42}
for k, _ in sorted(d.items()):
k + " as "
""")
| ConcreteTest |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/pygments/formatters/svg.py | {
"start": 726,
"end": 7174
} | class ____(Formatter):
"""
Format tokens as an SVG graphics file. This formatter is still experimental.
Each line of code is a ``<text>`` element with explicit ``x`` and ``y``
coordinates containing ``<tspan>`` elements with the individual token styles.
By default, this formatter outputs a full SVG document including doctype
declaration and the ``<svg>`` root element.
.. versionadded:: 0.9
Additional options accepted:
`nowrap`
Don't wrap the SVG ``<text>`` elements in ``<svg><g>`` elements and
don't add a XML declaration and a doctype. If true, the `fontfamily`
and `fontsize` options are ignored. Defaults to ``False``.
`fontfamily`
The value to give the wrapping ``<g>`` element's ``font-family``
attribute, defaults to ``"monospace"``.
`fontsize`
The value to give the wrapping ``<g>`` element's ``font-size``
attribute, defaults to ``"14px"``.
`linenos`
If ``True``, add line numbers (default: ``False``).
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`linenowidth`
Maximum width devoted to line numbers (default: ``3*ystep``, sufficient
for up to 4-digit line numbers. Increase width for longer code blocks).
`xoffset`
Starting offset in X direction, defaults to ``0``.
`yoffset`
Starting offset in Y direction, defaults to the font size if it is given
in pixels, or ``20`` else. (This is necessary since text coordinates
refer to the text baseline, not the top edge.)
`ystep`
Offset to add to the Y coordinate for each subsequent line. This should
roughly be the text size plus 5. It defaults to that value if the text
size is given in pixels, or ``25`` else.
`spacehack`
Convert spaces in the source to `` ``, which are non-breaking
spaces. SVG provides the ``xml:space`` attribute to control how
whitespace inside tags is handled, in theory, the ``preserve`` value
could be used to keep all whitespace as-is. However, many current SVG
viewers don't obey that rule, so this option is provided as a workaround
and defaults to ``True``.
"""
name = 'SVG'
aliases = ['svg']
filenames = ['*.svg']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.nowrap = get_bool_opt(options, 'nowrap', False)
self.fontfamily = options.get('fontfamily', 'monospace')
self.fontsize = options.get('fontsize', '14px')
self.xoffset = get_int_opt(options, 'xoffset', 0)
fs = self.fontsize.strip()
if fs.endswith('px'):
fs = fs[:-2].strip()
try:
int_fs = int(fs)
except ValueError:
int_fs = 20
self.yoffset = get_int_opt(options, 'yoffset', int_fs)
self.ystep = get_int_opt(options, 'ystep', int_fs + 5)
self.spacehack = get_bool_opt(options, 'spacehack', True)
self.linenos = get_bool_opt(options,'linenos',False)
self.linenostart = get_int_opt(options,'linenostart',1)
self.linenostep = get_int_opt(options,'linenostep',1)
self.linenowidth = get_int_opt(options,'linenowidth', 3*self.ystep)
self._stylecache = {}
def format_unencoded(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
For our implementation we put all lines in their own 'line group'.
"""
x = self.xoffset
y = self.yoffset
if not self.nowrap:
if self.encoding:
outfile.write(f'<?xml version="1.0" encoding="{self.encoding}"?>\n')
else:
outfile.write('<?xml version="1.0"?>\n')
outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" '
'"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/'
'svg10.dtd">\n')
outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n')
outfile.write(f'<g font-family="{self.fontfamily}" font-size="{self.fontsize}">\n')
counter = self.linenostart
counter_step = self.linenostep
counter_style = self._get_style(Comment)
line_x = x
if self.linenos:
if counter % counter_step == 0:
outfile.write(f'<text x="{x+self.linenowidth}" y="{y}" {counter_style} text-anchor="end">{counter}</text>')
line_x += self.linenowidth + self.ystep
counter += 1
outfile.write(f'<text x="{line_x}" y="{y}" xml:space="preserve">')
for ttype, value in tokensource:
style = self._get_style(ttype)
tspan = style and '<tspan' + style + '>' or ''
tspanend = tspan and '</tspan>' or ''
value = escape_html(value)
if self.spacehack:
value = value.expandtabs().replace(' ', ' ')
parts = value.split('\n')
for part in parts[:-1]:
outfile.write(tspan + part + tspanend)
y += self.ystep
outfile.write('</text>\n')
if self.linenos and counter % counter_step == 0:
outfile.write(f'<text x="{x+self.linenowidth}" y="{y}" text-anchor="end" {counter_style}>{counter}</text>')
counter += 1
outfile.write(f'<text x="{line_x}" y="{y}" ' 'xml:space="preserve">')
outfile.write(tspan + parts[-1] + tspanend)
outfile.write('</text>')
if not self.nowrap:
outfile.write('</g></svg>\n')
def _get_style(self, tokentype):
if tokentype in self._stylecache:
return self._stylecache[tokentype]
otokentype = tokentype
while not self.style.styles_token(tokentype):
tokentype = tokentype.parent
value = self.style.style_for_token(tokentype)
result = ''
if value['color']:
result = ' fill="#' + value['color'] + '"'
if value['bold']:
result += ' font-weight="bold"'
if value['italic']:
result += ' font-style="italic"'
self._stylecache[otokentype] = result
return result
| SvgFormatter |
python | tiangolo__fastapi | docs_src/path_operation_configuration/tutorial005.py | {
"start": 109,
"end": 741
} | class ____(BaseModel):
name: str
description: Union[str, None] = None
price: float
tax: Union[float, None] = None
tags: Set[str] = set()
@app.post(
"/items/",
response_model=Item,
summary="Create an item",
response_description="The created item",
)
async def create_item(item: Item):
"""
Create an item with all the information:
- **name**: each item must have a name
- **description**: a long description
- **price**: required
- **tax**: if the item doesn't have tax, you can omit this
- **tags**: a set of unique tag strings for this item
"""
return item
| Item |
python | getsentry__sentry | src/sentry/search/events/builder/profile_functions.py | {
"start": 5003,
"end": 9489
} | class ____(ProfileFunctionsTimeseriesQueryBuilder):
config_class = ProfileFunctionsDatasetConfig
def __init__(
self,
dataset: Dataset,
params: ParamsType,
interval: int,
top_events: list[dict[str, Any]],
snuba_params: SnubaParams | None = None,
other: bool = False,
query: str | None = None,
selected_columns: list[str] | None = None,
timeseries_columns: list[str] | None = None,
equations: list[str] | None = None,
config: QueryBuilderConfig | None = None,
limit: int | None = 10000,
):
selected_columns = [] if selected_columns is None else selected_columns
timeseries_columns = [] if timeseries_columns is None else timeseries_columns
_, timeseries_functions = categorize_columns(timeseries_columns)
super().__init__(
dataset,
params,
snuba_params=snuba_params,
interval=interval,
query=query,
selected_columns=list(set(selected_columns + timeseries_functions)),
equations=None, # TODO: equations are not supported at this time
limit=limit,
config=config,
)
self.fields = [self.tag_to_prefixed_map.get(c, c) for c in selected_columns]
if (conditions := self.resolve_top_event_conditions(top_events, other)) is not None:
self.where.append(conditions)
if not other:
self.groupby.extend(
[column for column in self.columns if column not in self.aggregates]
)
@property
def translated_groupby(self) -> list[str]:
"""Get the names of the groupby columns to create the series names"""
translated = []
for groupby in self.groupby:
if groupby == self.time_column:
continue
if isinstance(groupby, (CurriedFunction, AliasedExpression)):
assert groupby.alias is not None
translated.append(groupby.alias)
else:
translated.append(groupby.name)
# sorted so the result key is consistent
return sorted(translated)
def is_aggregate_field(self, field: str) -> bool:
resolved = self.resolve_column(self.prefixed_to_tag_map.get(field, field))
return resolved in self.aggregates
def resolve_top_event_conditions(
self, top_functions: list[dict[str, Any]], other: bool
) -> WhereType | None:
assert not other, "Other is not supported" # TODO: support other
# we only want to create conditions on the non aggregate fields
fields = [field for field in self.fields if not self.is_aggregate_field(field)]
conditions = []
# if the project id is in the query, we can further narrow down the
# list of projects to only the set that matches the top functions
for field in fields:
if field in ["project", "project.id"] and not other:
project_condition = [
condition
for condition in self.where
if isinstance(condition, Condition)
and condition.lhs == self.column("project_id")
][0]
self.where.remove(project_condition)
if field == "project":
projects = list(
{
self.params.project_slug_map[function["project"]]
for function in top_functions
}
)
else:
projects = list({function["project.id"] for function in top_functions})
self.where.append(Condition(self.column("project_id"), Op.IN, projects))
for function in top_functions:
terms = [
SearchFilter(SearchKey(field), "=", SearchValue(function.get(field) or ""))
for field in fields
]
function_condition = self.resolve_where(terms)
if len(function_condition) > 1:
conditions.append(And(function_condition))
elif len(function_condition) == 1:
conditions.append(function_condition[0])
if len(conditions) > 1:
return Or(conditions=conditions)
elif len(conditions) == 1:
return conditions[0]
return None
| ProfileTopFunctionsTimeseriesQueryBuilder |
python | Textualize__textual | src/textual/css/stylesheet.py | {
"start": 1047,
"end": 1310
} | class ____(StylesheetError):
"""Raised when the stylesheet could not be parsed."""
def __init__(self, errors: StylesheetErrors) -> None:
self.errors = errors
def __rich__(self) -> RenderableType:
return self.errors
| StylesheetParseError |
python | kamyu104__LeetCode-Solutions | Python/predict-the-winner.py | {
"start": 31,
"end": 486
} | class ____(object):
def PredictTheWinner(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
if len(nums) % 2 == 0 or len(nums) == 1:
return True
dp = [0] * len(nums)
for i in reversed(xrange(len(nums))):
dp[i] = nums[i]
for j in xrange(i+1, len(nums)):
dp[j] = max(nums[i] - dp[j], nums[j] - dp[j - 1])
return dp[-1] >= 0
| Solution |
python | pydantic__pydantic | pydantic/v1/types.py | {
"start": 13028,
"end": 14606
} | class ____(set): # type: ignore
# Needed for pydantic to detect that this is a set
__origin__ = set
__args__: Set[Type[T]] # type: ignore
min_items: Optional[int] = None
max_items: Optional[int] = None
item_type: Type[T] # type: ignore
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.set_length_validator
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(field_schema, minItems=cls.min_items, maxItems=cls.max_items)
@classmethod
def set_length_validator(cls, v: 'Optional[Set[T]]') -> 'Optional[Set[T]]':
if v is None:
return None
v = set_validator(v)
v_len = len(v)
if cls.min_items is not None and v_len < cls.min_items:
raise errors.SetMinLengthError(limit_value=cls.min_items)
if cls.max_items is not None and v_len > cls.max_items:
raise errors.SetMaxLengthError(limit_value=cls.max_items)
return v
def conset(item_type: Type[T], *, min_items: Optional[int] = None, max_items: Optional[int] = None) -> Type[Set[T]]:
# __args__ is needed to conform to typing generics api
namespace = {'min_items': min_items, 'max_items': max_items, 'item_type': item_type, '__args__': [item_type]}
# We use new_class to be able to deal with Generic types
return new_class('ConstrainedSetValue', (ConstrainedSet,), {}, lambda ns: ns.update(namespace))
# This types superclass should be FrozenSet[T], but cython chokes on that...
| ConstrainedSet |
python | spyder-ide__spyder | spyder/plugins/editor/widgets/window.py | {
"start": 19744,
"end": 26912
} | class ____(QSplitter):
def __init__(self):
QSplitter.__init__(self)
self._plugin = None
self.dock_action = None
self.undock_action = None
self.close_action = None
self.windowwidget = None
self.lock_unlock_action = None
menu_actions = []
self.editorstacks = []
self.editorwindows = []
self.last_focused_editorstack = {} # fake
self.find_widget = FindReplace(self, enable_replace=True)
self.outlineexplorer = OutlineExplorerWidget(None, self, self)
self.outlineexplorer.edit_goto.connect(self.go_to_file)
self.editor_splitter = EditorSplitter(self, self, menu_actions,
first=True,
use_switcher=False)
editor_widgets = QWidget(self)
editor_layout = QVBoxLayout()
editor_layout.setSpacing(0)
editor_layout.setContentsMargins(0, 0, 0, 0)
editor_widgets.setLayout(editor_layout)
editor_layout.addWidget(self.editor_splitter)
editor_layout.addWidget(self.find_widget)
self.setContentsMargins(0, 0, 0, 0)
self.addWidget(editor_widgets)
self.addWidget(self.outlineexplorer)
self.setStretchFactor(0, 5)
self.setStretchFactor(1, 1)
self.menu_actions = menu_actions
self.toolbar_list = None
self.menu_list = None
self.setup_window([], [])
def go_to_file(self, fname, lineno, text='', start_column=None):
editorstack = self.editorstacks[0]
editorstack.set_current_filename(str(fname))
editor = editorstack.get_current_editor()
editor.go_to_line(lineno, word=text, start_column=start_column)
def closeEvent(self, event):
for win in self.editorwindows[:]:
win.close()
logger.debug("%d: %r" % (len(self.editorwindows), self.editorwindows))
logger.debug("%d: %r" % (len(self.editorstacks), self.editorstacks))
event.accept()
def load(self, fname):
QApplication.processEvents()
editorstack = self.editorstacks[0]
editorstack.load(fname)
editorstack.analyze_script()
def register_editorstack(self, editorstack):
logger.debug(
"FakeEditorMainWidget.register_editorstack: %r" % editorstack
)
self.editorstacks.append(editorstack)
if self.isAncestorOf(editorstack):
# editorstack is a child of the EditorMainWidget
editorstack.set_closable(len(self.editorstacks) > 1)
editorstack.set_outlineexplorer(self.outlineexplorer)
editorstack.set_find_widget(self.find_widget)
oe_btn = create_toolbutton(self)
editorstack.add_corner_widgets_to_tabbar([5, oe_btn])
font = QFont("Courier New")
font.setPointSize(10)
editorstack.set_default_font(font, color_scheme='Spyder')
editorstack.sig_close_file.connect(self.close_file_in_all_editorstacks)
editorstack.file_saved.connect(self.file_saved_in_editorstack)
editorstack.file_renamed_in_data.connect(
self.file_renamed_in_data_in_editorstack)
editorstack.plugin_load.connect(self.load)
def unregister_editorstack(self, editorstack):
logger.debug(
"EditorMainWidget.unregister_editorstack: %r" % editorstack
)
self.editorstacks.pop(self.editorstacks.index(editorstack))
def clone_editorstack(self, editorstack):
editorstack.clone_from(self.editorstacks[0])
def setup_window(self, toolbar_list, menu_list):
self.toolbar_list = toolbar_list
self.menu_list = menu_list
def create_new_window(self):
window = EditorMainWindow(self, self.menu_actions,
self.toolbar_list, self.menu_list,
show_fullpath=False, show_all_files=False,
group_cells=True, show_comments=True,
sort_files_alphabetically=False)
window.resize(self.size())
window.show()
self.register_editorwindow(window)
window.destroyed.connect(lambda: self.unregister_editorwindow(window))
def register_editorwindow(self, window):
logger.debug("register_editorwindowQObject*: %r" % window)
self.editorwindows.append(window)
def unregister_editorwindow(self, window):
logger.debug("unregister_editorwindow: %r" % window)
self.editorwindows.pop(self.editorwindows.index(window))
def get_focus_widget(self):
pass
@Slot(str, str)
def close_file_in_all_editorstacks(self, editorstack_id_str, filename):
for editorstack in self.editorstacks:
if str(id(editorstack)) != editorstack_id_str:
editorstack.blockSignals(True)
index = editorstack.get_index_from_filename(filename)
editorstack.close_file(index, force=True)
editorstack.blockSignals(False)
# This method is never called in this plugin example. It's here only
# to show how to use the file_saved signal (see above).
@Slot(str, str, str)
def file_saved_in_editorstack(self, editorstack_id_str,
original_filename, filename):
"""A file was saved in editorstack, this notifies others"""
for editorstack in self.editorstacks:
if str(id(editorstack)) != editorstack_id_str:
editorstack.file_saved_in_other_editorstack(original_filename,
filename)
# This method is never called in this plugin example. It's here only
# to show how to use the file_saved signal (see above).
@Slot(str, str, str)
def file_renamed_in_data_in_editorstack(
self, original_filename, filename, editorstack_id_str
):
"""A file was renamed in data in editorstack, this notifies others"""
for editorstack in self.editorstacks:
if str(id(editorstack)) != editorstack_id_str:
editorstack.rename_in_data(original_filename, filename)
def _get_color_scheme(self):
pass
def test():
from spyder.utils.qthelpers import qapplication
from spyder.config.base import get_module_path
spyder_dir = get_module_path('spyder')
app = qapplication(test_time=8)
test = EditorMainWidgetExample()
test.resize(900, 700)
test.show()
import time
t0 = time.time()
test.load(osp.join(spyder_dir, "widgets", "collectionseditor.py"))
test.load(osp.join(spyder_dir, "plugins", "editor", "widgets",
"window.py"))
test.load(osp.join(spyder_dir, "plugins", "explorer", "widgets",
'explorer.py'))
test.load(osp.join(spyder_dir, "plugins", "editor", "widgets",
"codeeditor", "codeeditor.py"))
print("Elapsed time: %.3f s" % (time.time()-t0)) # spyder: test-skip
sys.exit(app.exec_())
if __name__ == "__main__":
test()
| EditorMainWidgetExample |
python | justquick__django-activity-stream | actstream/drf/serializers.py | {
"start": 2274,
"end": 2607
} | class ____(DEFAULT_SERIALIZER):
"""
Serializer for actstream.Action models in the activity feeds
"""
actor = get_grf()
target = get_grf()
action_object = get_grf()
class Meta:
model = Action
fields = 'id verb public description timestamp actor target action_object'.split()
| ActionSerializer |
python | pydata__xarray | xarray/core/_typed_ops.py | {
"start": 678,
"end": 6733
} | class ____:
__slots__ = ()
def _binary_op(
self, other: DtCompatible, f: Callable, reflexive: bool = False
) -> Self:
raise NotImplementedError
def __add__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.add)
def __sub__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.sub)
def __mul__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.mul)
def __pow__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.pow)
def __truediv__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.truediv)
def __floordiv__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.floordiv)
def __mod__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.mod)
def __and__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.and_)
def __xor__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.xor)
def __or__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.or_)
def __lshift__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.lshift)
def __rshift__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.rshift)
def __lt__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.lt)
def __le__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.le)
def __gt__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.gt)
def __ge__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.ge)
def __eq__(self, other: DtCompatible) -> Self: # type:ignore[override]
return self._binary_op(other, nputils.array_eq)
def __ne__(self, other: DtCompatible) -> Self: # type:ignore[override]
return self._binary_op(other, nputils.array_ne)
# When __eq__ is defined but __hash__ is not, then an object is unhashable,
# and it should be declared as follows:
__hash__: None # type:ignore[assignment]
def __radd__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.add, reflexive=True)
def __rsub__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.sub, reflexive=True)
def __rmul__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.mul, reflexive=True)
def __rpow__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.pow, reflexive=True)
def __rtruediv__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.truediv, reflexive=True)
def __rfloordiv__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.floordiv, reflexive=True)
def __rmod__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.mod, reflexive=True)
def __rand__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.and_, reflexive=True)
def __rxor__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.xor, reflexive=True)
def __ror__(self, other: DtCompatible) -> Self:
return self._binary_op(other, operator.or_, reflexive=True)
def _unary_op(self, f: Callable, *args: Any, **kwargs: Any) -> Self:
raise NotImplementedError
def __neg__(self) -> Self:
return self._unary_op(operator.neg)
def __pos__(self) -> Self:
return self._unary_op(operator.pos)
def __abs__(self) -> Self:
return self._unary_op(operator.abs)
def __invert__(self) -> Self:
return self._unary_op(operator.invert)
def round(self, *args: Any, **kwargs: Any) -> Self:
return self._unary_op(ops.round_, *args, **kwargs)
def argsort(self, *args: Any, **kwargs: Any) -> Self:
return self._unary_op(ops.argsort, *args, **kwargs)
def conj(self, *args: Any, **kwargs: Any) -> Self:
return self._unary_op(ops.conj, *args, **kwargs)
def conjugate(self, *args: Any, **kwargs: Any) -> Self:
return self._unary_op(ops.conjugate, *args, **kwargs)
__add__.__doc__ = operator.add.__doc__
__sub__.__doc__ = operator.sub.__doc__
__mul__.__doc__ = operator.mul.__doc__
__pow__.__doc__ = operator.pow.__doc__
__truediv__.__doc__ = operator.truediv.__doc__
__floordiv__.__doc__ = operator.floordiv.__doc__
__mod__.__doc__ = operator.mod.__doc__
__and__.__doc__ = operator.and_.__doc__
__xor__.__doc__ = operator.xor.__doc__
__or__.__doc__ = operator.or_.__doc__
__lshift__.__doc__ = operator.lshift.__doc__
__rshift__.__doc__ = operator.rshift.__doc__
__lt__.__doc__ = operator.lt.__doc__
__le__.__doc__ = operator.le.__doc__
__gt__.__doc__ = operator.gt.__doc__
__ge__.__doc__ = operator.ge.__doc__
__eq__.__doc__ = nputils.array_eq.__doc__
__ne__.__doc__ = nputils.array_ne.__doc__
__radd__.__doc__ = operator.add.__doc__
__rsub__.__doc__ = operator.sub.__doc__
__rmul__.__doc__ = operator.mul.__doc__
__rpow__.__doc__ = operator.pow.__doc__
__rtruediv__.__doc__ = operator.truediv.__doc__
__rfloordiv__.__doc__ = operator.floordiv.__doc__
__rmod__.__doc__ = operator.mod.__doc__
__rand__.__doc__ = operator.and_.__doc__
__rxor__.__doc__ = operator.xor.__doc__
__ror__.__doc__ = operator.or_.__doc__
__neg__.__doc__ = operator.neg.__doc__
__pos__.__doc__ = operator.pos.__doc__
__abs__.__doc__ = operator.abs.__doc__
__invert__.__doc__ = operator.invert.__doc__
round.__doc__ = ops.round_.__doc__
argsort.__doc__ = ops.argsort.__doc__
conj.__doc__ = ops.conj.__doc__
conjugate.__doc__ = ops.conjugate.__doc__
| DataTreeOpsMixin |
python | streamlit__streamlit | lib/tests/streamlit/web/server/server_util_test.py | {
"start": 884,
"end": 5208
} | class ____(unittest.TestCase):
def test_allowlisted_origins_empty_string(self):
with testutil.patch_config_options({"server.corsAllowedOrigins": []}):
assert server_util.allowlisted_origins() == set()
def test_allowlisted_origins_singleton(self):
with testutil.patch_config_options(
{"server.corsAllowedOrigins": ["http://example.com"]}
):
assert server_util.allowlisted_origins() == {"http://example.com"}
def test_allowlisted_origins_multiple_entries(self):
with testutil.patch_config_options(
{
"server.corsAllowedOrigins": [
"http://example.com",
"https://streamlit.io",
]
}
):
assert server_util.allowlisted_origins() == {
"http://example.com",
"https://streamlit.io",
}
def test_allowlisted_origins_string_with_whitespace(self):
with testutil.patch_config_options(
{
"server.corsAllowedOrigins": [
" http://example.com ",
" https://streamlit.io ",
]
}
):
assert server_util.allowlisted_origins() == {
"http://example.com",
"https://streamlit.io",
}
def test_is_url_from_allowed_origins_allowed_domains(self):
with testutil.patch_config_options(
{
"server.corsAllowedOrigins": [
"http://example.com",
"https://streamlit.io",
]
}
):
for origin in [
"localhost",
"127.0.0.1",
"http://example.com",
"https://streamlit.io",
]:
assert server_util.is_url_from_allowed_origins(origin)
assert not server_util.is_url_from_allowed_origins(
"http://some-other-origin.com"
)
def test_is_url_from_allowed_origins_CORS_off(self):
with patch(
"streamlit.web.server.server_util.config.get_option", side_effect=[False]
):
assert server_util.is_url_from_allowed_origins("does not matter")
def test_is_url_from_allowed_origins_browser_serverAddress(self):
with (
patch(
"streamlit.web.server.server_util.config.is_manually_set",
side_effect=[True],
),
patch(
"streamlit.web.server.server_util.config.get_option",
side_effect=[True, [], "browser.server.address"],
),
):
assert server_util.is_url_from_allowed_origins("browser.server.address")
@parameterized.expand(
[
(None, 8501, "http://the_ip_address:8501"),
(None, 9988, "http://the_ip_address:9988"),
("foo", 8501, "http://the_ip_address:8501/foo"),
("foo/", 8501, "http://the_ip_address:8501/foo"),
("/foo/bar/", 8501, "http://the_ip_address:8501/foo/bar"),
("/foo/bar/", 9988, "http://the_ip_address:9988/foo/bar"),
]
)
def test_get_url(self, base_url: str | None, port: int, expected_url: str):
options = {"server.headless": False, "global.developmentMode": False}
if base_url:
options["server.baseUrlPath"] = base_url
options["server.port"] = port
mock_get_option = testutil.build_mock_config_get_option(options)
with patch.object(config, "get_option", new=mock_get_option):
actual_url = server_util.get_url("the_ip_address")
assert expected_url == actual_url
def test_make_url_path_regex(self):
assert (
server_util.make_url_path_regex("foo") == r"^/foo/?$"
) # defaults to optional
assert (
server_util.make_url_path_regex("foo", trailing_slash="optional")
== r"^/foo/?$"
)
assert (
server_util.make_url_path_regex("foo", trailing_slash="required")
== r"^/foo/$"
)
assert (
server_util.make_url_path_regex("foo", trailing_slash="prohibited")
== r"^/foo$"
)
| ServerUtilTest |
python | pytorch__pytorch | torch/package/package_importer.py | {
"start": 28802,
"end": 28948
} | class ____(_PathNode):
__slots__ = ["source_file"]
def __init__(self, source_file: str):
self.source_file = source_file
| _ModuleNode |
python | redis__redis-py | redis/sentinel.py | {
"start": 6591,
"end": 15013
} | class ____(SentinelCommands):
"""
Redis Sentinel cluster client
>>> from redis.sentinel import Sentinel
>>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1)
>>> master = sentinel.master_for('mymaster', socket_timeout=0.1)
>>> master.set('foo', 'bar')
>>> slave = sentinel.slave_for('mymaster', socket_timeout=0.1)
>>> slave.get('foo')
b'bar'
``sentinels`` is a list of sentinel nodes. Each node is represented by
a pair (hostname, port).
``min_other_sentinels`` defined a minimum number of peers for a sentinel.
When querying a sentinel, if it doesn't meet this threshold, responses
from that sentinel won't be considered valid.
``sentinel_kwargs`` is a dictionary of connection arguments used when
connecting to sentinel instances. Any argument that can be passed to
a normal Redis connection can be specified here. If ``sentinel_kwargs`` is
not specified, any socket_timeout and socket_keepalive options specified
in ``connection_kwargs`` will be used.
``connection_kwargs`` are keyword arguments that will be used when
establishing a connection to a Redis server.
"""
def __init__(
self,
sentinels,
min_other_sentinels=0,
sentinel_kwargs=None,
force_master_ip=None,
**connection_kwargs,
):
# if sentinel_kwargs isn't defined, use the socket_* options from
# connection_kwargs
if sentinel_kwargs is None:
sentinel_kwargs = {
k: v for k, v in connection_kwargs.items() if k.startswith("socket_")
}
self.sentinel_kwargs = sentinel_kwargs
self.sentinels = [
Redis(hostname, port, **self.sentinel_kwargs)
for hostname, port in sentinels
]
self.min_other_sentinels = min_other_sentinels
self.connection_kwargs = connection_kwargs
self._force_master_ip = force_master_ip
def execute_command(self, *args, **kwargs):
"""
Execute Sentinel command in sentinel nodes.
once - If set to True, then execute the resulting command on a single
node at random, rather than across the entire sentinel cluster.
"""
once = bool(kwargs.pop("once", False))
# Check if command is supposed to return the original
# responses instead of boolean value.
return_responses = bool(kwargs.pop("return_responses", False))
if once:
response = random.choice(self.sentinels).execute_command(*args, **kwargs)
if return_responses:
return [response]
else:
return True if response else False
responses = []
for sentinel in self.sentinels:
responses.append(sentinel.execute_command(*args, **kwargs))
if return_responses:
return responses
return all(responses)
def __repr__(self):
sentinel_addresses = []
for sentinel in self.sentinels:
sentinel_addresses.append(
"{host}:{port}".format_map(sentinel.connection_pool.connection_kwargs)
)
return (
f"<{type(self).__module__}.{type(self).__name__}"
f"(sentinels=[{','.join(sentinel_addresses)}])>"
)
def check_master_state(self, state, service_name):
if not state["is_master"] or state["is_sdown"] or state["is_odown"]:
return False
# Check if our sentinel doesn't see other nodes
if state["num-other-sentinels"] < self.min_other_sentinels:
return False
return True
def discover_master(self, service_name):
"""
Asks sentinel servers for the Redis master's address corresponding
to the service labeled ``service_name``.
Returns a pair (address, port) or raises MasterNotFoundError if no
master is found.
"""
collected_errors = list()
for sentinel_no, sentinel in enumerate(self.sentinels):
try:
masters = sentinel.sentinel_masters()
except (ConnectionError, TimeoutError) as e:
collected_errors.append(f"{sentinel} - {e!r}")
continue
state = masters.get(service_name)
if state and self.check_master_state(state, service_name):
# Put this sentinel at the top of the list
self.sentinels[0], self.sentinels[sentinel_no] = (
sentinel,
self.sentinels[0],
)
ip = (
self._force_master_ip
if self._force_master_ip is not None
else state["ip"]
)
return ip, state["port"]
error_info = ""
if len(collected_errors) > 0:
error_info = f" : {', '.join(collected_errors)}"
raise MasterNotFoundError(f"No master found for {service_name!r}{error_info}")
def filter_slaves(self, slaves):
"Remove slaves that are in an ODOWN or SDOWN state"
slaves_alive = []
for slave in slaves:
if slave["is_odown"] or slave["is_sdown"]:
continue
slaves_alive.append((slave["ip"], slave["port"]))
return slaves_alive
def discover_slaves(self, service_name):
"Returns a list of alive slaves for service ``service_name``"
for sentinel in self.sentinels:
try:
slaves = sentinel.sentinel_slaves(service_name)
except (ConnectionError, ResponseError, TimeoutError):
continue
slaves = self.filter_slaves(slaves)
if slaves:
return slaves
return []
def master_for(
self,
service_name,
redis_class=Redis,
connection_pool_class=SentinelConnectionPool,
**kwargs,
):
"""
Returns a redis client instance for the ``service_name`` master.
Sentinel client will detect failover and reconnect Redis clients
automatically.
A :py:class:`~redis.sentinel.SentinelConnectionPool` class is
used to retrieve the master's address before establishing a new
connection.
NOTE: If the master's address has changed, any cached connections to
the old master are closed.
By default clients will be a :py:class:`~redis.Redis` instance.
Specify a different class to the ``redis_class`` argument if you
desire something different.
The ``connection_pool_class`` specifies the connection pool to
use. The :py:class:`~redis.sentinel.SentinelConnectionPool`
will be used by default.
All other keyword arguments are merged with any connection_kwargs
passed to this class and passed to the connection pool as keyword
arguments to be used to initialize Redis connections.
"""
kwargs["is_master"] = True
connection_kwargs = dict(self.connection_kwargs)
connection_kwargs.update(kwargs)
return redis_class.from_pool(
connection_pool_class(service_name, self, **connection_kwargs)
)
def slave_for(
self,
service_name,
redis_class=Redis,
connection_pool_class=SentinelConnectionPool,
**kwargs,
):
"""
Returns redis client instance for the ``service_name`` slave(s).
A SentinelConnectionPool class is used to retrieve the slave's
address before establishing a new connection.
By default clients will be a :py:class:`~redis.Redis` instance.
Specify a different class to the ``redis_class`` argument if you
desire something different.
The ``connection_pool_class`` specifies the connection pool to use.
The SentinelConnectionPool will be used by default.
All other keyword arguments are merged with any connection_kwargs
passed to this class and passed to the connection pool as keyword
arguments to be used to initialize Redis connections.
"""
kwargs["is_master"] = False
connection_kwargs = dict(self.connection_kwargs)
connection_kwargs.update(kwargs)
return redis_class.from_pool(
connection_pool_class(service_name, self, **connection_kwargs)
)
| Sentinel |
python | numpy__numpy | numpy/linalg/tests/test_regression.py | {
"start": 301,
"end": 6796
} | class ____:
def test_eig_build(self):
# Ticket #652
rva = array([1.03221168e+02 + 0.j,
-1.91843603e+01 + 0.j,
-6.04004526e-01 + 15.84422474j,
-6.04004526e-01 - 15.84422474j,
-1.13692929e+01 + 0.j,
-6.57612485e-01 + 10.41755503j,
-6.57612485e-01 - 10.41755503j,
1.82126812e+01 + 0.j,
1.06011014e+01 + 0.j,
7.80732773e+00 + 0.j,
-7.65390898e-01 + 0.j,
1.51971555e-15 + 0.j,
-1.51308713e-15 + 0.j])
a = arange(13 * 13, dtype=float64)
a = a.reshape((13, 13))
a = a % 17
va, ve = linalg.eig(a)
va.sort()
rva.sort()
assert_array_almost_equal(va, rva)
def test_eigh_build(self):
# Ticket 662.
rvals = [68.60568999, 89.57756725, 106.67185574]
cov = array([[77.70273908, 3.51489954, 15.64602427],
[ 3.51489954, 88.97013878, -1.07431931],
[15.64602427, -1.07431931, 98.18223512]])
vals, vecs = linalg.eigh(cov)
assert_array_almost_equal(vals, rvals)
def test_svd_build(self):
# Ticket 627.
a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]])
m, n = a.shape
u, s, vh = linalg.svd(a)
b = dot(transpose(u[:, n:]), a)
assert_array_almost_equal(b, np.zeros((2, 2)))
def test_norm_vector_badarg(self):
# Regression for #786: Frobenius norm for vectors raises
# ValueError.
assert_raises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro')
def test_lapack_endian(self):
# For bug #1482
a = array([[ 5.7998084, -2.1825367],
[-2.1825367, 9.85910595]], dtype='>f8')
b = array(a, dtype='<f8')
ap = linalg.cholesky(a)
bp = linalg.cholesky(b)
assert_array_equal(ap, bp)
def test_large_svd_32bit(self):
# See gh-4442, 64bit would require very large/slow matrices.
x = np.eye(1000, 66)
np.linalg.svd(x)
def test_svd_no_uv(self):
# gh-4733
for shape in (3, 4), (4, 4), (4, 3):
for t in float, complex:
a = np.ones(shape, dtype=t)
w = linalg.svd(a, compute_uv=False)
c = np.count_nonzero(np.absolute(w) > 0.5)
assert_equal(c, 1)
assert_equal(np.linalg.matrix_rank(a), 1)
assert_array_less(1, np.linalg.norm(a, ord=2))
w_svdvals = linalg.svdvals(a)
assert_array_almost_equal(w, w_svdvals)
def test_norm_object_array(self):
# gh-7575
testvector = np.array([np.array([0, 1]), 0, 0], dtype=object)
norm = linalg.norm(testvector)
assert_array_equal(norm, [0, 1])
assert_(norm.dtype == np.dtype('float64'))
norm = linalg.norm(testvector, ord=1)
assert_array_equal(norm, [0, 1])
assert_(norm.dtype != np.dtype('float64'))
norm = linalg.norm(testvector, ord=2)
assert_array_equal(norm, [0, 1])
assert_(norm.dtype == np.dtype('float64'))
assert_raises(ValueError, linalg.norm, testvector, ord='fro')
assert_raises(ValueError, linalg.norm, testvector, ord='nuc')
assert_raises(ValueError, linalg.norm, testvector, ord=np.inf)
assert_raises(ValueError, linalg.norm, testvector, ord=-np.inf)
assert_raises(ValueError, linalg.norm, testvector, ord=0)
assert_raises(ValueError, linalg.norm, testvector, ord=-1)
assert_raises(ValueError, linalg.norm, testvector, ord=-2)
testmatrix = np.array([[np.array([0, 1]), 0, 0],
[0, 0, 0]], dtype=object)
norm = linalg.norm(testmatrix)
assert_array_equal(norm, [0, 1])
assert_(norm.dtype == np.dtype('float64'))
norm = linalg.norm(testmatrix, ord='fro')
assert_array_equal(norm, [0, 1])
assert_(norm.dtype == np.dtype('float64'))
assert_raises(TypeError, linalg.norm, testmatrix, ord='nuc')
assert_raises(ValueError, linalg.norm, testmatrix, ord=np.inf)
assert_raises(ValueError, linalg.norm, testmatrix, ord=-np.inf)
assert_raises(ValueError, linalg.norm, testmatrix, ord=0)
assert_raises(ValueError, linalg.norm, testmatrix, ord=1)
assert_raises(ValueError, linalg.norm, testmatrix, ord=-1)
assert_raises(TypeError, linalg.norm, testmatrix, ord=2)
assert_raises(TypeError, linalg.norm, testmatrix, ord=-2)
assert_raises(ValueError, linalg.norm, testmatrix, ord=3)
def test_lstsq_complex_larger_rhs(self):
# gh-9891
size = 20
n_rhs = 70
G = np.random.randn(size, size) + 1j * np.random.randn(size, size)
u = np.random.randn(size, n_rhs) + 1j * np.random.randn(size, n_rhs)
b = G.dot(u)
# This should work without segmentation fault.
u_lstsq, res, rank, sv = linalg.lstsq(G, b, rcond=None)
# check results just in case
assert_array_almost_equal(u_lstsq, u)
@pytest.mark.parametrize("upper", [True, False])
def test_cholesky_empty_array(self, upper):
# gh-25840 - upper=True hung before.
res = np.linalg.cholesky(np.zeros((0, 0)), upper=upper)
assert res.size == 0
@pytest.mark.parametrize("rtol", [0.0, [0.0] * 4, np.zeros((4,))])
def test_matrix_rank_rtol_argument(self, rtol):
# gh-25877
x = np.zeros((4, 3, 2))
res = np.linalg.matrix_rank(x, rtol=rtol)
assert res.shape == (4,)
@pytest.mark.thread_unsafe(reason="test is already testing threads with openblas")
def test_openblas_threading(self):
# gh-27036
# Test whether matrix multiplication involving a large matrix always
# gives the same (correct) answer
x = np.arange(500000, dtype=np.float64)
src = np.vstack((x, -10 * x)).T
matrix = np.array([[0, 1], [1, 0]])
expected = np.vstack((-10 * x, x)).T # src @ matrix
for i in range(200):
result = src @ matrix
mismatches = (~np.isclose(result, expected)).sum()
if mismatches != 0:
assert False, ("unexpected result from matmul, "
"probably due to OpenBLAS threading issues")
| TestRegression |
python | getsentry__sentry | tests/sentry/api/endpoints/test_organization_access_request_details.py | {
"start": 2178,
"end": 6139
} | class ____(APITestCase):
def test_approve_request(self) -> None:
self.login_as(user=self.user)
organization = self.create_organization(name="foo", owner=self.user)
user = self.create_user("bar@example.com")
member = self.create_member(organization=organization, user=user, role="member")
team = self.create_team(name="foo", organization=organization)
access_request = OrganizationAccessRequest.objects.create(member=member, team=team)
path = reverse(
"sentry-api-0-organization-access-request-details",
args=[organization.slug, access_request.id],
)
self.login_as(self.user)
resp = self.client.put(path, data={"isApproved": 1})
assert resp.status_code == 204
assert OrganizationMemberTeam.objects.filter(
organizationmember=member, team=team, is_active=True
).exists()
assert not OrganizationAccessRequest.objects.filter(id=access_request.id).exists()
def test_deny_request(self) -> None:
self.login_as(user=self.user)
organization = self.create_organization(name="foo", owner=self.user)
user = self.create_user("bar@example.com")
member = self.create_member(organization=organization, user=user, role="member")
team = self.create_team(name="foo", organization=organization)
access_request = OrganizationAccessRequest.objects.create(member=member, team=team)
path = reverse(
"sentry-api-0-organization-access-request-details",
args=[organization.slug, access_request.id],
)
self.login_as(self.user)
resp = self.client.put(path, data={"isApproved": 0})
assert resp.status_code == 204
assert not OrganizationMemberTeam.objects.filter(
organizationmember=member, team=team, is_active=True
).exists()
assert not OrganizationAccessRequest.objects.filter(id=access_request.id).exists()
def test_team_admin_can_approve(self) -> None:
self.login_as(user=self.user)
organization = self.create_organization(name="foo", owner=self.user)
user = self.create_user("bar@example.com")
member = self.create_member(organization=organization, user=user, role="member")
team = self.create_team(name="foo", organization=organization)
access_request = OrganizationAccessRequest.objects.create(member=member, team=team)
admin_user = self.create_user("admin@example.com")
self.create_member(organization=organization, user=admin_user, role="admin", teams=[team])
path = reverse(
"sentry-api-0-organization-access-request-details",
args=[organization.slug, access_request.id],
)
self.login_as(admin_user)
resp = self.client.put(path, data={"isApproved": 1})
assert resp.status_code == 204
def test_teamless_admin_cannot_approve_with_closed_membership(self) -> None:
self.login_as(user=self.user)
organization = self.create_organization(
name="foo", owner=self.user, flags=0 # kill allow_joinleave
)
user = self.create_user("bar@example.com")
member = self.create_member(organization=organization, user=user, role="member")
team = self.create_team(name="foo", organization=organization)
access_request = OrganizationAccessRequest.objects.create(member=member, team=team)
admin_user = self.create_user("admin@example.com")
self.create_member(organization=organization, user=admin_user, role="admin", teams=[])
path = reverse(
"sentry-api-0-organization-access-request-details",
args=[organization.slug, access_request.id],
)
self.login_as(admin_user)
resp = self.client.put(path, data={"isApproved": 1})
assert resp.status_code == 403
| UpdateOrganizationAccessRequestTest |
python | doocs__leetcode | solution/2600-2699/2639.Find the Width of Columns of a Grid/Solution.py | {
"start": 0,
"end": 152
} | class ____:
def findColumnWidth(self, grid: List[List[int]]) -> List[int]:
return [max(len(str(x)) for x in col) for col in zip(*grid)]
| Solution |
python | ipython__ipython | docs/source/conf.py | {
"start": 5550,
"end": 6435
} | class ____(logging.Filter):
"""
This is a filter to remove in sphinx 3+ the error about config traits being duplicated.
As we autogenerate configuration traits from, subclasses have lots of
duplication and we want to silence them. Indeed we build on travis with
warnings-as-error set to True, so those duplicate items make the build fail.
"""
def filter(self, record):
if (
record.args
and record.args[0] == "configtrait"
and "duplicate" in record.msg
):
return False
return True
ct_filter = ConfigtraitFilter()
logger = sphinx.util.logging.getLogger("sphinx.domains.std").logger
logger.addFilter(ct_filter)
def setup(app):
app.add_css_file("theme_overrides.css")
# Cleanup
# -------
# delete release info to avoid pickling errors from sphinx
del iprelease
| ConfigtraitFilter |
python | pytorch__pytorch | torch/distributed/tensor/_ops/_view_ops.py | {
"start": 1888,
"end": 2428
} | class ____(DimSpec):
"""Output dimension is the input dimension repeated n-times."""
input_dim: DimSpec
times: int
@classmethod
def new(cls, dim: DimSpec, times: int) -> DimSpec:
if times == 1:
return dim
elif isinstance(dim, Singleton):
# repeating a singleton is the same as broadcasting it
return Broadcast(dim, times)
else:
return Repeat(dim, times)
def inputs(self) -> Iterable[DimSpec]:
return (self.input_dim,)
@dataclass
| Repeat |
python | mlflow__mlflow | mlflow/server/auth/sqlalchemy_store.py | {
"start": 985,
"end": 14932
} | class ____:
def init_db(self, db_uri):
self.db_uri = db_uri
self.db_type = extract_db_type_from_uri(db_uri)
self.engine = create_sqlalchemy_engine_with_retry(db_uri)
dbutils.migrate_if_needed(self.engine, "head")
SessionMaker = sessionmaker(bind=self.engine)
self.ManagedSessionMaker = _get_managed_session_maker(SessionMaker, self.db_type)
def authenticate_user(self, username: str, password: str) -> bool:
with self.ManagedSessionMaker() as session:
try:
user = self._get_user(session, username)
return check_password_hash(user.password_hash, password)
except MlflowException:
return False
def create_user(self, username: str, password: str, is_admin: bool = False) -> User:
_validate_username(username)
_validate_password(password)
pwhash = generate_password_hash(password)
with self.ManagedSessionMaker() as session:
try:
user = SqlUser(username=username, password_hash=pwhash, is_admin=is_admin)
session.add(user)
session.flush()
return user.to_mlflow_entity()
except IntegrityError as e:
raise MlflowException(
f"User (username={username}) already exists. Error: {e}",
RESOURCE_ALREADY_EXISTS,
) from e
@staticmethod
def _get_user(session, username: str) -> SqlUser:
try:
return session.query(SqlUser).filter(SqlUser.username == username).one()
except NoResultFound:
raise MlflowException(
f"User with username={username} not found",
RESOURCE_DOES_NOT_EXIST,
)
except MultipleResultsFound:
raise MlflowException(
f"Found multiple users with username={username}",
INVALID_STATE,
)
def has_user(self, username: str) -> bool:
with self.ManagedSessionMaker() as session:
return session.query(SqlUser).filter(SqlUser.username == username).first() is not None
def get_user(self, username: str) -> User:
with self.ManagedSessionMaker() as session:
return self._get_user(session, username).to_mlflow_entity()
def list_users(self) -> list[User]:
with self.ManagedSessionMaker() as session:
users = session.query(SqlUser).all()
return [u.to_mlflow_entity() for u in users]
def update_user(
self, username: str, password: str | None = None, is_admin: bool | None = None
) -> User:
with self.ManagedSessionMaker() as session:
user = self._get_user(session, username)
if password is not None:
pwhash = generate_password_hash(password)
user.password_hash = pwhash
if is_admin is not None:
user.is_admin = is_admin
return user.to_mlflow_entity()
def delete_user(self, username: str):
with self.ManagedSessionMaker() as session:
user = self._get_user(session, username)
session.delete(user)
def create_experiment_permission(
self, experiment_id: str, username: str, permission: str
) -> ExperimentPermission:
_validate_permission(permission)
with self.ManagedSessionMaker() as session:
try:
user = self._get_user(session, username=username)
perm = SqlExperimentPermission(
experiment_id=experiment_id, user_id=user.id, permission=permission
)
session.add(perm)
session.flush()
return perm.to_mlflow_entity()
except IntegrityError as e:
raise MlflowException(
f"Experiment permission (experiment_id={experiment_id}, username={username}) "
f"already exists. Error: {e}",
RESOURCE_ALREADY_EXISTS,
)
def _get_experiment_permission(
self, session, experiment_id: str, username: str
) -> SqlExperimentPermission:
try:
user = self._get_user(session, username=username)
return (
session.query(SqlExperimentPermission)
.filter(
SqlExperimentPermission.experiment_id == experiment_id,
SqlExperimentPermission.user_id == user.id,
)
.one()
)
except NoResultFound:
raise MlflowException(
f"Experiment permission with experiment_id={experiment_id} and "
f"username={username} not found",
RESOURCE_DOES_NOT_EXIST,
)
except MultipleResultsFound:
raise MlflowException(
f"Found multiple experiment permissions with experiment_id={experiment_id} "
f"and username={username}",
INVALID_STATE,
)
def get_experiment_permission(self, experiment_id: str, username: str) -> ExperimentPermission:
with self.ManagedSessionMaker() as session:
return self._get_experiment_permission(
session, experiment_id, username
).to_mlflow_entity()
def list_experiment_permissions(self, username: str) -> list[ExperimentPermission]:
with self.ManagedSessionMaker() as session:
user = self._get_user(session, username=username)
perms = (
session.query(SqlExperimentPermission)
.filter(SqlExperimentPermission.user_id == user.id)
.all()
)
return [p.to_mlflow_entity() for p in perms]
def update_experiment_permission(
self, experiment_id: str, username: str, permission: str
) -> ExperimentPermission:
_validate_permission(permission)
with self.ManagedSessionMaker() as session:
perm = self._get_experiment_permission(session, experiment_id, username)
perm.permission = permission
return perm.to_mlflow_entity()
def delete_experiment_permission(self, experiment_id: str, username: str):
with self.ManagedSessionMaker() as session:
perm = self._get_experiment_permission(session, experiment_id, username)
session.delete(perm)
def create_registered_model_permission(
self, name: str, username: str, permission: str
) -> RegisteredModelPermission:
_validate_permission(permission)
with self.ManagedSessionMaker() as session:
try:
user = self._get_user(session, username=username)
perm = SqlRegisteredModelPermission(
name=name, user_id=user.id, permission=permission
)
session.add(perm)
session.flush()
return perm.to_mlflow_entity()
except IntegrityError as e:
raise MlflowException(
f"Registered model permission (name={name}, username={username}) "
f"already exists. Error: {e}",
RESOURCE_ALREADY_EXISTS,
)
def _get_registered_model_permission(
self, session, name: str, username: str
) -> SqlRegisteredModelPermission:
try:
user = self._get_user(session, username=username)
return (
session.query(SqlRegisteredModelPermission)
.filter(
SqlRegisteredModelPermission.name == name,
SqlRegisteredModelPermission.user_id == user.id,
)
.one()
)
except NoResultFound:
raise MlflowException(
f"Registered model permission with name={name} and username={username} not found",
RESOURCE_DOES_NOT_EXIST,
)
except MultipleResultsFound:
raise MlflowException(
f"Found multiple registered model permissions with name={name} "
f"and username={username}",
INVALID_STATE,
)
def get_registered_model_permission(
self, name: str, username: str
) -> RegisteredModelPermission:
with self.ManagedSessionMaker() as session:
return self._get_registered_model_permission(session, name, username).to_mlflow_entity()
def list_registered_model_permissions(self, username: str) -> list[RegisteredModelPermission]:
with self.ManagedSessionMaker() as session:
user = self._get_user(session, username=username)
perms = (
session.query(SqlRegisteredModelPermission)
.filter(SqlRegisteredModelPermission.user_id == user.id)
.all()
)
return [p.to_mlflow_entity() for p in perms]
def update_registered_model_permission(
self, name: str, username: str, permission: str
) -> RegisteredModelPermission:
_validate_permission(permission)
with self.ManagedSessionMaker() as session:
perm = self._get_registered_model_permission(session, name, username)
perm.permission = permission
return perm.to_mlflow_entity()
def delete_registered_model_permission(self, name: str, username: str):
with self.ManagedSessionMaker() as session:
perm = self._get_registered_model_permission(session, name, username)
session.delete(perm)
def rename_registered_model_permissions(self, old_name: str, new_name: str):
with self.ManagedSessionMaker() as session:
perms = (
session.query(SqlRegisteredModelPermission)
.filter(SqlRegisteredModelPermission.name == old_name)
.all()
)
for perm in perms:
perm.name = new_name
def create_scorer_permission(
self, experiment_id: str, scorer_name: str, username: str, permission: str
) -> ScorerPermission:
_validate_permission(permission)
with self.ManagedSessionMaker() as session:
try:
user = self._get_user(session, username=username)
perm = SqlScorerPermission(
experiment_id=experiment_id,
scorer_name=scorer_name,
user_id=user.id,
permission=permission,
)
session.add(perm)
session.flush()
return perm.to_mlflow_entity()
except IntegrityError as e:
raise MlflowException(
f"Scorer permission (experiment_id={experiment_id}, scorer_name={scorer_name}, "
f"username={username}) already exists. Error: {e}",
RESOURCE_ALREADY_EXISTS,
) from e
def _get_scorer_permission(
self, session, experiment_id: str, scorer_name: str, username: str
) -> SqlScorerPermission:
try:
user = self._get_user(session, username=username)
return (
session.query(SqlScorerPermission)
.filter(
SqlScorerPermission.experiment_id == experiment_id,
SqlScorerPermission.scorer_name == scorer_name,
SqlScorerPermission.user_id == user.id,
)
.one()
)
except NoResultFound:
raise MlflowException(
f"Scorer permission with experiment_id={experiment_id}, "
f"scorer_name={scorer_name}, and username={username} not found",
RESOURCE_DOES_NOT_EXIST,
)
except MultipleResultsFound:
raise MlflowException(
f"Found multiple scorer permissions with experiment_id={experiment_id}, "
f"scorer_name={scorer_name}, and username={username}",
INVALID_STATE,
)
def get_scorer_permission(
self, experiment_id: str, scorer_name: str, username: str
) -> ScorerPermission:
with self.ManagedSessionMaker() as session:
return self._get_scorer_permission(
session, experiment_id, scorer_name, username
).to_mlflow_entity()
def list_scorer_permissions(self, username: str) -> list[ScorerPermission]:
with self.ManagedSessionMaker() as session:
user = self._get_user(session, username=username)
perms = (
session.query(SqlScorerPermission)
.filter(SqlScorerPermission.user_id == user.id)
.all()
)
return [p.to_mlflow_entity() for p in perms]
def update_scorer_permission(
self, experiment_id: str, scorer_name: str, username: str, permission: str
) -> ScorerPermission:
_validate_permission(permission)
with self.ManagedSessionMaker() as session:
perm = self._get_scorer_permission(session, experiment_id, scorer_name, username)
perm.permission = permission
return perm.to_mlflow_entity()
def delete_scorer_permission(self, experiment_id: str, scorer_name: str, username: str):
with self.ManagedSessionMaker() as session:
perm = self._get_scorer_permission(session, experiment_id, scorer_name, username)
session.delete(perm)
def delete_scorer_permissions_for_scorer(self, experiment_id: str, scorer_name: str):
with self.ManagedSessionMaker() as session:
session.query(SqlScorerPermission).filter(
SqlScorerPermission.experiment_id == experiment_id,
SqlScorerPermission.scorer_name == scorer_name,
).delete()
| SqlAlchemyStore |
python | scrapy__scrapy | tests/mockserver/http_resources.py | {
"start": 7092,
"end": 7522
} | class ____(resource.Resource):
def render(self, request):
from twisted.internet import reactor
def response():
request.write(b"chunked ")
request.write(b"content\n")
# Disable terminating chunk on finish.
request.chunked = False
close_connection(request)
reactor.callLater(0, response)
return server.NOT_DONE_YET
| BrokenChunkedResource |
python | huggingface__transformers | src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py | {
"start": 24429,
"end": 27525
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
# feature dim might need to be down-projected
if config.output_hidden_size != config.hidden_size:
self.proj = nn.Linear(config.hidden_size, config.output_hidden_size)
self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size, eps=config.layer_norm_eps)
else:
self.proj = self.proj_layer_norm = None
self.layers = nn.ModuleList(Wav2Vec2BertAdapterLayer(config) for _ in range(config.num_adapter_layers))
self.layerdrop = config.layerdrop
self.kernel_size = config.adapter_kernel_size
self.stride = config.adapter_stride
def _compute_sub_sample_lengths_from_attention_mask(self, seq_lens):
if seq_lens is None:
return seq_lens
pad = self.kernel_size // 2
seq_lens = ((seq_lens + 2 * pad - self.kernel_size) / self.stride) + 1
return seq_lens.floor()
def forward(self, hidden_states, attention_mask=None):
# down project hidden_states if necessary
if self.proj is not None and self.proj_layer_norm is not None:
hidden_states = self.proj(hidden_states)
hidden_states = self.proj_layer_norm(hidden_states)
sub_sampled_lengths = None
if attention_mask is not None:
sub_sampled_lengths = (attention_mask.size(1) - (1 - attention_mask.int()).sum(1)).to(hidden_states.device)
for layer in self.layers:
layerdrop_prob = torch.rand([])
sub_sampled_lengths = self._compute_sub_sample_lengths_from_attention_mask(sub_sampled_lengths)
if not self.training or (layerdrop_prob > self.layerdrop):
hidden_states = layer(
hidden_states, attention_mask=attention_mask, sub_sampled_lengths=sub_sampled_lengths
)
return hidden_states
# Copied from transformers.models.seamless_m4t_v2.modeling_seamless_m4t_v2._compute_new_attention_mask
def _compute_new_attention_mask(hidden_states: torch.Tensor, seq_lens: torch.Tensor):
"""
Computes an attention mask of the form `(batch, seq_len)` with an attention for each element in the batch that
stops at the corresponding element in `seq_lens`.
Args:
hidden_states (`torch.FloatTensor` of shape `(batch, seq_len, *)`):
The sequences to mask, where `*` is any number of sequence-specific dimensions including none.
seq_lens (`torch.Tensor` of shape `(batch)`:
Each element represents the length of the sequence at the same index in `hidden_states`
Returns:
`torch.FloatTensor`: The float attention mask of shape `(batch, seq_len)`
"""
batch_size, mask_seq_len = hidden_states.shape[:2]
indices = torch.arange(mask_seq_len, device=seq_lens.device).expand(batch_size, -1)
bool_mask = indices >= seq_lens.unsqueeze(1).expand(-1, mask_seq_len)
mask = hidden_states.new_ones((batch_size, mask_seq_len))
mask = mask.masked_fill(bool_mask, 0)
return mask
| Wav2Vec2BertAdapter |
python | walkccc__LeetCode | solutions/3049. Earliest Second to Mark Indices II/3049.py | {
"start": 0,
"end": 2276
} | class ____:
def earliestSecondToMarkIndices(
self,
nums: list[int],
changeIndices: list[int],
) -> int:
# {the second: the index of nums can be zeroed at the current second}
secondToIndex = self._getSecondToIndex(nums, changeIndices)
numsSum = sum(nums)
def canMark(maxSecond: int) -> bool:
"""
Returns True if all indices of `nums` can be marked within `maxSecond`.
"""
# Use a min-heap to greedily pop out the minimum number, which yields the
# least saving.
minHeap = []
marks = 0
for second in range(maxSecond - 1, -1, -1):
if second in secondToIndex:
# The number mapped by the index is a candidate to be zeroed out.
index = secondToIndex[second]
heapq.heappush(minHeap, nums[index])
if marks == 0:
# Running out of marks, so need to pop out the minimum number.
# So, the current second will be used to mark an index.
heapq.heappop(minHeap)
marks += 1
else:
# There're enough marks.
# So, the current second will be used to zero out a number.
marks -= 1
else:
# There's no candidate to be zeroed out.
# So, the current second will be used to mark an index.
marks += 1
decrementAndMarkCost = ((numsSum - sum(minHeap)) +
(len(nums) - len(minHeap)))
zeroAndMarkCost = len(minHeap) + len(minHeap)
return decrementAndMarkCost + zeroAndMarkCost <= maxSecond
l = 0
r = len(changeIndices) + 1
ans = bisect.bisect_left(range(l, r), True, key=canMark) + l
return ans if ans <= len(changeIndices) else -1
def _getSecondToIndex(
self,
nums: list[int],
changeIndices: list[int],
) -> dict[int, int]:
# {the `index` of nums: the earliest second to zero out nums[index]}
indexToFirstSecond = {}
for zeroIndexedSecond, oneIndexedIndex in enumerate(changeIndices):
index = oneIndexedIndex - 1 # Convert to 0-indexed.
if nums[index] > 0 and index not in indexToFirstSecond:
indexToFirstSecond[index] = zeroIndexedSecond
return {second: index for index, second in indexToFirstSecond.items()}
| Solution |
python | walkccc__LeetCode | solutions/2428. Maximum Sum of an Hourglass/2428.py | {
"start": 0,
"end": 305
} | class ____:
def maxSum(self, grid: list[list[int]]) -> int:
return max(
grid[i - 1][j - 1] + grid[i - 1][j] + grid[i - 1][j + 1] + grid[i][j] +
grid[i + 1][j - 1] + grid[i + 1][j] + grid[i + 1][j + 1]
for i in range(1, len(grid) - 1) for j in range(1, len(grid[0]) - 1))
| Solution |
python | pytorch__pytorch | torch/_subclasses/fake_tensor.py | {
"start": 43160,
"end": 43232
} | class ____:
pass
@dataclass(frozen=True, slots=True)
| SingletonConstant |
python | huggingface__transformers | src/transformers/models/chinese_clip/modeling_chinese_clip.py | {
"start": 31739,
"end": 33977
} | class ____(nn.Module):
def __init__(self, config: ChineseCLIPVisionConfig):
super().__init__()
self.config = config
embed_dim = config.hidden_size
self.embeddings = ChineseCLIPVisionEmbeddings(config)
self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
self.encoder = ChineseCLIPVisionEncoder(config)
self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
@can_return_tuple
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
interpolate_pos_encoding: bool = False,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithPooling]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
hidden_states = self.pre_layrnorm(hidden_states)
encoder_outputs = self.encoder(
inputs_embeds=hidden_states,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
last_hidden_state = encoder_outputs[0]
pooled_output = last_hidden_state[:, 0, :]
pooled_output = self.post_layernorm(pooled_output)
return BaseModelOutputWithPooling(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
@auto_docstring(
custom_intro="""
The text model from CHINESE_CLIP without any head or projection on top.
"""
)
| ChineseCLIPVisionTransformer |
python | kamyu104__LeetCode-Solutions | Python/find-the-number-of-distinct-colors-among-the-balls.py | {
"start": 63,
"end": 635
} | class ____(object):
def queryResults(self, limit, queries):
"""
:type limit: int
:type queries: List[List[int]]
:rtype: List[int]
"""
result = [0]*len(queries)
lookup = {}
cnt = collections.Counter()
for i, (x, y) in enumerate(queries):
if x in lookup:
cnt[lookup[x]] -= 1
if not cnt[lookup[x]]:
del cnt[lookup[x]]
lookup[x] = y
cnt[lookup[x]] += 1
result[i] = len(cnt)
return result
| Solution |
python | huggingface__transformers | src/transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py | {
"start": 23943,
"end": 25082
} | class ____(nn.Module):
def __init__(
self,
input_dim: int,
hidden_dim: int,
output_dim: int,
num_layers: int,
activation: str = "relu",
sigmoid_output: bool = False,
):
super().__init__()
self.num_layers = num_layers
self.activation = ACT2FN[activation]
self.proj_in = nn.Linear(input_dim, hidden_dim)
self.proj_out = nn.Linear(hidden_dim, output_dim)
self.layers = nn.ModuleList([nn.Linear(hidden_dim, hidden_dim) for _ in range(num_layers - 2)])
self.sigmoid_output = sigmoid_output
def forward(self, hidden_states):
hidden_states = self.proj_in(hidden_states)
hidden_states = self.activation(hidden_states)
for layer in self.layers:
hidden_states = self.activation(layer(hidden_states))
hidden_states = self.proj_out(hidden_states)
if self.sigmoid_output:
hidden_states = F.sigmoid(hidden_states)
return hidden_states
@dataclass
@auto_docstring(custom_intro="Base class for the Sam3TrackerVideo model's output.")
| Sam3TrackerVideoFeedForward |
python | langchain-ai__langchain | libs/partners/fireworks/langchain_fireworks/chat_models.py | {
"start": 9976,
"end": 41777
} | class ____(BaseChatModel):
"""`Fireworks` Chat large language models API.
To use, you should have the
environment variable `FIREWORKS_API_KEY` set with your API key.
Any parameters that are valid to be passed to the fireworks.create call
can be passed in, even if not explicitly saved on this class.
Example:
```python
from langchain_fireworks.chat_models import ChatFireworks
fireworks = ChatFireworks(
model_name="accounts/fireworks/models/llama-v3p1-8b-instruct"
)
```
"""
@property
def lc_secrets(self) -> dict[str, str]:
return {"fireworks_api_key": "FIREWORKS_API_KEY"}
@classmethod
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
Returns:
`["langchain", "chat_models", "fireworks"]`
"""
return ["langchain", "chat_models", "fireworks"]
@property
def lc_attributes(self) -> dict[str, Any]:
attributes: dict[str, Any] = {}
if self.fireworks_api_base:
attributes["fireworks_api_base"] = self.fireworks_api_base
return attributes
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by LangChain."""
return True
client: Any = Field(default=None, exclude=True)
async_client: Any = Field(default=None, exclude=True)
model_name: str = Field(alias="model")
"""Model name to use."""
temperature: float | None = None
"""What sampling temperature to use."""
stop: str | list[str] | None = Field(default=None, alias="stop_sequences")
"""Default stop sequences."""
model_kwargs: dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
fireworks_api_key: SecretStr = Field(
alias="api_key",
default_factory=secret_from_env(
"FIREWORKS_API_KEY",
error_message=(
"You must specify an api key. "
"You can pass it an argument as `api_key=...` or "
"set the environment variable `FIREWORKS_API_KEY`."
),
),
)
"""Fireworks API key.
Automatically read from env variable `FIREWORKS_API_KEY` if not provided.
"""
fireworks_api_base: str | None = Field(
alias="base_url", default_factory=from_env("FIREWORKS_API_BASE", default=None)
)
"""Base URL path for API requests, leave blank if not using a proxy or service
emulator.
"""
request_timeout: float | tuple[float, float] | Any | None = Field(
default=None, alias="timeout"
)
"""Timeout for requests to Fireworks completion API. Can be `float`,
`httpx.Timeout` or `None`.
"""
streaming: bool = False
"""Whether to stream the results or not."""
n: int = 1
"""Number of chat completions to generate for each prompt."""
max_tokens: int | None = None
"""Maximum number of tokens to generate."""
max_retries: int | None = None
"""Maximum number of retries to make when generating."""
model_config = ConfigDict(
populate_by_name=True,
)
@model_validator(mode="before")
@classmethod
def build_extra(cls, values: dict[str, Any]) -> Any:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
return _build_model_kwargs(values, all_required_field_names)
@model_validator(mode="after")
def validate_environment(self) -> Self:
"""Validate that api key and python package exists in environment."""
if self.n < 1:
msg = "n must be at least 1."
raise ValueError(msg)
if self.n > 1 and self.streaming:
msg = "n must be 1 when streaming."
raise ValueError(msg)
client_params = {
"api_key": (
self.fireworks_api_key.get_secret_value()
if self.fireworks_api_key
else None
),
"base_url": self.fireworks_api_base,
"timeout": self.request_timeout,
}
if not self.client:
self.client = Fireworks(**client_params).chat.completions
if not self.async_client:
self.async_client = AsyncFireworks(**client_params).chat.completions
if self.max_retries:
self.client._max_retries = self.max_retries
self.async_client._max_retries = self.max_retries
return self
@model_validator(mode="after")
def _set_model_profile(self) -> Self:
"""Set model profile if not overridden."""
if self.profile is None:
self.profile = _get_default_model_profile(self.model_name)
return self
@property
def _default_params(self) -> dict[str, Any]:
"""Get the default parameters for calling Fireworks API."""
params = {
"model": self.model_name,
"stream": self.streaming,
"n": self.n,
"stop": self.stop,
**self.model_kwargs,
}
if self.temperature is not None:
params["temperature"] = self.temperature
if self.max_tokens is not None:
params["max_tokens"] = self.max_tokens
return params
def _get_ls_params(
self, stop: list[str] | None = None, **kwargs: Any
) -> LangSmithParams:
"""Get standard params for tracing."""
params = self._get_invocation_params(stop=stop, **kwargs)
ls_params = LangSmithParams(
ls_provider="fireworks",
ls_model_name=params.get("model", self.model_name),
ls_model_type="chat",
ls_temperature=params.get("temperature", self.temperature),
)
if ls_max_tokens := params.get("max_tokens", self.max_tokens):
ls_params["ls_max_tokens"] = ls_max_tokens
if ls_stop := stop or params.get("stop", None):
ls_params["ls_stop"] = ls_stop
return ls_params
def _combine_llm_outputs(self, llm_outputs: list[dict | None]) -> dict:
overall_token_usage: dict = {}
system_fingerprint = None
for output in llm_outputs:
if output is None:
# Happens in streaming
continue
token_usage = output["token_usage"]
if token_usage is not None:
for k, v in token_usage.items():
if k in overall_token_usage:
overall_token_usage[k] += v
else:
overall_token_usage[k] = v
if system_fingerprint is None:
system_fingerprint = output.get("system_fingerprint")
combined = {"token_usage": overall_token_usage, "model_name": self.model_name}
if system_fingerprint:
combined["system_fingerprint"] = system_fingerprint
return combined
def _stream(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class: type[BaseMessageChunk] = AIMessageChunk
for chunk in self.client.create(messages=message_dicts, **params):
if not isinstance(chunk, dict):
chunk = chunk.model_dump()
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
message_chunk = _convert_chunk_to_message_chunk(chunk, default_chunk_class)
generation_info = {}
if finish_reason := choice.get("finish_reason"):
generation_info["finish_reason"] = finish_reason
generation_info["model_name"] = self.model_name
logprobs = choice.get("logprobs")
if logprobs:
generation_info["logprobs"] = logprobs
default_chunk_class = message_chunk.__class__
generation_chunk = ChatGenerationChunk(
message=message_chunk, generation_info=generation_info or None
)
if run_manager:
run_manager.on_llm_new_token(
generation_chunk.text, chunk=generation_chunk, logprobs=logprobs
)
yield generation_chunk
def _generate(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
stream: bool | None = None, # noqa: FBT001
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return generate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {
**params,
**({"stream": stream} if stream is not None else {}),
**kwargs,
}
response = self.client.create(messages=message_dicts, **params)
return self._create_chat_result(response)
def _create_message_dicts(
self, messages: list[BaseMessage], stop: list[str] | None
) -> tuple[list[dict[str, Any]], dict[str, Any]]:
params = self._default_params
if stop is not None:
params["stop"] = stop
message_dicts = [_convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _create_chat_result(self, response: dict | BaseModel) -> ChatResult:
generations = []
if not isinstance(response, dict):
response = response.model_dump()
token_usage = response.get("usage", {})
for res in response["choices"]:
message = _convert_dict_to_message(res["message"])
if token_usage and isinstance(message, AIMessage):
message.usage_metadata = {
"input_tokens": token_usage.get("prompt_tokens", 0),
"output_tokens": token_usage.get("completion_tokens", 0),
"total_tokens": token_usage.get("total_tokens", 0),
}
message.response_metadata["model_provider"] = "fireworks"
message.response_metadata["model_name"] = self.model_name
generation_info = {"finish_reason": res.get("finish_reason")}
if "logprobs" in res:
generation_info["logprobs"] = res["logprobs"]
gen = ChatGeneration(
message=message,
generation_info=generation_info,
)
generations.append(gen)
llm_output = {
"token_usage": token_usage,
"system_fingerprint": response.get("system_fingerprint", ""),
}
return ChatResult(generations=generations, llm_output=llm_output)
async def _astream(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: AsyncCallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, "stream": True}
default_chunk_class: type[BaseMessageChunk] = AIMessageChunk
async for chunk in self.async_client.acreate(messages=message_dicts, **params):
if not isinstance(chunk, dict):
chunk = chunk.model_dump()
if len(chunk["choices"]) == 0:
continue
choice = chunk["choices"][0]
message_chunk = _convert_chunk_to_message_chunk(chunk, default_chunk_class)
generation_info = {}
if finish_reason := choice.get("finish_reason"):
generation_info["finish_reason"] = finish_reason
generation_info["model_name"] = self.model_name
logprobs = choice.get("logprobs")
if logprobs:
generation_info["logprobs"] = logprobs
default_chunk_class = message_chunk.__class__
generation_chunk = ChatGenerationChunk(
message=message_chunk, generation_info=generation_info or None
)
if run_manager:
await run_manager.on_llm_new_token(
token=generation_chunk.text,
chunk=generation_chunk,
logprobs=logprobs,
)
yield generation_chunk
async def _agenerate(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: AsyncCallbackManagerForLLMRun | None = None,
stream: bool | None = None, # noqa: FBT001
**kwargs: Any,
) -> ChatResult:
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return await agenerate_from_stream(stream_iter)
message_dicts, params = self._create_message_dicts(messages, stop)
params = {
**params,
**({"stream": stream} if stream is not None else {}),
**kwargs,
}
response = await self.async_client.acreate(messages=message_dicts, **params)
return self._create_chat_result(response)
@property
def _identifying_params(self) -> dict[str, Any]:
"""Get the identifying parameters."""
return {"model_name": self.model_name, **self._default_params}
def _get_invocation_params(
self, stop: list[str] | None = None, **kwargs: Any
) -> dict[str, Any]:
"""Get the parameters used to invoke the model."""
return {
"model": self.model_name,
**super()._get_invocation_params(stop=stop),
**self._default_params,
**kwargs,
}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "fireworks-chat"
def bind_tools(
self,
tools: Sequence[dict[str, Any] | type[BaseModel] | Callable | BaseTool],
*,
tool_choice: dict | str | bool | None = None,
**kwargs: Any,
) -> Runnable[LanguageModelInput, AIMessage]:
"""Bind tool-like objects to this chat model.
Assumes model is compatible with Fireworks tool-calling API.
Args:
tools: A list of tool definitions to bind to this chat model.
Supports any tool definition handled by
`langchain_core.utils.function_calling.convert_to_openai_tool`.
tool_choice: Which tool to require the model to call.
Must be the name of the single provided function,
`'auto'` to automatically determine which function to call
with the option to not call any function, `'any'` to enforce that some
function is called, or a dict of the form:
`{"type": "function", "function": {"name": <<tool_name>>}}`.
**kwargs: Any additional parameters to pass to
`langchain_fireworks.chat_models.ChatFireworks.bind`
"""
formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
if tool_choice is not None and tool_choice:
if isinstance(tool_choice, str) and (
tool_choice not in ("auto", "any", "none")
):
tool_choice = {"type": "function", "function": {"name": tool_choice}}
if isinstance(tool_choice, bool):
if len(tools) > 1:
msg = (
"tool_choice can only be True when there is one tool. Received "
f"{len(tools)} tools."
)
raise ValueError(msg)
tool_name = formatted_tools[0]["function"]["name"]
tool_choice = {
"type": "function",
"function": {"name": tool_name},
}
kwargs["tool_choice"] = tool_choice
return super().bind(tools=formatted_tools, **kwargs)
def with_structured_output(
self,
schema: dict | type[BaseModel] | None = None,
*,
method: Literal[
"function_calling", "json_mode", "json_schema"
] = "function_calling",
include_raw: bool = False,
**kwargs: Any,
) -> Runnable[LanguageModelInput, dict | BaseModel]:
"""Model wrapper that returns outputs formatted to match the given schema.
Args:
schema: The output schema. Can be passed in as:
- An OpenAI function/tool schema,
- A JSON Schema,
- A `TypedDict` class,
- Or a Pydantic class.
If `schema` is a Pydantic class then the model output will be a
Pydantic instance of that class, and the model-generated fields will be
validated by the Pydantic class. Otherwise the model output will be a
dict and will not be validated.
See `langchain_core.utils.function_calling.convert_to_openai_tool` for
more on how to properly specify types and descriptions of schema fields
when specifying a Pydantic or `TypedDict` class.
method: The method for steering model generation, one of:
- `'function_calling'`:
Uses Fireworks's [tool-calling features](https://docs.fireworks.ai/guides/function-calling).
- `'json_schema'`:
Uses Fireworks's [structured output feature](https://docs.fireworks.ai/structured-responses/structured-response-formatting).
- `'json_mode'`:
Uses Fireworks's [JSON mode feature](https://docs.fireworks.ai/structured-responses/structured-response-formatting).
!!! warning "Behavior changed in `langchain-fireworks` 0.2.8"
Added support for `'json_schema'`.
include_raw:
If `False` then only the parsed structured output is returned.
If an error occurs during model output parsing it will be raised.
If `True` then both the raw model response (a `BaseMessage`) and the
parsed model response will be returned.
If an error occurs during output parsing it will be caught and returned
as well.
The final output is always a `dict` with keys `'raw'`, `'parsed'`, and
`'parsing_error'`.
kwargs:
Any additional parameters to pass to the `langchain.runnable.Runnable`
constructor.
Returns:
A `Runnable` that takes same inputs as a
`langchain_core.language_models.chat.BaseChatModel`. If `include_raw` is
`False` and `schema` is a Pydantic class, `Runnable` outputs an instance
of `schema` (i.e., a Pydantic object). Otherwise, if `include_raw` is
`False` then `Runnable` outputs a `dict`.
If `include_raw` is `True`, then `Runnable` outputs a `dict` with keys:
- `'raw'`: `BaseMessage`
- `'parsed'`: `None` if there was a parsing error, otherwise the type
depends on the `schema` as described above.
- `'parsing_error'`: `BaseException | None`
Example: schema=Pydantic class, method="function_calling", include_raw=False:
```python
from typing import Optional
from langchain_fireworks import ChatFireworks
from pydantic import BaseModel, Field
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
# If we provide default values and/or descriptions for fields, these will be passed
# to the model. This is an important part of improving a model's ability to
# correctly return structured outputs.
justification: str | None = Field(
default=None, description="A justification for the answer."
)
model = ChatFireworks(
model="accounts/fireworks/models/firefunction-v1",
temperature=0,
)
structured_model = model.with_structured_output(AnswerWithJustification)
structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
# -> AnswerWithJustification(
# answer='They weigh the same',
# justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
# )
```
Example: schema=Pydantic class, method="function_calling", include_raw=True:
```python
from langchain_fireworks import ChatFireworks
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: str
model = ChatFireworks(
model="accounts/fireworks/models/firefunction-v1",
temperature=0,
)
structured_model = model.with_structured_output(
AnswerWithJustification, include_raw=True
)
structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
# -> {
# 'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
# 'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
# 'parsing_error': None
# }
```
Example: schema=TypedDict class, method="function_calling", include_raw=False:
```python
from typing_extensions import Annotated, TypedDict
from langchain_fireworks import ChatFireworks
class AnswerWithJustification(TypedDict):
'''An answer to the user question along with justification for the answer.'''
answer: str
justification: Annotated[
str | None, None, "A justification for the answer."
]
model = ChatFireworks(
model="accounts/fireworks/models/firefunction-v1",
temperature=0,
)
structured_model = model.with_structured_output(AnswerWithJustification)
structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
# -> {
# 'answer': 'They weigh the same',
# 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
# }
```
Example: schema=OpenAI function schema, method="function_calling", include_raw=False:
```python
from langchain_fireworks import ChatFireworks
oai_schema = {
"name": "AnswerWithJustification",
"description": "An answer to the user question along with justification for the answer.",
"parameters": {
"type": "object",
"properties": {
"answer": {"type": "string"},
"justification": {
"description": "A justification for the answer.",
"type": "string",
},
},
"required": ["answer"],
},
}
model = ChatFireworks(
model="accounts/fireworks/models/firefunction-v1",
temperature=0,
)
structured_model = model.with_structured_output(oai_schema)
structured_model.invoke(
"What weighs more a pound of bricks or a pound of feathers"
)
# -> {
# 'answer': 'They weigh the same',
# 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
# }
```
Example: schema=Pydantic class, method="json_mode", include_raw=True:
```python
from langchain_fireworks import ChatFireworks
from pydantic import BaseModel
class AnswerWithJustification(BaseModel):
answer: str
justification: str
model = ChatFireworks(
model="accounts/fireworks/models/firefunction-v1", temperature=0
)
structured_model = model.with_structured_output(
AnswerWithJustification, method="json_mode", include_raw=True
)
structured_model.invoke(
"Answer the following question. "
"Make sure to return a JSON blob with keys 'answer' and 'justification'. "
"What's heavier a pound of bricks or a pound of feathers?"
)
# -> {
# 'raw': AIMessage(content='{"answer": "They are both the same weight.", "justification": "Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight."}'),
# 'parsed': AnswerWithJustification(answer='They are both the same weight.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight.'),
# 'parsing_error': None
# }
```
Example: schema=None, method="json_mode", include_raw=True:
```python
structured_model = model.with_structured_output(
method="json_mode", include_raw=True
)
structured_model.invoke(
"Answer the following question. "
"Make sure to return a JSON blob with keys 'answer' and 'justification'. "
"What's heavier a pound of bricks or a pound of feathers?"
)
# -> {
# 'raw': AIMessage(content='{"answer": "They are both the same weight.", "justification": "Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight."}'),
# 'parsed': {
# 'answer': 'They are both the same weight.',
# 'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight.'
# },
# 'parsing_error': None
# }
```
""" # noqa: E501
_ = kwargs.pop("strict", None)
if kwargs:
msg = f"Received unsupported arguments {kwargs}"
raise ValueError(msg)
is_pydantic_schema = _is_pydantic_class(schema)
if method == "function_calling":
if schema is None:
msg = (
"schema must be specified when method is 'function_calling'. "
"Received None."
)
raise ValueError(msg)
formatted_tool = convert_to_openai_tool(schema)
tool_name = formatted_tool["function"]["name"]
llm = self.bind_tools(
[schema],
tool_choice=tool_name,
ls_structured_output_format={
"kwargs": {"method": "function_calling"},
"schema": formatted_tool,
},
)
if is_pydantic_schema:
output_parser: OutputParserLike = PydanticToolsParser(
tools=[schema], # type: ignore[list-item]
first_tool_only=True, # type: ignore[list-item]
)
else:
output_parser = JsonOutputKeyToolsParser(
key_name=tool_name, first_tool_only=True
)
elif method == "json_schema":
if schema is None:
msg = (
"schema must be specified when method is 'json_schema'. "
"Received None."
)
raise ValueError(msg)
formatted_schema = convert_to_json_schema(schema)
llm = self.bind(
response_format={"type": "json_object", "schema": formatted_schema},
ls_structured_output_format={
"kwargs": {"method": "json_schema"},
"schema": schema,
},
)
output_parser = (
PydanticOutputParser(pydantic_object=schema) # type: ignore[arg-type]
if is_pydantic_schema
else JsonOutputParser()
)
elif method == "json_mode":
llm = self.bind(
response_format={"type": "json_object"},
ls_structured_output_format={
"kwargs": {"method": "json_mode"},
"schema": schema,
},
)
output_parser = (
PydanticOutputParser(pydantic_object=schema) # type: ignore[type-var, arg-type]
if is_pydantic_schema
else JsonOutputParser()
)
else:
msg = (
f"Unrecognized method argument. Expected one of 'function_calling' or "
f"'json_mode'. Received: '{method}'"
)
raise ValueError(msg)
if include_raw:
parser_assign = RunnablePassthrough.assign(
parsed=itemgetter("raw") | output_parser, parsing_error=lambda _: None
)
parser_none = RunnablePassthrough.assign(parsed=lambda _: None)
parser_with_fallback = parser_assign.with_fallbacks(
[parser_none], exception_key="parsing_error"
)
return RunnableMap(raw=llm) | parser_with_fallback
return llm | output_parser
def _is_pydantic_class(obj: Any) -> bool:
return isinstance(obj, type) and is_basemodel_subclass(obj)
def _lc_tool_call_to_fireworks_tool_call(tool_call: ToolCall) -> dict:
return {
"type": "function",
"id": tool_call["id"],
"function": {
"name": tool_call["name"],
"arguments": json.dumps(tool_call["args"], ensure_ascii=False),
},
}
def _lc_invalid_tool_call_to_fireworks_tool_call(
invalid_tool_call: InvalidToolCall,
) -> dict:
return {
"type": "function",
"id": invalid_tool_call["id"],
"function": {
"name": invalid_tool_call["name"],
"arguments": invalid_tool_call["args"],
},
}
| ChatFireworks |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_resolver.py | {
"start": 14668,
"end": 25207
} | class ____(ResolverBase):
def test_resolver(self):
url = self.resolver.resolve(project=self.pip)
self.assertEqual(url, "http://pip.readthedocs.org/en/latest/")
def test_resolver_domain(self):
self.domain = fixture.get(
Domain,
domain="docs.foobar.com",
project=self.pip,
canonical=True,
https=False,
)
url = Resolver().resolve(project=self.pip)
self.assertEqual(url, "http://docs.foobar.com/en/latest/")
def test_resolver_domain_https(self):
self.domain = fixture.get(
Domain,
domain="docs.foobar.com",
project=self.pip,
https=True,
canonical=True,
)
url = Resolver().resolve(project=self.pip)
self.assertEqual(url, "https://docs.foobar.com/en/latest/")
def test_resolver_subproject(self):
url = self.resolver.resolve(project=self.subproject)
self.assertEqual(
url,
"http://pip.readthedocs.org/projects/sub/ja/latest/",
)
def test_resolver_translation(self):
url = self.resolver.resolve(project=self.translation)
self.assertEqual(url, "http://pip.readthedocs.org/ja/latest/")
def test_resolver_nested_translation_of_a_subproject(self):
"""The project is a translation, and the main translation is a subproject of a project."""
translation = fixture.get(
Project,
slug="api-es",
language="es",
users=[self.owner],
main_language_project=self.subproject,
)
url = self.resolver.resolve(project=translation)
self.assertEqual(
url,
"http://pip.readthedocs.org/projects/sub/es/latest/",
)
def test_resolver_nested_subproject_of_a_translation(self):
"""The project is a subproject, and the superproject is a translation of a project."""
project = fixture.get(
Project,
slug="all-docs",
language="en",
users=[self.owner],
main_language_project=None,
)
translation = fixture.get(
Project,
slug="docs-es",
language="es",
users=[self.owner],
main_language_project=project,
)
subproject = fixture.get(
Project,
slug="api-es",
language="es",
users=[self.owner],
main_language_project=None,
)
translation.add_subproject(subproject)
url = self.resolver.resolve(project=subproject)
self.assertEqual(
url, "http://docs-es.readthedocs.org/projects/api-es/es/latest/"
)
def test_resolver_single_version(self):
self.pip.versioning_scheme = SINGLE_VERSION_WITHOUT_TRANSLATIONS
self.pip.save()
url = self.resolver.resolve(project=self.pip)
self.assertEqual(url, "http://pip.readthedocs.org/")
def test_resolver_subproject_alias(self):
relation = self.pip.subprojects.first()
relation.alias = "sub_alias"
relation.save()
url = Resolver().resolve(project=self.subproject)
self.assertEqual(
url,
"http://pip.readthedocs.org/projects/sub_alias/ja/latest/",
)
def test_resolver_private_project(self):
self.pip.privacy_level = PRIVATE
self.pip.save()
url = self.resolver.resolve(project=self.pip)
self.assertEqual(url, "http://pip.readthedocs.org/en/latest/")
def test_resolver_private_project_override(self):
self.pip.privacy_level = PRIVATE
self.pip.save()
url = self.resolver.resolve(project=self.pip)
self.assertEqual(url, "http://pip.readthedocs.org/en/latest/")
url = self.resolver.resolve(project=self.pip)
self.assertEqual(url, "http://pip.readthedocs.org/en/latest/")
def test_resolver_private_version_override(self):
latest = self.pip.versions.first()
latest.privacy_level = PRIVATE
latest.save()
url = self.resolver.resolve(project=self.pip)
self.assertEqual(url, "http://pip.readthedocs.org/en/latest/")
url = self.resolver.resolve(project=self.pip)
self.assertEqual(url, "http://pip.readthedocs.org/en/latest/")
@override_settings(
PRODUCTION_DOMAIN="readthedocs.org",
PUBLIC_DOMAIN="public.readthedocs.org",
)
def test_resolver_public_domain_overrides(self):
url = Resolver().resolve(project=self.pip)
self.assertEqual(
url,
"http://pip.public.readthedocs.org/en/latest/",
)
url = Resolver().resolve(project=self.pip)
self.assertEqual(
url,
"http://pip.public.readthedocs.org/en/latest/",
)
# Domain overrides PUBLIC_DOMAIN
self.domain = fixture.get(
Domain,
domain="docs.foobar.com",
project=self.pip,
canonical=True,
https=False,
)
# Purge the cached domain.
del self.pip.canonical_custom_domain
url = Resolver().resolve(project=self.pip)
self.assertEqual(url, "http://docs.foobar.com/en/latest/")
url = Resolver().resolve(project=self.pip)
self.assertEqual(url, "http://docs.foobar.com/en/latest/")
@override_settings(
PRODUCTION_DOMAIN="readthedocs.org",
PUBLIC_DOMAIN="readthedocs.io",
)
def test_resolver_domain_https(self):
with override_settings(PUBLIC_DOMAIN_USES_HTTPS=True):
url = Resolver().resolve(project=self.pip)
self.assertEqual(url, "https://pip.readthedocs.io/en/latest/")
url = Resolver().resolve(project=self.pip)
self.assertEqual(url, "https://pip.readthedocs.io/en/latest/")
with override_settings(PUBLIC_DOMAIN_USES_HTTPS=False):
url = Resolver().resolve(project=self.pip)
self.assertEqual(url, "http://pip.readthedocs.io/en/latest/")
@override_settings(
PUBLIC_DOMAIN="readthedocs.io",
USE_SUBDOMAIN=True,
)
def test_resolver_multiple_versions_without_translations(self):
self.pip.versioning_scheme = MULTIPLE_VERSIONS_WITHOUT_TRANSLATIONS
self.pip.save()
url = Resolver().resolve(project=self.pip)
self.assertEqual(url, "http://pip.readthedocs.io/latest/")
url = Resolver().resolve(project=self.pip, version_slug="stable")
self.assertEqual(url, "http://pip.readthedocs.io/stable/")
@override_settings(
PUBLIC_DOMAIN="readthedocs.io",
USE_SUBDOMAIN=True,
)
def test_resolver_multiple_versions_without_translations_with_subproject(self):
self.pip.versioning_scheme = MULTIPLE_VERSIONS_WITHOUT_TRANSLATIONS
self.pip.save()
url = Resolver().resolve(project=self.subproject)
self.assertEqual(url, "http://pip.readthedocs.io/projects/sub/ja/latest/")
url = Resolver().resolve(project=self.subproject, version_slug="stable")
self.assertEqual(url, "http://pip.readthedocs.io/projects/sub/ja/stable/")
@override_settings(
PUBLIC_DOMAIN="readthedocs.io",
USE_SUBDOMAIN=True,
)
def test_resolver_subproject_with_multiple_versions_without_translations(self):
self.subproject.versioning_scheme = MULTIPLE_VERSIONS_WITHOUT_TRANSLATIONS
self.pip.save()
url = Resolver().resolve(project=self.subproject)
self.assertEqual(url, "http://pip.readthedocs.io/projects/sub/latest/")
url = Resolver().resolve(project=self.subproject, version_slug="stable")
self.assertEqual(url, "http://pip.readthedocs.io/projects/sub/stable/")
def test_resolve_project_object(self):
url = self.resolver.resolve_project(self.pip)
self.assertEqual(url, "http://pip.readthedocs.org/")
url = self.resolver.resolve_project(self.pip, filename="index.html")
self.assertEqual(url, "http://pip.readthedocs.org/index.html")
def test_resolve_subproject_object(self):
url = self.resolver.resolve_project(self.subproject)
self.assertEqual(url, "http://pip.readthedocs.org/")
url = self.resolver.resolve_project(self.subproject, filename="index.html")
self.assertEqual(url, "http://pip.readthedocs.org/index.html")
def test_resolve_translation_object(self):
url = self.resolver.resolve_project(self.translation)
self.assertEqual(url, "http://pip.readthedocs.org/")
url = self.resolver.resolve_project(self.translation, filename="index.html")
self.assertEqual(url, "http://pip.readthedocs.org/index.html")
def test_resolve_version_object(self):
url = self.resolver.resolve_version(self.pip)
self.assertEqual(url, "http://pip.readthedocs.org/en/latest/")
url = self.resolver.resolve_version(self.pip, version=self.version)
self.assertEqual(url, "http://pip.readthedocs.org/en/latest/")
version = get(Version, project=self.pip, slug="v2")
url = self.resolver.resolve_version(self.pip, version=version)
self.assertEqual(url, "http://pip.readthedocs.org/en/v2/")
def test_resolve_version_from_subproject(self):
url = self.resolver.resolve_version(self.subproject)
self.assertEqual(url, "http://pip.readthedocs.org/projects/sub/ja/latest/")
version = self.subproject.versions.first()
url = self.resolver.resolve_version(self.subproject, version=version)
self.assertEqual(url, "http://pip.readthedocs.org/projects/sub/ja/latest/")
version = get(Version, project=self.subproject, slug="v2")
url = self.resolver.resolve_version(self.subproject, version=version)
self.assertEqual(url, "http://pip.readthedocs.org/projects/sub/ja/v2/")
def test_resolve_version_from_translation(self):
url = self.resolver.resolve_version(self.translation)
self.assertEqual(url, "http://pip.readthedocs.org/ja/latest/")
version = self.translation.versions.first()
url = self.resolver.resolve_version(self.translation, version=version)
self.assertEqual(url, "http://pip.readthedocs.org/ja/latest/")
version = get(Version, project=self.translation, slug="v2")
url = self.resolver.resolve_version(self.translation, version=version)
self.assertEqual(url, "http://pip.readthedocs.org/ja/v2/")
| ResolverTests |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 119644,
"end": 120359
} | class ____(Operation):
def call(self, x):
return backend.numpy.isfinite(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype="bool")
@keras_export(["keras.ops.isfinite", "keras.ops.numpy.isfinite"])
def isfinite(x):
"""Return whether a tensor is finite, element-wise.
Real values are finite when they are not NaN, not positive infinity, and
not negative infinity. Complex values are finite when both their real
and imaginary parts are finite.
Args:
x: Input tensor.
Returns:
Output boolean tensor.
"""
if any_symbolic_tensors((x,)):
return Isfinite().symbolic_call(x)
return backend.numpy.isfinite(x)
| Isfinite |
python | PrefectHQ__prefect | src/prefect/server/schemas/ui.py | {
"start": 127,
"end": 266
} | class ____(CoreTaskRun):
"""A task run with additional details for display in the UI."""
flow_run_name: Optional[str] = None
| UITaskRun |
python | pypa__warehouse | warehouse/packaging/services.py | {
"start": 13182,
"end": 13780
} | class ____(GenericGCSBlobStorage):
@classmethod
@google.api_core.retry.Retry(
predicate=google.api_core.retry.if_exception_type(
google.api_core.exceptions.ServiceUnavailable
)
)
def create_service(cls, context, request):
storage_client = request.find_service(name="gcloud.gcs")
bucket_name = request.registry.settings["files.bucket"]
bucket = storage_client.get_bucket(bucket_name)
prefix = request.registry.settings.get("files.prefix")
return cls(bucket, prefix=prefix)
@implementer(ISimpleStorage)
| GCSFileStorage |
python | neetcode-gh__leetcode | python/0706-design-hashmap.py | {
"start": 0,
"end": 139
} | class ____:
def __init__(self, key=-1, val=-1, next=None):
self.key = key
self.val = val
self.next = next
| ListNode |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/missingTypeArg1.py | {
"start": 262,
"end": 664
} | class ____(Class1):
pass
# This should generate an error when reportMissingTypeArgument is enabled.
_T2 = TypeVar("_T2", bound=Class1)
# This should generate an error when reportMissingTypeArgument is enabled.
var1: Class1 | None = None
GenericTypeAlias = Class1[_T1] | int
# This should generate an error when reportMissingTypeArgument is enabled.
var2: GenericTypeAlias | None = None
| Class2 |
python | django-extensions__django-extensions | django_extensions/management/commands/syncdata.py | {
"start": 817,
"end": 11067
} | class ____(BaseCommand):
"""syncdata command"""
help = "Makes the current database have the same data as the fixture(s), no more, no less." # noqa: E501
args = "fixture [fixture ...]"
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
"--skip-remove",
action="store_false",
dest="remove",
default=True,
help="Avoid remove any object from db",
)
parser.add_argument(
"--remove-before",
action="store_true",
dest="remove_before",
default=False,
help="Remove existing objects before inserting and updating new ones",
)
parser.add_argument(
"--database",
default=DEFAULT_DB_ALIAS,
help=(
"Nominates a specific database to load fixtures into. "
'Defaults to the "default" database.'
),
)
parser.add_argument(
"fixture_labels",
nargs="?",
type=str,
help="Specify the fixture label (comma separated)",
)
def remove_objects_not_in(self, objects_to_keep, verbosity):
"""
Delete all the objects in the database that are not in objects_to_keep.
- objects_to_keep: A map where the keys are classes, and the values are a
set of the objects of that class we should keep.
"""
for class_ in objects_to_keep.keys():
current = class_.objects.all()
current_ids = set(x.pk for x in current)
keep_ids = set(x.pk for x in objects_to_keep[class_])
remove_these_ones = current_ids.difference(keep_ids)
if remove_these_ones:
for obj in current:
if obj.pk in remove_these_ones:
obj.delete()
if verbosity >= 2:
print("Deleted object: %s" % str(obj))
if verbosity > 0 and remove_these_ones:
num_deleted = len(remove_these_ones)
if num_deleted > 1:
type_deleted = str(class_._meta.verbose_name_plural)
else:
type_deleted = str(class_._meta.verbose_name)
print("Deleted %s %s" % (str(num_deleted), type_deleted))
@signalcommand
def handle(self, *args, **options):
self.style = no_style()
self.using = options["database"]
fixture_labels = (
options["fixture_labels"].split(",") if options["fixture_labels"] else ()
)
try:
with transaction.atomic():
self.syncdata(fixture_labels, options)
except SyncDataError as exc:
raise CommandError(exc)
finally:
# Close the DB connection -- unless we're still in a transaction. This
# is required as a workaround for an edge case in MySQL: if the same
# connection is used to create tables, load data, and query, the query
# can return incorrect results. See Django #7572, MySQL #37735.
if transaction.get_autocommit(self.using):
connections[self.using].close()
def syncdata(self, fixture_labels, options):
verbosity = options["verbosity"]
show_traceback = options["traceback"]
# Keep a count of the installed objects and fixtures
fixture_count = 0
object_count = 0
objects_per_fixture = []
models = set()
# Get a cursor (even though we don't need one yet). This has
# the side effect of initializing the test database (if
# it isn't already initialized).
cursor = connections[self.using].cursor()
app_modules = [app.module for app in apps.get_app_configs()]
app_fixtures = [
os.path.join(os.path.dirname(app.__file__), "fixtures")
for app in app_modules
]
for fixture_label in fixture_labels:
parts = fixture_label.split(".")
if len(parts) == 1:
fixture_name = fixture_label
formats = serializers.get_public_serializer_formats()
else:
fixture_name, format_ = ".".join(parts[:-1]), parts[-1]
if format_ in serializers.get_public_serializer_formats():
formats = [format_]
else:
formats = []
if formats:
if verbosity > 1:
print("Loading '%s' fixtures..." % fixture_name)
else:
raise SyncDataError(
(
"Problem installing fixture '%s': %s is not a known "
"serialization format."
)
% (fixture_name, format_)
)
if os.path.isabs(fixture_name):
fixture_dirs = [fixture_name]
else:
fixture_dirs = app_fixtures + list(settings.FIXTURE_DIRS) + [""]
for fixture_dir in fixture_dirs:
if verbosity > 1:
print("Checking %s for fixtures..." % humanize(fixture_dir))
label_found = False
for format_ in formats:
if verbosity > 1:
print(
"Trying %s for %s fixture '%s'..."
% (humanize(fixture_dir), format_, fixture_name)
)
try:
full_path = os.path.join(
fixture_dir, ".".join([fixture_name, format_])
)
fixture = open(full_path, "r")
if label_found:
fixture.close()
raise SyncDataError(
"Multiple fixtures named '%s' in %s. Aborting."
% (fixture_name, humanize(fixture_dir))
)
else:
fixture_count += 1
objects_per_fixture.append(0)
if verbosity > 0:
print(
"Installing %s fixture '%s' from %s."
% (format_, fixture_name, humanize(fixture_dir))
)
try:
objects_to_keep = {}
objects = list(
serializers.deserialize(format_, fixture)
)
for obj in objects:
class_ = obj.object.__class__
if class_ not in objects_to_keep:
objects_to_keep[class_] = set()
objects_to_keep[class_].add(obj.object)
if options["remove"] and options["remove_before"]:
self.remove_objects_not_in(
objects_to_keep, verbosity
)
for obj in objects:
object_count += 1
objects_per_fixture[-1] += 1
models.add(obj.object.__class__)
obj.save()
if options["remove"] and not options["remove_before"]:
self.remove_objects_not_in(
objects_to_keep, verbosity
)
label_found = True
except (SystemExit, KeyboardInterrupt):
raise
except Exception:
import traceback
fixture.close()
if show_traceback:
traceback.print_exc()
raise SyncDataError(
"Problem installing fixture '%s': %s\n"
% (full_path, traceback.format_exc())
)
fixture.close()
except SyncDataError as e:
raise e
except Exception:
if verbosity > 1:
print(
"No %s fixture '%s' in %s."
% (format_, fixture_name, humanize(fixture_dir))
)
# If any of the fixtures we loaded contain 0 objects, assume that an
# error was encountered during fixture loading.
if 0 in objects_per_fixture:
raise SyncDataError(
"No fixture data found for '%s'. (File format may be invalid.)"
% fixture_name
)
# If we found even one object in a fixture, we need to reset the
# database sequences.
if object_count > 0:
sequence_sql = connections[self.using].ops.sequence_reset_sql(
self.style, models
)
if sequence_sql:
if verbosity > 1:
print("Resetting sequences")
for line in sequence_sql:
cursor.execute(line)
if object_count == 0:
if verbosity > 1:
print("No fixtures found.")
else:
if verbosity > 0:
print(
"Installed %d object%s from %d fixture%s"
% (
object_count,
pluralize(object_count),
fixture_count,
pluralize(fixture_count),
)
)
| Command |
python | plotly__plotly.py | plotly/graph_objs/scattergeo/unselected/_marker.py | {
"start": 233,
"end": 4065
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattergeo.unselected"
_path_str = "scattergeo.unselected.marker"
_valid_props = {"color", "opacity", "size"}
@property
def color(self):
"""
Sets the marker color of unselected points, applied only when a
selection exists.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def opacity(self):
"""
Sets the marker opacity of unselected points, applied only when
a selection exists.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def size(self):
"""
Sets the marker size of unselected points, applied only when a
selection exists.
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the marker color of unselected points, applied
only when a selection exists.
opacity
Sets the marker opacity of unselected points, applied
only when a selection exists.
size
Sets the marker size of unselected points, applied only
when a selection exists.
"""
def __init__(self, arg=None, color=None, opacity=None, size=None, **kwargs):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattergeo.unselected.Marker`
color
Sets the marker color of unselected points, applied
only when a selection exists.
opacity
Sets the marker opacity of unselected points, applied
only when a selection exists.
size
Sets the marker size of unselected points, applied only
when a selection exists.
Returns
-------
Marker
"""
super().__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattergeo.unselected.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergeo.unselected.Marker`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("opacity", arg, opacity)
self._set_property("size", arg, size)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Marker |
python | numpy__numpy | numpy/_core/tests/test_numeric.py | {
"start": 152142,
"end": 153678
} | class ____:
def test_simple(self):
[x, y] = np.indices((4, 3))
assert_array_equal(x, np.array([[0, 0, 0],
[1, 1, 1],
[2, 2, 2],
[3, 3, 3]]))
assert_array_equal(y, np.array([[0, 1, 2],
[0, 1, 2],
[0, 1, 2],
[0, 1, 2]]))
def test_single_input(self):
[x] = np.indices((4,))
assert_array_equal(x, np.array([0, 1, 2, 3]))
[x] = np.indices((4,), sparse=True)
assert_array_equal(x, np.array([0, 1, 2, 3]))
def test_scalar_input(self):
assert_array_equal([], np.indices(()))
assert_array_equal([], np.indices((), sparse=True))
assert_array_equal([[]], np.indices((0,)))
assert_array_equal([[]], np.indices((0,), sparse=True))
def test_sparse(self):
[x, y] = np.indices((4, 3), sparse=True)
assert_array_equal(x, np.array([[0], [1], [2], [3]]))
assert_array_equal(y, np.array([[0, 1, 2]]))
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
@pytest.mark.parametrize("dims", [(), (0,), (4, 3)])
def test_return_type(self, dtype, dims):
inds = np.indices(dims, dtype=dtype)
assert_(inds.dtype == dtype)
for arr in np.indices(dims, dtype=dtype, sparse=True):
assert_(arr.dtype == dtype)
| TestIndices |
python | conda__conda | tests/plugins/test_post_solves.py | {
"start": 504,
"end": 2175
} | class ____:
def post_solve_action(self) -> None:
pass
@plugins.hookimpl
def conda_post_solves(self):
yield plugins.CondaPostSolve(
name="custom-post-solve",
action=self.post_solve_action,
)
@pytest.fixture
def post_solve_plugin(
mocker: MockerFixture,
plugin_manager_with_reporter_backends: CondaPluginManager,
) -> PostSolvePlugin:
mocker.patch.object(PostSolvePlugin, "post_solve_action")
post_solve_plugin = PostSolvePlugin()
plugin_manager_with_reporter_backends.register(post_solve_plugin)
# register solvers
plugin_manager_with_reporter_backends.load_plugins(solvers)
return post_solve_plugin
def test_post_solve_invoked(
post_solve_plugin: PostSolvePlugin,
tmp_env: TmpEnvFixture,
path_factory: PathFactoryFixture,
):
with pytest.raises(DryRunExit):
with tmp_env("zlib", "--solver=classic", "--dry-run"):
pass
assert post_solve_plugin.post_solve_action.mock_calls
def test_post_solve_not_invoked(
post_solve_plugin: PostSolvePlugin,
conda_cli: CondaCLIFixture,
):
conda_cli("config")
assert not post_solve_plugin.post_solve_action.mock_calls
def test_post_solve_action_raises_exception(
post_solve_plugin: PostSolvePlugin,
tmp_env: TmpEnvFixture,
path_factory: PathFactoryFixture,
):
exc_message = "💥"
post_solve_plugin.post_solve_action.side_effect = [Exception(exc_message)]
with pytest.raises(Exception, match=exc_message):
with tmp_env("zlib", "--solver=classic", "--dry-run"):
pass
assert post_solve_plugin.post_solve_action.mock_calls
| PostSolvePlugin |
python | ansible__ansible | test/units/playbook/test_base.py | {
"start": 9637,
"end": 12486
} | class ____(base.Base):
name = FieldAttribute(isa='string', default='', always_post_validate=True)
test_attr_bool = FieldAttribute(isa='bool', always_post_validate=True)
test_attr_int = FieldAttribute(isa='int', always_post_validate=True)
test_attr_float = FieldAttribute(isa='float', default=3.14159, always_post_validate=True)
test_attr_list = FieldAttribute(isa='list', listof=(str,), always_post_validate=True)
test_attr_mixed_list = FieldAttribute(isa='list', listof=(str, int), always_post_validate=True)
test_attr_list_no_listof = FieldAttribute(isa='list', always_post_validate=True)
test_attr_list_required = FieldAttribute(isa='list', listof=(str,), required=True,
default=list, always_post_validate=True)
test_attr_string = FieldAttribute(isa='string', default='the_test_attr_string_default_value')
test_attr_string_required = FieldAttribute(isa='string', required=True,
default='the_test_attr_string_default_value')
test_attr_percent = FieldAttribute(isa='percent', always_post_validate=True)
test_attr_dict = FieldAttribute(isa='dict', default=lambda: {'a_key': 'a_value'}, always_post_validate=True)
test_attr_class = FieldAttribute(isa='class', class_type=ExampleSubClass)
test_attr_class_post_validate = FieldAttribute(isa='class', class_type=ExampleSubClass,
always_post_validate=True)
test_attr_unknown_isa = FieldAttribute(isa='not_a_real_isa', always_post_validate=True)
test_attr_example = FieldAttribute(isa='string', default='the_default',
always_post_validate=True)
test_attr_none = FieldAttribute(isa='string', always_post_validate=True)
test_attr_preprocess = FieldAttribute(isa='string', default='the default for preprocess')
test_attr_method = FieldAttribute(isa='string', default='some attr with a getter',
always_post_validate=True)
test_attr_method_missing = FieldAttribute(isa='string', default='some attr with a missing getter',
always_post_validate=True)
def _get_attr_test_attr_method(self):
return 'foo bar'
def _validate_test_attr_example(self, attr, name, value):
if not isinstance(value, str):
raise ExampleException('test_attr_example is not a string: %s type=%s' % (value, type(value)))
def _post_validate_test_attr_example(self, attr, value, templar):
after_template_value = templar.template(value)
return after_template_value
def _post_validate_test_attr_none(self, attr, value, templar):
return None
# terrible name, but it is a TestBase subclass for testing subclasses of Base
| BaseSubClass |
python | spyder-ide__spyder | spyder/api/widgets/auxiliary_widgets.py | {
"start": 566,
"end": 1947
} | class ____(QMainWindow, SpyderMainWindowMixin):
"""MainWindow subclass that contains a SpyderDockablePlugin."""
# ---- Signals
# ------------------------------------------------------------------------
sig_closed = Signal()
"""This signal is emitted when the close event is fired."""
sig_window_state_changed = Signal(object)
"""
This signal is emitted when the window state has changed (for instance,
between maximized and minimized states).
Parameters
----------
window_state: Qt.WindowStates
The window state.
"""
def __init__(self, widget):
super().__init__()
self.widget = widget
# To distinguish these windows from the main Spyder one
self.is_window_widget = True
# Setting interface theme
self.setStyleSheet(str(APP_STYLESHEET))
def closeEvent(self, event):
"""Override Qt method to emit a custom `sig_close` signal."""
super().closeEvent(event)
self.sig_closed.emit()
def changeEvent(self, event):
"""
Override Qt method to emit a custom `sig_windowstate_changed` signal
when there's a change in the window state.
"""
if event.type() == QEvent.WindowStateChange:
self.sig_window_state_changed.emit(self.windowState())
super().changeEvent(event)
| SpyderWindowWidget |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/array_ops_test.py | {
"start": 66178,
"end": 67349
} | class ____(test_util.TensorFlowTestCase):
def testSimple(self):
a = array_ops.constant(10)
guarantee_a = array_ops.guarantee_const(a)
self.assertEqual(10, self.evaluate(guarantee_a))
def testVariables(self):
for use_resource in [False, True]:
with self.subTest(use_resource=use_resource):
a = variable_scope.get_variable(
"var_{}".format(use_resource), [],
initializer=init_ops.constant_initializer(10.0),
use_resource=use_resource)
guarantee_a = array_ops.guarantee_const(a)
self.evaluate(a.initializer)
self.assertEqual(10.0, self.evaluate(guarantee_a))
def testResourceRejection(self):
with ops.device("/cpu:0"):
a = variable_scope.get_variable(
"resource_var", [],
initializer=init_ops.constant_initializer(10.0),
use_resource=True)
with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
"cannot be a resource variable"):
guarantee_a = array_ops.guarantee_const(a.handle)
self.evaluate(a.initializer)
self.evaluate(guarantee_a)
| GuaranteeConstOpTest |
python | django__django | tests/sitemaps_tests/test_generic.py | {
"start": 238,
"end": 3841
} | class ____(SitemapTestsBase):
def test_generic_sitemap_attributes(self):
datetime_value = datetime.now()
queryset = TestModel.objects.all()
generic_sitemap = GenericSitemap(
info_dict={
"queryset": queryset,
"date_field": datetime_value,
},
priority=0.6,
changefreq="monthly",
protocol="https",
)
attr_values = (
("date_field", datetime_value),
("priority", 0.6),
("changefreq", "monthly"),
("protocol", "https"),
)
for attr_name, expected_value in attr_values:
with self.subTest(attr_name=attr_name):
self.assertEqual(getattr(generic_sitemap, attr_name), expected_value)
self.assertCountEqual(generic_sitemap.queryset, queryset)
def test_generic_sitemap(self):
"A minimal generic sitemap can be rendered"
response = self.client.get("/generic/sitemap.xml")
expected = ""
for pk in TestModel.objects.values_list("id", flat=True):
expected += "<url><loc>%s/testmodel/%s/</loc></url>" % (self.base_url, pk)
expected_content = (
'<?xml version="1.0" encoding="UTF-8"?>\n'
'<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" '
'xmlns:xhtml="http://www.w3.org/1999/xhtml">\n'
"%s\n"
"</urlset>"
) % expected
self.assertXMLEqual(response.text, expected_content)
def test_generic_sitemap_lastmod(self):
test_model = TestModel.objects.first()
TestModel.objects.update(lastmod=datetime(2013, 3, 13, 10, 0, 0))
response = self.client.get("/generic-lastmod/sitemap.xml")
expected_content = (
'<?xml version="1.0" encoding="UTF-8"?>\n'
'<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" '
'xmlns:xhtml="http://www.w3.org/1999/xhtml">\n'
"<url><loc>%s/testmodel/%s/</loc><lastmod>2013-03-13</lastmod></url>\n"
"</urlset>"
) % (
self.base_url,
test_model.pk,
)
self.assertXMLEqual(response.text, expected_content)
self.assertEqual(
response.headers["Last-Modified"], "Wed, 13 Mar 2013 10:00:00 GMT"
)
def test_get_protocol_defined_in_constructor(self):
for protocol in ["http", "https"]:
with self.subTest(protocol=protocol):
sitemap = GenericSitemap({"queryset": None}, protocol=protocol)
self.assertEqual(sitemap.get_protocol(), protocol)
def test_get_protocol_passed_as_argument(self):
sitemap = GenericSitemap({"queryset": None})
for protocol in ["http", "https"]:
with self.subTest(protocol=protocol):
self.assertEqual(sitemap.get_protocol(protocol), protocol)
def test_get_protocol_default(self):
sitemap = GenericSitemap({"queryset": None})
self.assertEqual(sitemap.get_protocol(), "https")
def test_generic_sitemap_index(self):
TestModel.objects.update(lastmod=datetime(2013, 3, 13, 10, 0, 0))
response = self.client.get("/generic-lastmod/index.xml")
expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>http://example.com/simple/sitemap-generic.xml</loc><lastmod>2013-03-13T10:00:00</lastmod></sitemap>
</sitemapindex>"""
self.assertXMLEqual(response.text, expected_content)
| GenericViewsSitemapTests |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_vector_index.py | {
"start": 10452,
"end": 14183
} | class ____:
@staticmethod
def pq(
bit_compression: Optional[bool] = None,
centroids: Optional[int] = None,
encoder_distribution: Optional[PQEncoderDistribution] = None,
encoder_type: Optional[PQEncoderType] = None,
segments: Optional[int] = None,
training_limit: Optional[int] = None,
) -> _PQConfigCreate:
"""Create a `_PQConfigCreate` object to be used when defining the product quantization (PQ) configuration of Weaviate.
Use this method when defining the `quantizer` argument in the `vector_index` configuration.
Args:
See [the docs](https://weaviate.io/developers/weaviate/concepts/vector-index#hnsw-with-compression) for a more detailed view!
""" # noqa: D417 (missing argument descriptions in the docstring)
if bit_compression is not None:
_Warnings.bit_compression_in_pq_config()
return _PQConfigCreate(
centroids=centroids,
segments=segments,
trainingLimit=training_limit,
encoder=_PQEncoderConfigCreate(type_=encoder_type, distribution=encoder_distribution),
)
@staticmethod
def bq(
cache: Optional[bool] = None,
rescore_limit: Optional[int] = None,
) -> _BQConfigCreate:
"""Create a `_BQConfigCreate` object to be used when defining the binary quantization (BQ) configuration of Weaviate.
Use this method when defining the `quantizer` argument in the `vector_index` configuration. Note that the arguments have no effect for HNSW.
Args:
See [the docs](https://weaviate.io/developers/weaviate/concepts/vector-index#binary-quantization) for a more detailed view!
""" # noqa: D417 (missing argument descriptions in the docstring)
return _BQConfigCreate(
cache=cache,
rescoreLimit=rescore_limit,
)
@staticmethod
def sq(
cache: Optional[bool] = None,
rescore_limit: Optional[int] = None,
training_limit: Optional[int] = None,
) -> _SQConfigCreate:
"""Create a `_SQConfigCreate` object to be used when defining the scalar quantization (SQ) configuration of Weaviate.
Use this method when defining the `quantizer` argument in the `vector_index` configuration. Note that the arguments have no effect for HNSW.
Args:
See [the docs](https://weaviate.io/developers/weaviate/concepts/vector-index#binary-quantization) for a more detailed view!
""" # noqa: D417 (missing argument descriptions in the docstring)
return _SQConfigCreate(
cache=cache,
rescoreLimit=rescore_limit,
trainingLimit=training_limit,
)
@staticmethod
def rq(
cache: Optional[bool] = None,
bits: Optional[int] = None,
rescore_limit: Optional[int] = None,
) -> _RQConfigCreate:
"""Create a `_RQConfigCreate` object to be used when defining the Rotational quantization (RQ) configuration of Weaviate.
Use this method when defining the `quantizer` argument in the `vector_index` configuration. Note that the arguments have no effect for HNSW.
Arguments:
See [the docs](https://weaviate.io/developers/weaviate/concepts/vector-index) for a more detailed view!
""" # noqa: D417 (missing argument descriptions in the docstring)
return _RQConfigCreate(
cache=cache,
bits=bits,
rescoreLimit=rescore_limit,
)
@staticmethod
def none() -> _UncompressedConfigCreate:
"""Create a a vector index without compression."""
return _UncompressedConfigCreate()
| _VectorIndexQuantizer |
python | readthedocs__readthedocs.org | readthedocs/embed/v3/tests/test_access.py | {
"start": 878,
"end": 4242
} | class ____(TestCase):
def setUp(self):
self.user = get(User)
self.project = get(
Project,
slug="docs",
privacy_level=PUBLIC,
users=[self.user],
)
self.version = self.project.versions.get(slug=LATEST)
self.version.privacy_level = PUBLIC
self.version.save()
self.url = (
reverse("embed_api_v3") + "?url=https://docs.readthedocs.io/en/latest/"
)
self.content = """
<html>
<div role=main>
Content
</div>
</html>
"""
def get(self, *args, **kwargs):
"""Wrapper around ``client.get`` to be overridden in the proxied api tests."""
return self.client.get(*args, **kwargs)
def _mock_open(self, content):
@contextmanager
def f(*args, **kwargs):
read_mock = mock.MagicMock()
read_mock.read.return_value = content
yield read_mock
return f
def _mock_storage(self, storage_mock):
storage_mock.open.side_effect = self._mock_open(self.content)
def test_get_content_public_version_anonymous_user(self, storage_mock):
self._mock_storage(storage_mock)
self.client.logout()
resp = self.get(self.url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Content", resp.json()["content"])
self.assertNotIn(ACCESS_CONTROL_ALLOW_ORIGIN, resp.headers)
def test_get_content_private_version_anonymous_user(self, storage_mock):
self._mock_storage(storage_mock)
self.version.privacy_level = PRIVATE
self.version.save()
self.client.logout()
resp = self.get(self.url)
self.assertEqual(resp.status_code, 403)
def test_get_content_public_version_logged_in_user(self, storage_mock):
self._mock_storage(storage_mock)
self.client.force_login(self.user)
resp = self.get(self.url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Content", resp.json()["content"])
self.assertNotIn(ACCESS_CONTROL_ALLOW_ORIGIN, resp.headers)
def test_get_content_private_version_logged_in_user(self, storage_mock):
self._mock_storage(storage_mock)
self.version.privacy_level = PRIVATE
self.version.save()
self.client.force_login(self.user)
resp = self.get(self.url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Content", resp.json()["content"])
self.assertNotIn(ACCESS_CONTROL_ALLOW_ORIGIN, resp.headers)
@mock.patch.object(EmbedAPIBase, "_download_page_content")
def test_get_content_allowed_external_page(
self, download_page_content, storage_mock
):
download_page_content.return_value = self.content
resp = self.get(
reverse("embed_api_v3") + "?url=https://docs.python.org/en/latest/"
)
self.assertEqual(resp.status_code, 200)
self.assertIn("Content", resp.json()["content"])
self.assertNotIn(ACCESS_CONTROL_ALLOW_ORIGIN, resp.headers)
def test_get_content_not_allowed_external_page(self, storage_mock):
resp = self.get(reverse("embed_api_v3") + "?url=https://example.com/en/latest/")
self.assertEqual(resp.status_code, 400)
@pytest.mark.proxito
| TestEmbedAPIV3Access |
python | weaviate__weaviate-python-client | weaviate/collections/batch/base.py | {
"start": 2093,
"end": 2999
} | class ____(ABC, Generic[TBatchInput, TBatchReturn]):
"""`BatchRequest` abstract class used as a interface for batch requests."""
def __init__(self) -> None:
self._items: List[TBatchInput] = []
self._lock = threading.Lock()
def __len__(self) -> int:
return len(self._items)
def add(self, item: TBatchInput) -> None:
"""Add an item to the BatchRequest."""
self._lock.acquire()
self._items.append(item)
self._lock.release()
def prepend(self, item: List[TBatchInput]) -> None:
"""Add items to the front of the BatchRequest.
This is intended to be used when objects should be retries, eg. after a temporary error.
"""
self._lock.acquire()
self._items = item + self._items
self._lock.release()
Ref = TypeVar("Ref", bound=Union[_BatchReference, batch_pb2.BatchReference])
| BatchRequest |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_raw_message_start_event.py | {
"start": 239,
"end": 343
} | class ____(BaseModel):
message: BetaMessage
type: Literal["message_start"]
| BetaRawMessageStartEvent |
python | pypa__pip | tests/unit/test_utils_compatibility_tags.py | {
"start": 1579,
"end": 2413
} | class ____:
@pytest.mark.parametrize(
"manylinux2010,manylinux1",
[
("manylinux2010_x86_64", "manylinux1_x86_64"),
("manylinux2010_i686", "manylinux1_i686"),
],
)
def test_manylinux2010_implies_manylinux1(
self, manylinux2010: str, manylinux1: str
) -> None:
"""
Specifying manylinux2010 implies manylinux1.
"""
groups: dict[tuple[str, str], list[str]] = {}
supported = compatibility_tags.get_supported(platforms=[manylinux2010])
for tag in supported:
groups.setdefault((tag.interpreter, tag.abi), []).append(tag.platform)
for arches in groups.values():
if arches == ["any"]:
continue
assert arches[:2] == [manylinux2010, manylinux1]
| TestManylinux2010Tags |
python | wandb__wandb | tests/unit_tests/test_launch/test_inputs/test_internal.py | {
"start": 3553,
"end": 25875
} | class ____(BaseModel):
trainer: Trainer
def test_validate_schema_pydantic_lists():
class Item(BaseModel):
name: str
epochs: int = Field(ge=1)
class GenericLists(BaseModel):
# TODO: Only list of enums are supported for now
# tags: list[str] = Field(min_length=0, max_length=10)
# probs: list[float] = Field(min_length=1)
# items: list[Item] = Field(min_length=1)
# dicts: list[dict[str, str]] = Field(min_length=1)
enums: list[DatasetEnum] = Field(min_length=1)
enums_no_bounds: list[DatasetEnum] = Field()
prepared_schema = _prepare_schema(GenericLists)
props = prepared_schema["properties"]
assert props["enums"]["type"] == "array"
assert props["enums"]["items"]["type"] == "string"
assert props["enums_no_bounds"]["type"] == "array"
assert props["enums_no_bounds"]["items"]["type"] == "string"
_validate_schema(prepared_schema)
def test_validate_schema_pydantic_sets():
"""Generic Pydantic sets map to JSON Schema arrays properly."""
class Item(BaseModel):
name: str
epochs: int = Field(ge=1)
class GenericSets(BaseModel):
# TODO: Only set of enums are supported for now
# tags: set[str] = Field(min_length=0, max_length=10)
# probs: set[float] = Field(min_length=1)
# items: set[Item] = Field(min_length=1)
# dicts: set[dict[str, str]] = Field(min_length=1)
enums: set[DatasetEnum] = Field(min_length=1)
enums_no_bounds: set[DatasetEnum] = Field()
prepared_schema = _prepare_schema(GenericSets)
props = prepared_schema["properties"]
assert props["enums"]["type"] == "array"
assert props["enums"]["items"]["type"] == "string"
assert props["enums_no_bounds"]["type"] == "array"
assert props["enums_no_bounds"]["items"]["type"] == "string"
_validate_schema(prepared_schema)
@pytest.mark.parametrize(
"path, expected",
[
(r"path", ["path"]),
(r"path.with.dot", ["path", "with", "dot"]),
(r"path\.with\.esc.dot", ["path.with.esc", "dot"]),
(r"path\.with.esc\.dot", ["path.with", "esc.dot"]),
(r"path.with\.esc.dot", ["path", "with.esc", "dot"]),
],
)
def test_split_on_unesc_dot(path, expected):
"""Test _split_on_unesc_dot function."""
assert _split_on_unesc_dot(path) == expected
def test_split_on_unesc_dot_trailing_backslash():
"""Test _split_on_unesc_dot function with trailing backslash."""
with pytest.raises(LaunchError):
_split_on_unesc_dot("path\\")
def test_config_tmp_dir():
"""Test ConfigTmpDir class."""
config_dir = ConfigTmpDir()
assert config_dir.tmp_dir.is_dir()
assert config_dir.configs_dir.is_dir()
assert config_dir.tmp_dir != config_dir.configs_dir
def test_job_input_arguments():
"""Test JobInputArguments class."""
arguments = JobInputArguments(
include=["include"], exclude=["exclude"], file_path="path", run_config=True
)
assert arguments.include == ["include"]
assert arguments.exclude == ["exclude"]
assert arguments.file_path == "path"
assert arguments.run_config is True
def test_publish_job_input(mocker):
"""Test _publish_job_input function."""
run = mocker.MagicMock()
run._backend.interface = mocker.MagicMock()
arguments = JobInputArguments(
include=["include"], exclude=["exclude"], file_path="path", run_config=True
)
_publish_job_input(arguments, run)
run._backend.interface.publish_job_input.assert_called_once_with(
include_paths=[["include"]],
exclude_paths=[["exclude"]],
run_config=True,
input_schema=None,
file_path="path",
)
def test_replace_refs_and_allofs(test_json_schema, expected_json_schema):
defs = test_json_schema.pop("$defs")
resp = _replace_refs_and_allofs(test_json_schema, defs)
assert resp == expected_json_schema
def test_handle_config_file_input(mocker):
"""Test handle_config_file_input function."""
mocker.patch("wandb.sdk.launch.inputs.internal.override_file")
mocker.patch("wandb.sdk.launch.inputs.internal.config_path_is_valid")
mocker.patch("wandb.sdk.launch.inputs.internal.ConfigTmpDir")
mocker.patch("wandb.sdk.launch.inputs.internal.shutil.copy")
wandb_run = MagicMock()
mocker.patch("wandb.sdk.launch.inputs.internal.wandb.run", wandb_run)
handle_config_file_input("path", include=["include"], exclude=["exclude"])
wandb_run._backend.interface.publish_job_input.assert_called_once_with(
include_paths=[["include"]],
exclude_paths=[["exclude"]],
run_config=False,
input_schema=None,
file_path="path",
)
@pytest.mark.skipif(
sys.version_info < (3, 9),
reason="Pydantic versions <2.4 doesn't support json schema",
)
@pytest.mark.skipif(
platform.system().lower() == "windows",
reason="Doesn't work on Windows",
)
def test_handle_config_file_input_pydantic(
mocker,
expected_json_schema,
):
"""Test handle_config_file_input function with a Pydantic model schema."""
mocker.patch("wandb.sdk.launch.inputs.internal.override_file")
mocker.patch("wandb.sdk.launch.inputs.internal.config_path_is_valid")
mocker.patch("wandb.sdk.launch.inputs.internal.ConfigTmpDir")
mocker.patch("wandb.sdk.launch.inputs.internal.shutil.copy")
wandb_run = MagicMock()
mocker.patch("wandb.sdk.launch.inputs.internal.wandb.run", wandb_run)
handle_config_file_input(
"path", include=["include"], exclude=["exclude"], schema=ExampleSchema
)
wandb_run._backend.interface.publish_job_input.assert_called_once_with(
include_paths=[["include"]],
exclude_paths=[["exclude"]],
run_config=False,
input_schema=expected_json_schema,
file_path="path",
)
def test_handle_run_config_input(mocker):
"""Test handle_run_config_input function."""
wandb_run = mocker.MagicMock()
wandb_run._backend.interface = mocker.MagicMock()
mocker.patch("wandb.sdk.launch.inputs.internal.wandb.run", wandb_run)
handle_run_config_input(include=["include"], exclude=["exclude"])
wandb_run._backend.interface.publish_job_input.assert_called_once_with(
include_paths=[["include"]],
exclude_paths=[["exclude"]],
run_config=True,
input_schema=None,
file_path="",
)
def test_handle_config_file_input_staged(mocker, reset_staged_inputs):
"""Test that config file input is staged when run is not available."""
mocker.patch("wandb.sdk.launch.inputs.internal.wandb.run", None)
mocker.patch("wandb.sdk.launch.inputs.internal.override_file")
mocker.patch("wandb.sdk.launch.inputs.internal.config_path_is_valid")
mocker.patch("wandb.sdk.launch.inputs.internal.ConfigTmpDir")
mocker.patch("wandb.sdk.launch.inputs.internal.shutil.copy")
handle_config_file_input("path", include=["include"], exclude=["exclude"])
staged_inputs = StagedLaunchInputs()._staged_inputs
assert len(staged_inputs) == 1
config_file = staged_inputs[0]
assert config_file.include == ["include"]
assert config_file.exclude == ["exclude"]
assert config_file.file_path == "path"
assert config_file.run_config is False
def test_handle_run_config_input_staged(mocker, reset_staged_inputs):
"""Test that run config input is staged when run is not available."""
mocker.patch("wandb.sdk.launch.inputs.internal.wandb.run", None)
handle_run_config_input(include=["include"], exclude=["exclude"])
staged_inputs = StagedLaunchInputs()._staged_inputs
assert len(staged_inputs) == 1
run_config = staged_inputs[0]
assert run_config.include == ["include"]
assert run_config.exclude == ["exclude"]
assert run_config.file_path is None
assert run_config.run_config is True
@pytest.mark.parametrize(
"schema, expected",
[
# --- Passing cases ---
# Basic test
({"type": "object", "properties": {"key1": {"type": "integer"}}}, []),
# Test using all supported keys + nested schemas
(
{
"type": "object",
"properties": {
"key1": {"type": "integer", "minimum": 3, "exclusiveMaximum": 6.0},
"key2": {"type": "number", "exclusiveMinimum": 1.2, "maximum": 3},
"key3": {
"type": "object",
"properties": {
"key3": {
"type": "string",
"title": "My cool string",
"description": "It is cool",
"enum": ["value-1", "value-2"],
},
"key4": {"type": "integer", "enum": [3, 4, 5]},
"key5": {"type": "boolean"},
},
},
},
},
[],
),
# --- Secret format tests ---
# Test basic secret field
(
{
"type": "object",
"properties": {
"api_key": {
"type": "string",
"format": "secret",
"title": "API Key",
"description": "Secret API key",
}
},
},
[],
),
# Test nested object with secret field
(
{
"type": "object",
"properties": {
"config": {
"type": "object",
"properties": {
"secret_token": {
"type": "string",
"format": "secret",
"description": "Nested secret",
},
"public_key": {
"type": "string",
"description": "Public configuration",
},
},
}
},
},
[],
),
# Test multiple secret fields
(
{
"type": "object",
"properties": {
"api_key": {"type": "string", "format": "secret"},
"db_password": {"type": "string", "format": "secret"},
"regular_field": {"type": "string"},
},
},
[],
),
# --- Placeholder field tests ---
# Test basic placeholder field
(
{
"type": "object",
"properties": {
"username": {
"type": "string",
"placeholder": "Enter your username",
"title": "Username",
"description": "Your account username",
}
},
},
[],
),
# --- Label field tests ---
# Test basic label field
(
{
"type": "object",
"properties": {
"api_key": {
"type": "string",
"label": "API Key",
"placeholder": "sk-...",
"required": True,
"format": "secret",
}
},
},
[],
),
# Test nested object with label and placeholder fields
(
{
"type": "object",
"properties": {
"database": {
"type": "object",
"label": "Database Configuration",
"properties": {
"host": {
"type": "string",
"label": "Database Host",
"placeholder": "localhost",
"description": "Database host",
},
"port": {
"type": "integer",
"label": "Database Port",
"placeholder": "5432",
"minimum": 1,
},
},
}
},
},
[],
),
# --- Required field tests ---
# Test basic required field
(
{
"type": "object",
"properties": {
"api_key": {
"type": "string",
"required": True,
"title": "API Key",
"description": "Required API key",
},
"optional_field": {
"type": "string",
"required": False,
"description": "Optional field",
},
},
},
[],
),
# Test required field with different types
(
{
"type": "object",
"properties": {
"count": {
"type": "integer",
"required": True,
"minimum": 1,
},
"threshold": {
"type": "number",
"required": True,
"minimum": 0.0,
},
"active": {
"type": "boolean",
"required": False,
},
},
},
[],
),
# Test nested object with required fields
(
{
"type": "object",
"properties": {
"config": {
"type": "object",
"properties": {
"name": {
"type": "string",
"required": True,
"description": "Configuration name",
},
"version": {
"type": "string",
"required": False,
"placeholder": "1.0.0",
},
},
}
},
},
[],
),
# --- Warning cases ---
# Test using a float as a minimum for an integer
(
{
"type": "object",
"properties": {"key1": {"type": "integer", "minimum": 1.5}},
},
["1.5 is not of type 'integer'"],
),
# Test setting "minimum" on a type that doesn't support it
(
{
"type": "object",
"properties": {"key1": {"type": "string", "minimum": 1}},
},
["Unevaluated properties are not allowed ('minimum' was unexpected)"],
),
# Test using an unsupported key
(
{
"type": "object",
"properties": {"key1": {"type": "integer", "default": 5}},
},
["Unevaluated properties are not allowed ('default' was unexpected)"],
),
# --- Placeholder field tests for all types ---
# Test placeholder on boolean field
(
{
"type": "object",
"properties": {
"field1": {
"type": "boolean",
"placeholder": "Enable this feature",
}
},
},
[],
),
# Test placeholder on array field
(
{
"type": "object",
"properties": {
"field1": {
"type": "array",
"items": {
"type": "string",
"enum": ["a", "b", "c"],
},
"placeholder": "Select options...",
}
},
},
[],
),
# --- Invalid UI field tests ---
# Test placeholder with wrong type (must be string)
(
{
"type": "object",
"properties": {
"field1": {
"type": "string",
"placeholder": 123, # Should be string
}
},
},
["123 is not of type 'string'"],
),
# Test label with wrong type (must be string)
(
{
"type": "object",
"properties": {
"field1": {
"type": "string",
"label": 456, # Should be string
}
},
},
["456 is not of type 'string'"],
),
# --- Array passing cases ---
# Array: string enum multi-select with bounds and uniqueness
(
{
"type": "object",
"properties": {
"tags": {
"type": "array",
"items": {
"type": "string",
"enum": ["a", "b", "c"],
},
"uniqueItems": True,
"minItems": 1,
"maxItems": 3,
}
},
},
[],
),
# Array: integer enum multi-select
(
{
"type": "object",
"properties": {
"ids": {
"type": "array",
"items": {
"type": "integer",
"enum": [1, 2, 3],
},
"uniqueItems": True,
}
},
},
[],
),
# Array: number enum multi-select, nested inside object
(
{
"type": "object",
"properties": {
"config": {
"type": "object",
"properties": {
"lrs": {
"type": "array",
"items": {
"type": "number",
"enum": [0.001, 0.01, 0.1],
},
"minItems": 1,
}
},
}
},
},
[],
),
# Array with label, placeholder and required fields
(
{
"type": "object",
"properties": {
"tags": {
"type": "array",
"items": {
"type": "string",
"enum": ["dev", "prod", "test"],
},
"label": "Environment Tags",
"placeholder": "Select environment tags...",
"required": True,
"minItems": 1,
"uniqueItems": True,
},
"optional_list": {
"type": "array",
"items": {
"type": "integer",
"enum": [1, 2, 3, 4, 5],
},
"label": "Optional Numbers",
"placeholder": "Choose numbers (optional)",
"required": False,
},
},
},
[],
),
# --- Array warning cases ---
# Array warning: unsupported 'contains'
(
{
"type": "object",
"properties": {
"arr": {
"type": "array",
"contains": {"type": "number"},
}
},
},
["Unevaluated properties are not allowed ('contains' was unexpected)"],
),
# Array warning: unsupported 'prefixItems'
(
{
"type": "object",
"properties": {
"tuple_like": {
"type": "array",
"prefixItems": [
{"type": "string"},
{"type": "number"},
],
}
},
},
["Unevaluated properties are not allowed ('prefixItems' was unexpected)"],
),
# Array warning: minItems wrong type
(
{
"type": "object",
"properties": {
"vals": {
"type": "array",
"items": {"type": "string"},
"minItems": 1.5,
}
},
},
["1.5 is not of type 'integer'"],
),
],
)
def test_validate_schema(mocker, mock_wandb_log, schema, expected):
"""Test that valid schemas show no warnings, and invalid schemas do."""
_validate_schema(schema)
warns = "".join(mock_wandb_log._logs(mock_wandb_log._termwarn))
for e in expected:
assert e in warns
if not expected:
assert not warns
| ExampleSchema |
python | huggingface__transformers | src/transformers/models/mra/modeling_mra.py | {
"start": 20850,
"end": 25231
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
kernel_loaded = mra_cuda_kernel is not None
if is_torch_cuda_available() and is_cuda_platform() and is_ninja_available() and not kernel_loaded:
try:
load_cuda_kernels()
except Exception as e:
logger.warning(f"Could not load the custom kernel for multi-scale deformable attention: {e}")
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.num_block = (config.max_position_embeddings // 32) * config.block_per_row
self.num_block = min(self.num_block, int((config.max_position_embeddings // 32) ** 2))
self.approx_mode = config.approx_mode
self.initial_prior_first_n_blocks = config.initial_prior_first_n_blocks
self.initial_prior_diagonal_n_blocks = config.initial_prior_diagonal_n_blocks
def forward(self, hidden_states, attention_mask=None):
batch_size, seq_len, _ = hidden_states.shape
query_layer = (
self.query(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
key_layer = (
self.key(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
value_layer = (
self.value(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
# revert changes made by get_extended_attention_mask
attention_mask = 1.0 + attention_mask / 10000.0
attention_mask = (
attention_mask.squeeze()
.repeat(1, self.num_attention_heads, 1)
.reshape(batch_size * self.num_attention_heads, seq_len)
.int()
)
# The CUDA kernels are most efficient with inputs whose size is a multiple of a GPU's warp size (32). Inputs
# smaller than this are padded with zeros.
gpu_warp_size = 32
if self.attention_head_size < gpu_warp_size:
pad_size = batch_size, self.num_attention_heads, seq_len, gpu_warp_size - self.attention_head_size
query_layer = torch.cat([query_layer, torch.zeros(pad_size, device=query_layer.device)], dim=-1)
key_layer = torch.cat([key_layer, torch.zeros(pad_size, device=key_layer.device)], dim=-1)
value_layer = torch.cat([value_layer, torch.zeros(pad_size, device=value_layer.device)], dim=-1)
context_layer = mra2_attention(
query_layer.float(),
key_layer.float(),
value_layer.float(),
attention_mask.float(),
self.num_block,
approx_mode=self.approx_mode,
initial_prior_first_n_blocks=self.initial_prior_first_n_blocks,
initial_prior_diagonal_n_blocks=self.initial_prior_diagonal_n_blocks,
)
if self.attention_head_size < gpu_warp_size:
context_layer = context_layer[:, :, :, : self.attention_head_size]
context_layer = context_layer.reshape(batch_size, self.num_attention_heads, seq_len, self.attention_head_size)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer,)
return outputs
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
| MraSelfAttention |
python | walkccc__LeetCode | solutions/3420. Count Non-Decreasing Subarrays After K Operations/3420-2.py | {
"start": 0,
"end": 666
} | class ____:
def countNonDecreasingSubarrays(self, nums: list[int], k: int) -> int:
ans = 0
cost = 0
# Store indices (i) of nums with nums[i] in non-increasing order.
dq = collections.deque()
j = len(nums) - 1
for i, num in reversed(list(enumerate(nums))):
while dq and nums[dq[-1]] < num:
l = dq.pop()
r = dq[-1] if dq else j + 1
cost += (r - l) * (num - nums[l]) # Adjust `nums[l]` to `num`.
dq.append(i)
while cost > k: # Remove the rightmost number.
cost -= nums[dq[0]] - nums[j]
if dq[0] == j:
dq.popleft()
j -= 1
ans += j - i + 1
return ans
| Solution |
python | jina-ai__jina | tests/integration/docarray_v2/test_streaming.py | {
"start": 671,
"end": 3592
} | class ____(Executor):
@requests(on='/task1')
async def task1(self, doc: MyDocument, **kwargs) -> OutputDocument:
for i in range(100):
yield OutputDocument(text=f'{doc.text} {doc.number}-{i}-task1')
@requests(on='/task2')
async def task2(
self, doc: MyDocument, **kwargs
) -> Generator[OutputDocument, None, None]:
for i in range(100):
yield OutputDocument(text=f'{doc.text} {doc.number}-{i}-task2')
@requests(on='/task3')
async def task3(
self, doc: MyDocument, **kwargs
) -> AsyncGenerator[OutputDocument, None]:
for i in range(100):
yield OutputDocument(text=f'{doc.text} {doc.number}-{i}-task3')
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
@pytest.mark.parametrize('include_gateway', [False, True])
async def test_streaming_deployment(protocol, include_gateway):
port = random_port()
with Deployment(
uses=MyExecutor,
protocol=protocol,
cors=True,
port=port,
include_gateway=include_gateway,
):
client = Client(port=port, protocol=protocol, cors=True, asyncio=True)
i = 10
async for doc in client.stream_doc(
on='/hello',
inputs=MyDocument(text='hello world', number=i),
return_type=MyDocument,
):
assert doc.text == f'hello world {i}'
i += 1
assert doc.input_type_name == 'MyDocumentType'
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
async def test_streaming_flow(protocol):
port = random_port()
with Flow(protocol=protocol, port=port, cors=True).add(
uses=MyExecutor,
):
client = Client(port=port, protocol=protocol, asyncio=True)
i = 10
async for doc in client.stream_doc(
on='/hello',
inputs=MyDocument(text='hello world', number=i),
return_type=MyDocument,
):
assert doc.text == f'hello world {i}'
i += 1
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
@pytest.mark.parametrize('endpoint', ['task1', 'task2', 'task3'])
@pytest.mark.parametrize('include_gateway', [False, True])
async def test_streaming_custom_response(protocol, endpoint, include_gateway):
port = random_port()
with Deployment(
uses=CustomResponseExecutor,
protocol=protocol,
cors=True,
port=port,
include_gateway=include_gateway,
):
client = Client(port=port, protocol=protocol, cors=True, asyncio=True)
i = 0
async for doc in client.stream_doc(
on=f'/{endpoint}',
inputs=MyDocument(text='hello world', number=5),
return_type=OutputDocument,
):
assert doc.text == f'hello world 5-{i}-{endpoint}'
i += 1
| CustomResponseExecutor |
python | pandas-dev__pandas | pandas/tests/indexes/timedeltas/test_formats.py | {
"start": 92,
"end": 3855
} | class ____:
def test_repr_round_days_non_nano(self):
# GH#55405
# we should get "1 days", not "1 days 00:00:00" with non-nano
tdi = TimedeltaIndex(["1 days"], freq="D").as_unit("s")
result = repr(tdi)
expected = "TimedeltaIndex(['1 days'], dtype='timedelta64[s]', freq='D')"
assert result == expected
result2 = repr(Series(tdi))
expected2 = "0 1 days\ndtype: timedelta64[s]"
assert result2 == expected2
@pytest.mark.parametrize("method", ["__repr__", "__str__"])
def test_representation(self, method):
idx1 = TimedeltaIndex([], freq="D", dtype="m8[ns]")
idx2 = TimedeltaIndex(["1 days"], freq="D")
idx3 = TimedeltaIndex(["1 days", "2 days"], freq="D")
idx4 = TimedeltaIndex(["1 days", "2 days", "3 days"], freq="D")
idx5 = TimedeltaIndex(["1 days 00:00:01", "2 days", "3 days"])
exp1 = "TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"
exp2 = "TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', freq='D')"
exp3 = "TimedeltaIndex(['1 days', '2 days'], dtype='timedelta64[ns]', freq='D')"
exp4 = (
"TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')"
)
exp5 = (
"TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)"
)
with pd.option_context("display.width", 300):
for idx, expected in zip(
[idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5]
):
result = getattr(idx, method)()
assert result == expected
# TODO: this is a Series.__repr__ test
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq="D", dtype="m8[ns]")
idx2 = TimedeltaIndex(["1 days"], freq="D")
idx3 = TimedeltaIndex(["1 days", "2 days"], freq="D")
idx4 = TimedeltaIndex(["1 days", "2 days", "3 days"], freq="D")
idx5 = TimedeltaIndex(["1 days 00:00:01", "2 days", "3 days"])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = "0 1 days\ndtype: timedelta64[ns]"
exp3 = "0 1 days\n1 2 days\ndtype: timedelta64[ns]"
exp4 = "0 1 days\n1 2 days\n2 3 days\ndtype: timedelta64[ns]"
exp5 = (
"0 1 days 00:00:01\n"
"1 2 days 00:00:00\n"
"2 3 days 00:00:00\n"
"dtype: timedelta64[ns]"
)
with pd.option_context("display.width", 300):
for idx, expected in zip(
[idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5]
):
result = repr(Series(idx))
assert result == expected
def test_summary(self):
# GH#9116
idx1 = TimedeltaIndex([], freq="D", dtype="m8[ns]")
idx2 = TimedeltaIndex(["1 days"], freq="D")
idx3 = TimedeltaIndex(["1 days", "2 days"], freq="D")
idx4 = TimedeltaIndex(["1 days", "2 days", "3 days"], freq="D")
idx5 = TimedeltaIndex(["1 days 00:00:01", "2 days", "3 days"])
exp1 = "TimedeltaIndex: 0 entries\nFreq: D"
exp2 = "TimedeltaIndex: 1 entries, 1 days to 1 days\nFreq: D"
exp3 = "TimedeltaIndex: 2 entries, 1 days to 2 days\nFreq: D"
exp4 = "TimedeltaIndex: 3 entries, 1 days to 3 days\nFreq: D"
exp5 = "TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days 00:00:00"
for idx, expected in zip(
[idx1, idx2, idx3, idx4, idx5], [exp1, exp2, exp3, exp4, exp5]
):
result = idx._summary()
assert result == expected
| TestTimedeltaIndexRendering |
python | falconry__falcon | falcon/asgi_spec.py | {
"start": 736,
"end": 1508
} | class ____:
"""Standard ASGI event type strings."""
HTTP_REQUEST = 'http.request'
HTTP_RESPONSE_START = 'http.response.start'
HTTP_RESPONSE_BODY = 'http.response.body'
HTTP_DISCONNECT = 'http.disconnect'
LIFESPAN_STARTUP = 'lifespan.startup'
LIFESPAN_STARTUP_COMPLETE = 'lifespan.startup.complete'
LIFESPAN_STARTUP_FAILED = 'lifespan.startup.failed'
LIFESPAN_SHUTDOWN = 'lifespan.shutdown'
LIFESPAN_SHUTDOWN_COMPLETE = 'lifespan.shutdown.complete'
LIFESPAN_SHUTDOWN_FAILED = 'lifespan.shutdown.failed'
WS_CONNECT = 'websocket.connect'
WS_ACCEPT = 'websocket.accept'
WS_RECEIVE = 'websocket.receive'
WS_SEND = 'websocket.send'
WS_DISCONNECT = 'websocket.disconnect'
WS_CLOSE = 'websocket.close'
| EventType |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_sort_values.py | {
"start": 180,
"end": 22211
} | class ____:
@pytest.mark.parametrize("dtype", [np.uint8, bool])
def test_sort_values_sparse_no_warning(self, dtype):
# GH#45618
ser = pd.Series(Categorical(["a", "b", "a"], categories=["a", "b", "c"]))
df = pd.get_dummies(ser, dtype=dtype, sparse=True)
with tm.assert_produces_warning(None):
# No warnings about constructing Index from SparseArray
df.sort_values(by=df.columns.tolist())
def test_sort_values(self):
frame = DataFrame(
[[1, 1, 2], [3, 1, 0], [4, 5, 6]], index=[1, 2, 3], columns=list("ABC")
)
# by column (axis=0)
sorted_df = frame.sort_values(by="A")
indexer = frame["A"].argsort().values
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=["A"], ascending=[False])
tm.assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=["B", "C"])
expected = frame.loc[[2, 1, 3]]
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=["B", "C"], ascending=False)
tm.assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=["B", "A"], ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=2, inplace=True)
# by row (axis=1): GH#10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis="columns")
expected = frame.reindex(columns=["B", "A", "C"])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=[True, False])
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=["C", "B", "A"])
tm.assert_frame_equal(sorted_df, expected)
msg = r"Length of ascending \(5\) != length of by \(2\)"
with pytest.raises(ValueError, match=msg):
frame.sort_values(by=["A", "B"], axis=0, ascending=[True] * 5)
def test_sort_values_by_empty_list(self):
# https://github.com/pandas-dev/pandas/issues/40258
expected = DataFrame({"a": [1, 4, 2, 5, 3, 6]})
result = expected.sort_values(by=[])
tm.assert_frame_equal(result, expected)
assert result is not expected
def test_sort_values_inplace(self):
frame = DataFrame(
np.random.default_rng(2).standard_normal((4, 4)),
index=[1, 2, 3, 4],
columns=["A", "B", "C", "D"],
)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", inplace=True)
assert return_value is None
expected = frame.sort_values(by="A")
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by=1, axis=1, inplace=True)
assert return_value is None
expected = frame.sort_values(by=1, axis=1)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(by="A", ascending=False, inplace=True)
assert return_value is None
expected = frame.sort_values(by="A", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
return_value = sorted_df.sort_values(
by=["A", "B"], ascending=False, inplace=True
)
assert return_value is None
expected = frame.sort_values(by=["A", "B"], ascending=False)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_multicolumn(self):
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
np.random.default_rng(2).shuffle(A)
np.random.default_rng(2).shuffle(B)
frame = DataFrame(
{"A": A, "B": B, "C": np.random.default_rng(2).standard_normal(100)}
)
result = frame.sort_values(by=["A", "B"])
indexer = np.lexsort((frame["B"], frame["A"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["A", "B"], ascending=False)
indexer = np.lexsort(
(frame["B"].rank(ascending=False), frame["A"].rank(ascending=False))
)
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
result = frame.sort_values(by=["B", "A"])
indexer = np.lexsort((frame["A"], frame["B"]))
expected = frame.take(indexer)
tm.assert_frame_equal(result, expected)
def test_sort_values_multicolumn_uint64(self):
# GH#9918
# uint64 multicolumn sort
df = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
}
)
df["a"] = df["a"].astype(np.uint64)
result = df.sort_values(["a", "b"])
expected = DataFrame(
{
"a": pd.Series([18446637057563306014, 1162265347240853609]),
"b": pd.Series([1, 2]),
},
index=range(1, -1, -1),
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nan(self):
# GH#3917
df = DataFrame(
{"A": [1, 2, np.nan, 1, 6, 8, 4], "B": [9, np.nan, 5, 2, 5, 4, 5]}
)
# sort one column only
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{"A": [np.nan, 8, 6, 4, 2, 1, 1], "B": [5, 4, 5, 5, np.nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3],
)
sorted_df = df.sort_values(["A"], na_position="first", ascending=False)
tm.assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=["B", "A"])
sorted_df = df.sort_values(by=1, axis=1, na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{"A": [1, 1, 2, 4, 6, 8, np.nan], "B": [2, 9, np.nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2],
)
sorted_df = df.sort_values(["A", "B"])
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 2, 9, np.nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{"A": [np.nan, 1, 1, 2, 4, 6, 8], "B": [5, 9, 2, np.nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5],
)
sorted_df = df.sort_values(["A", "B"], ascending=[1, 0], na_position="first")
tm.assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{"A": [8, 6, 4, 2, 1, 1, np.nan], "B": [4, 5, 5, np.nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2],
)
sorted_df = df.sort_values(["A", "B"], ascending=[0, 1], na_position="last")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_descending_sort(self):
# GH#6399
df = DataFrame(
[[2, "first"], [2, "second"], [1, "a"], [1, "b"]],
columns=["sort_col", "order"],
)
sorted_df = df.sort_values(by="sort_col", kind="mergesort", ascending=False)
tm.assert_frame_equal(df, sorted_df)
@pytest.mark.parametrize(
"expected_idx_non_na, ascending",
[
[
[3, 4, 5, 0, 1, 8, 6, 9, 7, 10, 13, 14],
[True, True],
],
[
[0, 3, 4, 5, 1, 8, 6, 7, 10, 13, 14, 9],
[True, False],
],
[
[9, 7, 10, 13, 14, 6, 8, 1, 3, 4, 5, 0],
[False, True],
],
[
[7, 10, 13, 14, 9, 6, 8, 1, 0, 3, 4, 5],
[False, False],
],
],
)
@pytest.mark.parametrize("na_position", ["first", "last"])
def test_sort_values_stable_multicolumn_sort(
self, expected_idx_non_na, ascending, na_position
):
# GH#38426 Clarify sort_values with mult. columns / labels is stable
df = DataFrame(
{
"A": [1, 2, np.nan, 1, 1, 1, 6, 8, 4, 8, 8, np.nan, np.nan, 8, 8],
"B": [9, np.nan, 5, 2, 2, 2, 5, 4, 5, 3, 4, np.nan, np.nan, 4, 4],
}
)
# All rows with NaN in col "B" only have unique values in "A", therefore,
# only the rows with NaNs in "A" have to be treated individually:
expected_idx = (
[11, 12, 2] + expected_idx_non_na
if na_position == "first"
else expected_idx_non_na + [2, 11, 12]
)
expected = df.take(expected_idx)
sorted_df = df.sort_values(
["A", "B"], ascending=ascending, na_position=na_position
)
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_stable_categorial(self):
# GH#16793
df = DataFrame({"x": Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)})
expected = df.copy()
sorted_df = df.sort_values("x", kind="mergesort")
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_datetimes(self):
# GH#3461, argsort / lexsort differences for a datetime column
df = DataFrame(
["a", "a", "a", "b", "c", "d", "e", "f", "g"],
columns=["A"],
index=date_range("20130101", periods=9),
)
dts = [
Timestamp(x)
for x in [
"2004-02-11",
"2004-01-21",
"2004-01-26",
"2005-09-20",
"2010-10-04",
"2009-05-12",
"2008-11-12",
"2010-09-28",
"2010-09-28",
]
]
df["B"] = dts[::2] + dts[1::2]
df["C"] = 2.0
df["A1"] = 3.0
df1 = df.sort_values(by="A")
df2 = df.sort_values(by=["A"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["B"])
tm.assert_frame_equal(df1, df2)
df1 = df.sort_values(by="B")
df2 = df.sort_values(by=["C", "B"])
tm.assert_frame_equal(df1, df2)
def test_sort_values_frame_column_inplace_sort_exception(self, float_frame):
s = float_frame["A"]
float_frame_orig = float_frame.copy()
# INFO(CoW) Series is a new object, so can be changed inplace
# without modifying original datafame
s.sort_values(inplace=True)
tm.assert_series_equal(s, float_frame_orig["A"].sort_values())
# column in dataframe is not changed
tm.assert_frame_equal(float_frame, float_frame_orig)
cp = s.copy()
cp.sort_values() # it works!
def test_sort_values_nat_values_in_int_column(self):
# GH#14922: "sorting with large float and multiple columns incorrect"
# cause was that the int64 value NaT was considered as "na". Which is
# only correct for datetime64 columns.
int_values = (2, int(NaT._value))
float_values = (2.0, -1.797693e308)
df = DataFrame(
{"int": int_values, "float": float_values}, columns=["int", "float"]
)
df_reversed = DataFrame(
{"int": int_values[::-1], "float": float_values[::-1]},
columns=["int", "float"],
index=range(1, -1, -1),
)
# NaT is not a "na" for int64 columns, so na_position must not
# influence the result:
df_sorted = df.sort_values(["int", "float"], na_position="last")
tm.assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["int", "float"], na_position="first")
tm.assert_frame_equal(df_sorted, df_reversed)
# reverse sorting order
df_sorted = df.sort_values(["int", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
# and now check if NaT is still considered as "na" for datetime64
# columns:
df = DataFrame(
{"datetime": [Timestamp("2016-01-01"), NaT], "float": float_values},
columns=["datetime", "float"],
)
df_reversed = DataFrame(
{"datetime": [NaT, Timestamp("2016-01-01")], "float": float_values[::-1]},
columns=["datetime", "float"],
index=range(1, -1, -1),
)
df_sorted = df.sort_values(["datetime", "float"], na_position="first")
tm.assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["datetime", "float"], na_position="last")
tm.assert_frame_equal(df_sorted, df)
# Ascending should not affect the results.
df_sorted = df.sort_values(["datetime", "float"], ascending=False)
tm.assert_frame_equal(df_sorted, df)
def test_sort_nat(self):
# GH 16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
Timestamp(x)
for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
]
df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
d4 = [
Timestamp(x)
for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
]
expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=["a", "b"])
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_na_position_with_categories(self):
# GH#22556
# Positioning missing value properly when column is Categorical.
categories = ["A", "B", "C"]
category_indices = [0, 2, 4]
list_of_nans = [np.nan, np.nan]
na_indices = [1, 3]
na_position_first = "first"
na_position_last = "last"
column_name = "c"
reversed_categories = sorted(categories, reverse=True)
reversed_category_indices = sorted(category_indices, reverse=True)
reversed_na_indices = sorted(na_indices)
df = DataFrame(
{
column_name: Categorical(
["A", np.nan, "B", np.nan, "C"], categories=categories, ordered=True
)
}
)
# sort ascending with na first
result = df.sort_values(
by=column_name, ascending=True, na_position=na_position_first
)
expected = DataFrame(
{
column_name: Categorical(
list_of_nans + categories, categories=categories, ordered=True
)
},
index=na_indices + category_indices,
)
tm.assert_frame_equal(result, expected)
# sort ascending with na last
result = df.sort_values(
by=column_name, ascending=True, na_position=na_position_last
)
expected = DataFrame(
{
column_name: Categorical(
categories + list_of_nans, categories=categories, ordered=True
)
},
index=category_indices + na_indices,
)
tm.assert_frame_equal(result, expected)
# sort descending with na first
result = df.sort_values(
by=column_name, ascending=False, na_position=na_position_first
)
expected = DataFrame(
{
column_name: Categorical(
list_of_nans + reversed_categories,
categories=categories,
ordered=True,
)
},
index=reversed_na_indices + reversed_category_indices,
)
tm.assert_frame_equal(result, expected)
# sort descending with na last
result = df.sort_values(
by=column_name, ascending=False, na_position=na_position_last
)
expected = DataFrame(
{
column_name: Categorical(
reversed_categories + list_of_nans,
categories=categories,
ordered=True,
)
},
index=reversed_category_indices + reversed_na_indices,
)
tm.assert_frame_equal(result, expected)
def test_sort_values_nat(self):
# GH#16836
d1 = [Timestamp(x) for x in ["2016-01-01", "2015-01-01", np.nan, "2016-01-01"]]
d2 = [
Timestamp(x)
for x in ["2017-01-01", "2014-01-01", "2016-01-01", "2015-01-01"]
]
df = DataFrame({"a": d1, "b": d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ["2015-01-01", "2016-01-01", "2016-01-01", np.nan]]
d4 = [
Timestamp(x)
for x in ["2014-01-01", "2015-01-01", "2017-01-01", "2016-01-01"]
]
expected = DataFrame({"a": d3, "b": d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=["a", "b"])
tm.assert_frame_equal(sorted_df, expected)
def test_sort_values_na_position_with_categories_raises(self):
df = DataFrame(
{
"c": Categorical(
["A", np.nan, "B", np.nan, "C"],
categories=["A", "B", "C"],
ordered=True,
)
}
)
with pytest.raises(ValueError, match="invalid na_position: bad_position"):
df.sort_values(by="c", ascending=False, na_position="bad_position")
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize(
"original_dict, sorted_dict, ignore_index, output_index",
[
({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, True, range(3)),
({"A": [1, 2, 3]}, {"A": [3, 2, 1]}, False, range(2, -1, -1)),
(
{"A": [1, 2, 3], "B": [2, 3, 4]},
{"A": [3, 2, 1], "B": [4, 3, 2]},
True,
range(3),
),
(
{"A": [1, 2, 3], "B": [2, 3, 4]},
{"A": [3, 2, 1], "B": [4, 3, 2]},
False,
range(2, -1, -1),
),
],
)
def test_sort_values_ignore_index(
self, inplace, original_dict, sorted_dict, ignore_index, output_index
):
# GH 30114
df = DataFrame(original_dict)
expected = DataFrame(sorted_dict, index=output_index)
kwargs = {"ignore_index": ignore_index, "inplace": inplace}
if inplace:
result_df = df.copy()
result_df.sort_values("A", ascending=False, **kwargs)
else:
result_df = df.sort_values("A", ascending=False, **kwargs)
tm.assert_frame_equal(result_df, expected)
tm.assert_frame_equal(df, DataFrame(original_dict))
def test_sort_values_nat_na_position_default(self):
# GH 13230
expected = DataFrame(
{
"A": [1, 2, 3, 4, 4],
"date": pd.DatetimeIndex(
[
"2010-01-01 09:00:00",
"2010-01-01 09:00:01",
"2010-01-01 09:00:02",
"2010-01-01 09:00:03",
"NaT",
]
),
}
)
result = expected.sort_values(["A", "date"])
tm.assert_frame_equal(result, expected)
def test_sort_values_reshaping(self):
# GH 39426
values = list(range(21))
expected = DataFrame([values], columns=values)
df = expected.sort_values(expected.index[0], axis=1, ignore_index=True)
tm.assert_frame_equal(df, expected)
def test_sort_values_no_by_inplace(self):
# GH#50643
df = DataFrame({"a": [1, 2, 3]})
expected = df.copy()
result = df.sort_values(by=[], inplace=True)
tm.assert_frame_equal(df, expected)
assert result is None
def test_sort_values_no_op_reset_index(self):
# GH#52553
df = DataFrame({"A": [10, 20], "B": [1, 5]}, index=[2, 3])
result = df.sort_values(by="A", ignore_index=True)
expected = DataFrame({"A": [10, 20], "B": [1, 5]})
tm.assert_frame_equal(result, expected)
def test_sort_by_column_named_none(self):
# GH#61512
df = DataFrame([[3, 1], [2, 2]], columns=[None, "C1"])
result = df.sort_values(by=None)
expected = DataFrame([[2, 2], [3, 1]], columns=[None, "C1"], index=[1, 0])
tm.assert_frame_equal(result, expected)
| TestDataFrameSortValues |
python | great-expectations__great_expectations | great_expectations/execution_engine/partition_and_sample/pandas_data_sampler.py | {
"start": 360,
"end": 5892
} | class ____(DataSampler):
"""Methods for sampling a pandas dataframe."""
def sample_using_limit(self, df: pd.DataFrame, batch_spec: BatchSpec) -> pd.DataFrame:
"""Sample the first n rows of data.
Args:
df: pandas dataframe.
batch_spec: Should contain key `n` in sampling_kwargs, the number of
values in the sample e.g. sampling_kwargs={"n": 100}.
Returns:
Sampled dataframe
Raises:
SamplerError
"""
self.verify_batch_spec_sampling_kwargs_exists(batch_spec)
self.verify_batch_spec_sampling_kwargs_key_exists("n", batch_spec)
n: int = batch_spec["sampling_kwargs"]["n"]
return df.head(n)
def sample_using_random(
self,
df: pd.DataFrame,
batch_spec: BatchSpec,
) -> pd.DataFrame:
"""Take a random sample of rows, retaining proportion p.
Args:
df: dataframe to sample
batch_spec: Can contain key `p` (float) which defaults to 0.1
if not provided.
Returns:
Sampled dataframe
Raises:
SamplerError
"""
p: float = self.get_sampling_kwargs_value_or_default(
batch_spec=batch_spec, sampling_kwargs_key="p", default_value=0.1
)
return df[df.index.map(lambda x: random.random() < p)]
def sample_using_mod(
self,
df: pd.DataFrame,
batch_spec: BatchSpec,
) -> pd.DataFrame:
"""Take the mod of named column, and only keep rows that match the given value.
Args:
df: dataframe to sample
batch_spec: should contain keys `column_name`, `mod` and `value`
Returns:
Sampled dataframe
Raises:
SamplerError
"""
self.verify_batch_spec_sampling_kwargs_exists(batch_spec)
self.verify_batch_spec_sampling_kwargs_key_exists("column_name", batch_spec)
self.verify_batch_spec_sampling_kwargs_key_exists("mod", batch_spec)
self.verify_batch_spec_sampling_kwargs_key_exists("value", batch_spec)
column_name: str = self.get_sampling_kwargs_value_or_default(batch_spec, "column_name")
mod: int = self.get_sampling_kwargs_value_or_default(batch_spec, "mod")
value: int = self.get_sampling_kwargs_value_or_default(batch_spec, "value")
return df[df[column_name].map(lambda x: x % mod == value)]
def sample_using_a_list(
self,
df: pd.DataFrame,
batch_spec: BatchSpec,
) -> pd.DataFrame:
"""Match the values in the named column against value_list, and only keep the matches.
Args:
df: dataframe to sample
batch_spec: should contain keys `column_name` and `value_list`
Returns:
Sampled dataframe
Raises:
SamplerError
"""
self.verify_batch_spec_sampling_kwargs_exists(batch_spec)
self.verify_batch_spec_sampling_kwargs_key_exists("column_name", batch_spec)
self.verify_batch_spec_sampling_kwargs_key_exists("value_list", batch_spec)
column_name: str = self.get_sampling_kwargs_value_or_default(batch_spec, "column_name")
value_list: int = self.get_sampling_kwargs_value_or_default(batch_spec, "value_list")
return df[df[column_name].isin(value_list)] # type: ignore[arg-type] # FIXME CoP
def sample_using_hash(
self,
df: pd.DataFrame,
batch_spec: BatchSpec,
) -> pd.DataFrame:
"""Hash the values in the named column, and only keep rows that match the given hash_value.
Args:
df: dataframe to sample
batch_spec: should contain keys `column_name` and optionally `hash_digits`
(default is 1 if not provided), `hash_value` (default is "f" if not provided),
and `hash_function_name` (default is "md5" if not provided)
Returns:
Sampled dataframe
Raises:
SamplerError
"""
self.verify_batch_spec_sampling_kwargs_exists(batch_spec)
self.verify_batch_spec_sampling_kwargs_key_exists("column_name", batch_spec)
column_name: str = self.get_sampling_kwargs_value_or_default(batch_spec, "column_name")
hash_digits: int = self.get_sampling_kwargs_value_or_default(
batch_spec=batch_spec, sampling_kwargs_key="hash_digits", default_value=1
)
hash_value: str = self.get_sampling_kwargs_value_or_default(
batch_spec=batch_spec, sampling_kwargs_key="hash_value", default_value="f"
)
hash_function_name: str = self.get_sampling_kwargs_value_or_default(
batch_spec=batch_spec,
sampling_kwargs_key="hash_function_name",
default_value="md5",
)
try:
hash_func = getattr(hashlib, hash_function_name)
except (TypeError, AttributeError):
raise (
gx_exceptions.ExecutionEngineError( # noqa: TRY003 # FIXME CoP
f"""The sampling method used with PandasExecutionEngine has a reference to an invalid hash_function_name.
Reference to {hash_function_name} cannot be found.""" # noqa: E501 # FIXME CoP
)
)
matches: pd.Series = df[column_name].map(
lambda x: hash_func(str(x).encode()).hexdigest()[-1 * hash_digits :] == hash_value
)
return df[matches]
| PandasDataSampler |
python | modin-project__modin | modin/core/execution/ray/implementations/pandas_on_ray/dataframe/dataframe.py | {
"start": 1121,
"end": 2884
} | class ____(PandasDataframe):
"""
The class implements the interface in ``PandasDataframe`` using Ray.
Parameters
----------
partitions : np.ndarray
A 2D NumPy array of partitions.
index : sequence
The index for the dataframe. Converted to a ``pandas.Index``.
columns : sequence
The columns object for the dataframe. Converted to a ``pandas.Index``.
row_lengths : list, optional
The length of each partition in the rows. The "height" of
each of the block partitions. Is computed if not provided.
column_widths : list, optional
The width of each partition in the columns. The "width" of
each of the block partitions. Is computed if not provided.
dtypes : pandas.Series, optional
The data types for the dataframe columns.
pandas_backend : {"pyarrow", None}, optional
Backend used by pandas. None - means default NumPy backend.
"""
_partition_mgr_cls = PandasOnRayDataframePartitionManager
def _get_lengths(self, parts, axis):
"""
Get list of dimensions for all the provided parts.
Parameters
----------
parts : list
List of parttions.
axis : {0, 1}
The axis along which to get the lengths (0 - length across rows or, 1 - width across columns).
Returns
-------
list
"""
if axis == Axis.ROW_WISE:
dims = [part.length(False) for part in parts]
else:
dims = [part.width(False) for part in parts]
return self._partition_mgr_cls.materialize_futures(dims)
@property
@_inherit_docstrings(PandasDataframe.engine)
def engine(self) -> str:
return "Ray"
| PandasOnRayDataframe |
python | huggingface__transformers | tests/models/cwm/test_modeling_cwm.py | {
"start": 1709,
"end": 2333
} | class ____(CausalLMModelTest, unittest.TestCase):
all_model_classes = (
(
CwmModel,
CwmForCausalLM,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": CwmModel,
"text-generation": CwmForCausalLM,
}
if is_torch_available()
else {}
)
model_tester_class = CwmModelTester
model_split_percents = [0.5, 0.7, 0.8]
_torch_compile_train_cls = CwmForCausalLM if is_torch_available() else None
@require_torch_accelerator
@slow
@require_read_token
| CwmModelTest |
python | allegroai__clearml | clearml/backend_api/services/v2_13/models.py | {
"start": 116699,
"end": 124923
} | class ____(Request):
"""
Update a model
:param model: Model id
:type model: str
:param name: Model name Unique within the company.
:type name: str
:param comment: Model comment
:type comment: str
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
:param ready: Indication if the model is final and can be used by other tasks
Default is false.
:type ready: bool
:param created: Model creation time (UTC)
:type created: datetime.datetime
:param ui_cache: UI cache for this model
:type ui_cache: dict
:param project: Project to which to model belongs
:type project: str
:param task: Associated task ID
:type task: str
:param iteration: Iteration (used to update task statistics if an associated
task is reported)
:type iteration: int
"""
_service = "models"
_action = "update"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"comment": {"description": "Model comment", "type": "string"},
"created": {
"description": "Model creation time (UTC) ",
"format": "date-time",
"type": "string",
},
"iteration": {
"description": "Iteration (used to update task statistics if an associated task is reported)",
"type": "integer",
},
"model": {"description": "Model id", "type": "string"},
"name": {
"description": "Model name Unique within the company.",
"type": "string",
},
"project": {
"description": "Project to which to model belongs",
"type": "string",
},
"ready": {
"default": False,
"description": "Indication if the model is final and can be used by other tasks Default is false.",
"type": "boolean",
},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": "array",
},
"task": {"description": "Associated task ID", "type": "string"},
"ui_cache": {
"additionalProperties": True,
"description": "UI cache for this model",
"type": "object",
},
},
"required": ["model"],
"type": "object",
}
def __init__(
self,
model: str,
name: Optional[str] = None,
comment: Optional[str] = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
ready: Optional[bool] = False,
created: Optional[str] = None,
ui_cache: Optional[dict] = None,
project: Optional[str] = None,
task: Optional[str] = None,
iteration: Optional[int] = None,
**kwargs: Any
) -> None:
super(UpdateRequest, self).__init__(**kwargs)
self.model = model
self.name = name
self.comment = comment
self.tags = tags
self.system_tags = system_tags
self.ready = ready
self.created = created
self.ui_cache = ui_cache
self.project = project
self.task = task
self.iteration = iteration
@schema_property("model")
def model(self) -> str:
return self._property_model
@model.setter
def model(self, value: str) -> None:
if value is None:
self._property_model = None
return
self.assert_isinstance(value, "model", six.string_types)
self._property_model = value
@schema_property("name")
def name(self) -> Optional[str]:
return self._property_name
@name.setter
def name(self, value: Optional[str]) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("comment")
def comment(self) -> Optional[str]:
return self._property_comment
@comment.setter
def comment(self, value: Optional[str]) -> None:
if value is None:
self._property_comment = None
return
self.assert_isinstance(value, "comment", six.string_types)
self._property_comment = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("ready")
def ready(self) -> Optional[bool]:
return self._property_ready
@ready.setter
def ready(self, value: Optional[bool]) -> None:
if value is None:
self._property_ready = None
return
self.assert_isinstance(value, "ready", (bool,))
self._property_ready = value
@schema_property("created")
def created(self) -> Optional[str]:
return self._property_created
@created.setter
def created(self, value: Optional[str]) -> None:
if value is None:
self._property_created = None
return
self.assert_isinstance(value, "created", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_created = value
@schema_property("ui_cache")
def ui_cache(self) -> Optional[dict]:
return self._property_ui_cache
@ui_cache.setter
def ui_cache(self, value: Optional[dict]) -> None:
if value is None:
self._property_ui_cache = None
return
self.assert_isinstance(value, "ui_cache", (dict,))
self._property_ui_cache = value
@schema_property("project")
def project(self) -> Optional[str]:
return self._property_project
@project.setter
def project(self, value: Optional[str]) -> None:
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
@schema_property("task")
def task(self) -> Optional[str]:
return self._property_task
@task.setter
def task(self, value: Optional[str]) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("iteration")
def iteration(self) -> Optional[int]:
return self._property_iteration
@iteration.setter
def iteration(self, value: Optional[int]) -> None:
if value is None:
self._property_iteration = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "iteration", six.integer_types)
self._property_iteration = value
| UpdateRequest |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 38341,
"end": 38678
} | class ____(VOWarning, ValueError):
"""Incorrect number of elements in array.
The number of array elements in the data does not match that specified
in the FIELD specifier.
"""
message_template = (
"Incorrect number of elements in array. Expected multiple of {}, got {}"
)
default_args = ("x", "y")
| E02 |
python | pandas-dev__pandas | asv_bench/benchmarks/groupby.py | {
"start": 24773,
"end": 25331
} | class ____:
# GH 14338
params = ["period_range", "date_range", "date_range_tz"]
param_names = ["grouper"]
def setup(self, grouper):
N = 10**4
rng_map = {
"period_range": period_range,
"date_range": date_range,
"date_range_tz": partial(date_range, tz="US/Central"),
}
self.grouper = rng_map[grouper]("1900-01-01", freq="D", periods=N)
self.df = DataFrame(np.random.randn(10**4, 2))
def time_sum(self, grouper):
self.df.groupby(self.grouper).sum()
| Datelike |
python | chroma-core__chroma | chromadb/utils/embedding_functions/google_embedding_function.py | {
"start": 14381,
"end": 19807
} | class ____(EmbeddingFunction[Documents]):
"""To use this EmbeddingFunction, you must have the vertexai Python package installed and have Google Cloud credentials configured."""
def __init__(
self,
api_key: Optional[str] = None,
model_name: str = "textembedding-gecko",
project_id: str = "cloud-large-language-models",
region: str = "us-central1",
api_key_env_var: str = "CHROMA_GOOGLE_VERTEX_API_KEY",
):
"""
Initialize the GoogleVertexEmbeddingFunction.
Args:
api_key_env_var (str, optional): Environment variable name that contains your API key for the Google Vertex AI API.
Defaults to "CHROMA_GOOGLE_VERTEX_API_KEY".
model_name (str, optional): The name of the model to use for text embeddings.
Defaults to "textembedding-gecko".
project_id (str, optional): The Google Cloud project ID.
Defaults to "cloud-large-language-models".
region (str, optional): The Google Cloud region.
Defaults to "us-central1".
"""
try:
import vertexai
from vertexai.language_models import TextEmbeddingModel
except ImportError:
raise ValueError(
"The vertexai python package is not installed. Please install it with `pip install google-cloud-aiplatform`"
)
if api_key is not None:
warnings.warn(
"Direct api_key configuration will not be persisted. "
"Please use environment variables via api_key_env_var for persistent storage.",
DeprecationWarning,
)
if os.getenv("GOOGLE_API_KEY") is not None:
self.api_key_env_var = "GOOGLE_API_KEY"
else:
self.api_key_env_var = api_key_env_var
self.api_key = api_key or os.getenv(self.api_key_env_var)
if not self.api_key:
raise ValueError(
f"The {self.api_key_env_var} environment variable is not set."
)
self.model_name = model_name
self.project_id = project_id
self.region = region
vertexai.init(project=project_id, location=region)
self._model = TextEmbeddingModel.from_pretrained(model_name)
def __call__(self, input: Documents) -> Embeddings:
"""
Generate embeddings for the given documents.
Args:
input: Documents or images to generate embeddings for.
Returns:
Embeddings for the documents.
"""
# Google Vertex only works with text documents
if not all(isinstance(item, str) for item in input):
raise ValueError("Google Vertex only supports text documents, not images")
embeddings_list: List[npt.NDArray[np.float32]] = []
for text in input:
embedding_result = self._model.get_embeddings([text])
embeddings_list.append(
np.array(embedding_result[0].values, dtype=np.float32)
)
# Convert to the expected Embeddings type (List[Vector])
return cast(Embeddings, embeddings_list)
@staticmethod
def name() -> str:
return "google_vertex"
def default_space(self) -> Space:
return "cosine"
def supported_spaces(self) -> List[Space]:
return ["cosine", "l2", "ip"]
@staticmethod
def build_from_config(config: Dict[str, Any]) -> "EmbeddingFunction[Documents]":
api_key_env_var = config.get("api_key_env_var")
model_name = config.get("model_name")
project_id = config.get("project_id")
region = config.get("region")
if (
api_key_env_var is None
or model_name is None
or project_id is None
or region is None
):
assert False, "This code should not be reached"
return GoogleVertexEmbeddingFunction(
api_key_env_var=api_key_env_var,
model_name=model_name,
project_id=project_id,
region=region,
)
def get_config(self) -> Dict[str, Any]:
return {
"api_key_env_var": self.api_key_env_var,
"model_name": self.model_name,
"project_id": self.project_id,
"region": self.region,
}
def validate_config_update(
self, old_config: Dict[str, Any], new_config: Dict[str, Any]
) -> None:
if "model_name" in new_config:
raise ValueError(
"The model name cannot be changed after the embedding function has been initialized."
)
if "project_id" in new_config:
raise ValueError(
"The project ID cannot be changed after the embedding function has been initialized."
)
if "region" in new_config:
raise ValueError(
"The region cannot be changed after the embedding function has been initialized."
)
@staticmethod
def validate_config(config: Dict[str, Any]) -> None:
"""
Validate the configuration using the JSON schema.
Args:
config: Configuration to validate
Raises:
ValidationError: If the configuration does not match the schema
"""
validate_config_schema(config, "google_vertex")
| GoogleVertexEmbeddingFunction |
python | huggingface__transformers | src/transformers/models/seamless_m4t/processing_seamless_m4t.py | {
"start": 1054,
"end": 1179
} | class ____(ProcessingKwargs, total=False):
text_kwargs: SeamlessM4TTextKwargs
_defaults = {}
| SeamlessM4TProcessorKwargs |
python | pypa__setuptools | setuptools/_distutils/filelist.py | {
"start": 11942,
"end": 15337
} | class ____(set):
"""
Exclude previously-seen dirs from walk results,
avoiding infinite recursion.
Ref https://bugs.python.org/issue44497.
"""
def __call__(self, walk_item):
"""
Given an item from an os.walk result, determine
if the item represents a unique dir for this instance
and if not, prevent further traversal.
"""
base, dirs, files = walk_item
stat = os.stat(base)
candidate = stat.st_dev, stat.st_ino
found = candidate in self
if found:
del dirs[:]
self.add(candidate)
return not found
@classmethod
def filter(cls, items):
return filter(cls(), items)
def findall(dir: str | os.PathLike[str] = os.curdir):
"""
Find all files under 'dir' and return the list of full filenames.
Unless dir is '.', return full filenames with dir prepended.
"""
files = _find_all_simple(dir)
if dir == os.curdir:
make_rel = functools.partial(os.path.relpath, start=dir)
files = map(make_rel, files)
return list(files)
def glob_to_re(pattern):
"""Translate a shell-like glob pattern to a regular expression; return
a string containing the regex. Differs from 'fnmatch.translate()' in
that '*' does not match "special characters" (which are
platform-specific).
"""
pattern_re = fnmatch.translate(pattern)
# '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
# IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
# and by extension they shouldn't match such "special characters" under
# any OS. So change all non-escaped dots in the RE to match any
# character except the special characters (currently: just os.sep).
sep = os.sep
if os.sep == '\\':
# we're using a regex to manipulate a regex, so we need
# to escape the backslash twice
sep = r'\\\\'
escaped = rf'\1[^{sep}]'
pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)
return pattern_re
def translate_pattern(pattern, anchor=True, prefix=None, is_regex=False):
"""Translate a shell-like wildcard pattern to a compiled regular
expression. Return the compiled regex. If 'is_regex' true,
then 'pattern' is directly compiled to a regex (if it's a string)
or just returned as-is (assumes it's a regex object).
"""
if is_regex:
if isinstance(pattern, str):
return re.compile(pattern)
else:
return pattern
# ditch start and end characters
start, _, end = glob_to_re('_').partition('_')
if pattern:
pattern_re = glob_to_re(pattern)
assert pattern_re.startswith(start) and pattern_re.endswith(end)
else:
pattern_re = ''
if prefix is not None:
prefix_re = glob_to_re(prefix)
assert prefix_re.startswith(start) and prefix_re.endswith(end)
prefix_re = prefix_re[len(start) : len(prefix_re) - len(end)]
sep = os.sep
if os.sep == '\\':
sep = r'\\'
pattern_re = pattern_re[len(start) : len(pattern_re) - len(end)]
pattern_re = rf'{start}\A{prefix_re}{sep}.*{pattern_re}{end}'
else: # no prefix -- respect anchor flag
if anchor:
pattern_re = rf'{start}\A{pattern_re[len(start) :]}'
return re.compile(pattern_re)
| _UniqueDirs |
python | apache__airflow | task-sdk/src/airflow/sdk/bases/operator.py | {
"start": 24107,
"end": 77318
} | class ____(AbstractOperator, metaclass=BaseOperatorMeta):
r"""
Abstract base class for all operators.
Since operators create objects that become nodes in the Dag, BaseOperator
contains many recursive methods for Dag crawling behavior. To derive from
this class, you are expected to override the constructor and the 'execute'
method.
Operators derived from this class should perform or trigger certain tasks
synchronously (wait for completion). Example of operators could be an
operator that runs a Pig job (PigOperator), a sensor operator that
waits for a partition to land in Hive (HiveSensorOperator), or one that
moves data from Hive to MySQL (Hive2MySqlOperator). Instances of these
operators (tasks) target specific operations, running specific scripts,
functions or data transfers.
This class is abstract and shouldn't be instantiated. Instantiating a
class derived from this one results in the creation of a task object,
which ultimately becomes a node in Dag objects. Task dependencies should
be set by using the set_upstream and/or set_downstream methods.
:param task_id: a unique, meaningful id for the task
:param owner: the owner of the task. Using a meaningful description
(e.g. user/person/team/role name) to clarify ownership is recommended.
:param email: the 'to' email address(es) used in email alerts. This can be a
single email or multiple ones. Multiple addresses can be specified as a
comma or semicolon separated string or by passing a list of strings. (deprecated)
:param email_on_retry: Indicates whether email alerts should be sent when a
task is retried (deprecated)
:param email_on_failure: Indicates whether email alerts should be sent when
a task failed (deprecated)
:param retries: the number of retries that should be performed before
failing the task
:param retry_delay: delay between retries, can be set as ``timedelta`` or
``float`` seconds, which will be converted into ``timedelta``,
the default is ``timedelta(seconds=300)``.
:param retry_exponential_backoff: multiplier for exponential backoff between retries.
Set to 0 to disable (constant delay). Set to 2.0 for standard exponential backoff
(delay doubles with each retry). For example, with retry_delay=4min and
retry_exponential_backoff=5, retries occur after 4min, 20min, 100min, etc.
:param max_retry_delay: maximum delay interval between retries, can be set as
``timedelta`` or ``float`` seconds, which will be converted into ``timedelta``.
:param start_date: The ``start_date`` for the task, determines
the ``logical_date`` for the first task instance. The best practice
is to have the start_date rounded
to your Dag's ``schedule_interval``. Daily jobs have their start_date
some day at 00:00:00, hourly jobs have their start_date at 00:00
of a specific hour. Note that Airflow simply looks at the latest
``logical_date`` and adds the ``schedule_interval`` to determine
the next ``logical_date``. It is also very important
to note that different tasks' dependencies
need to line up in time. If task A depends on task B and their
start_date are offset in a way that their logical_date don't line
up, A's dependencies will never be met. If you are looking to delay
a task, for example running a daily task at 2AM, look into the
``TimeSensor`` and ``TimeDeltaSensor``. We advise against using
dynamic ``start_date`` and recommend using fixed ones. Read the
FAQ entry about start_date for more information.
:param end_date: if specified, the scheduler won't go beyond this date
:param depends_on_past: when set to true, task instances will run
sequentially and only if the previous instance has succeeded or has been skipped.
The task instance for the start_date is allowed to run.
:param wait_for_past_depends_before_skipping: when set to true, if the task instance
should be marked as skipped, and depends_on_past is true, the ti will stay on None state
waiting the task of the previous run
:param wait_for_downstream: when set to true, an instance of task
X will wait for tasks immediately downstream of the previous instance
of task X to finish successfully or be skipped before it runs. This is useful if the
different instances of a task X alter the same asset, and this asset
is used by tasks downstream of task X. Note that depends_on_past
is forced to True wherever wait_for_downstream is used. Also note that
only tasks *immediately* downstream of the previous task instance are waited
for; the statuses of any tasks further downstream are ignored.
:param dag: a reference to the dag the task is attached to (if any)
:param priority_weight: priority weight of this task against other task.
This allows the executor to trigger higher priority tasks before
others when things get backed up. Set priority_weight as a higher
number for more important tasks.
As not all database engines support 64-bit integers, values are capped with 32-bit.
Valid range is from -2,147,483,648 to 2,147,483,647.
:param weight_rule: weighting method used for the effective total
priority weight of the task. Options are:
``{ downstream | upstream | absolute }`` default is ``downstream``
When set to ``downstream`` the effective weight of the task is the
aggregate sum of all downstream descendants. As a result, upstream
tasks will have higher weight and will be scheduled more aggressively
when using positive weight values. This is useful when you have
multiple dag run instances and desire to have all upstream tasks to
complete for all runs before each dag can continue processing
downstream tasks. When set to ``upstream`` the effective weight is the
aggregate sum of all upstream ancestors. This is the opposite where
downstream tasks have higher weight and will be scheduled more
aggressively when using positive weight values. This is useful when you
have multiple dag run instances and prefer to have each dag complete
before starting upstream tasks of other dags. When set to
``absolute``, the effective weight is the exact ``priority_weight``
specified without additional weighting. You may want to do this when
you know exactly what priority weight each task should have.
Additionally, when set to ``absolute``, there is bonus effect of
significantly speeding up the task creation process as for very large
Dags. Options can be set as string or using the constants defined in
the static class ``airflow.utils.WeightRule``.
Irrespective of the weight rule, resulting priority values are capped with 32-bit.
|experimental|
Since 2.9.0, Airflow allows to define custom priority weight strategy,
by creating a subclass of
``airflow.task.priority_strategy.PriorityWeightStrategy`` and registering
in a plugin, then providing the class path or the class instance via
``weight_rule`` parameter. The custom priority weight strategy will be
used to calculate the effective total priority weight of the task instance.
:param queue: which queue to target when running this job. Not
all executors implement queue management, the CeleryExecutor
does support targeting specific queues.
:param pool: the slot pool this task should run in, slot pools are a
way to limit concurrency for certain tasks
:param pool_slots: the number of pool slots this task should use (>= 1)
Values less than 1 are not allowed.
:param sla: DEPRECATED - The SLA feature is removed in Airflow 3.0, to be replaced with a
new implementation in Airflow >=3.1.
:param execution_timeout: max time allowed for the execution of
this task instance, if it goes beyond it will raise and fail.
:param on_failure_callback: a function or list of functions to be called when a task instance
of this task fails. a context dictionary is passed as a single
parameter to this function. Context contains references to related
objects to the task instance and is documented under the macros
section of the API.
:param on_execute_callback: much like the ``on_failure_callback`` except
that it is executed right before the task is executed.
:param on_retry_callback: much like the ``on_failure_callback`` except
that it is executed when retries occur.
:param on_success_callback: much like the ``on_failure_callback`` except
that it is executed when the task succeeds.
:param on_skipped_callback: much like the ``on_failure_callback`` except
that it is executed when skipped occur; this callback will be called only if AirflowSkipException get raised.
Explicitly it is NOT called if a task is not started to be executed because of a preceding branching
decision in the Dag or a trigger rule which causes execution to skip so that the task execution
is never scheduled.
:param pre_execute: a function to be called immediately before task
execution, receiving a context dictionary; raising an exception will
prevent the task from being executed.
|experimental|
:param post_execute: a function to be called immediately after task
execution, receiving a context dictionary and task result; raising an
exception will prevent the task from succeeding.
|experimental|
:param trigger_rule: defines the rule by which dependencies are applied
for the task to get triggered. Options are:
``{ all_success | all_failed | all_done | all_skipped | one_success | one_done |
one_failed | none_failed | none_failed_min_one_success | none_skipped | always}``
default is ``all_success``. Options can be set as string or
using the constants defined in the static class
``airflow.utils.TriggerRule``
:param resources: A map of resource parameter names (the argument names of the
Resources constructor) to their values.
:param run_as_user: unix username to impersonate while running the task
:param max_active_tis_per_dag: When set, a task will be able to limit the concurrent
runs across logical_dates.
:param max_active_tis_per_dagrun: When set, a task will be able to limit the concurrent
task instances per Dag run.
:param executor: Which executor to target when running this task. NOT YET SUPPORTED
:param executor_config: Additional task-level configuration parameters that are
interpreted by a specific executor. Parameters are namespaced by the name of
executor.
**Example**: to run this task in a specific docker container through
the KubernetesExecutor ::
MyOperator(..., executor_config={"KubernetesExecutor": {"image": "myCustomDockerImage"}})
:param do_xcom_push: if True, an XCom is pushed containing the Operator's
result
:param multiple_outputs: if True and do_xcom_push is True, pushes multiple XComs, one for each
key in the returned dictionary result. If False and do_xcom_push is True, pushes a single XCom.
:param task_group: The TaskGroup to which the task should belong. This is typically provided when not
using a TaskGroup as a context manager.
:param doc: Add documentation or notes to your Task objects that is visible in
Task Instance details View in the Webserver
:param doc_md: Add documentation (in Markdown format) or notes to your Task objects
that is visible in Task Instance details View in the Webserver
:param doc_rst: Add documentation (in RST format) or notes to your Task objects
that is visible in Task Instance details View in the Webserver
:param doc_json: Add documentation (in JSON format) or notes to your Task objects
that is visible in Task Instance details View in the Webserver
:param doc_yaml: Add documentation (in YAML format) or notes to your Task objects
that is visible in Task Instance details View in the Webserver
:param task_display_name: The display name of the task which appears on the UI.
:param logger_name: Name of the logger used by the Operator to emit logs.
If set to `None` (default), the logger name will fall back to
`airflow.task.operators.{class.__module__}.{class.__name__}` (e.g. HttpOperator will have
*airflow.task.operators.airflow.providers.http.operators.http.HttpOperator* as logger).
:param allow_nested_operators: if True, when an operator is executed within another one a warning message
will be logged. If False, then an exception will be raised if the operator is badly used (e.g. nested
within another one). In future releases of Airflow this parameter will be removed and an exception
will always be thrown when operators are nested within each other (default is True).
**Example**: example of a bad operator mixin usage::
@task(provide_context=True)
def say_hello_world(**context):
hello_world_task = BashOperator(
task_id="hello_world_task",
bash_command="python -c \"print('Hello, world!')\"",
dag=dag,
)
hello_world_task.execute(context)
"""
task_id: str
owner: str = DEFAULT_OWNER
email: str | Sequence[str] | None = None
email_on_retry: bool = True
email_on_failure: bool = True
retries: int | None = DEFAULT_RETRIES
retry_delay: timedelta = DEFAULT_RETRY_DELAY
retry_exponential_backoff: float = 0
max_retry_delay: timedelta | float | None = None
start_date: datetime | None = None
end_date: datetime | None = None
depends_on_past: bool = False
ignore_first_depends_on_past: bool = DEFAULT_IGNORE_FIRST_DEPENDS_ON_PAST
wait_for_past_depends_before_skipping: bool = DEFAULT_WAIT_FOR_PAST_DEPENDS_BEFORE_SKIPPING
wait_for_downstream: bool = False
# At execution_time this becomes a normal dict
params: ParamsDict | dict = field(default_factory=ParamsDict)
default_args: dict | None = None
priority_weight: int = DEFAULT_PRIORITY_WEIGHT
weight_rule: PriorityWeightStrategy = field(
default_factory=airflow_priority_weight_strategies[DEFAULT_WEIGHT_RULE]
)
queue: str = DEFAULT_QUEUE
pool: str = DEFAULT_POOL_NAME
pool_slots: int = DEFAULT_POOL_SLOTS
execution_timeout: timedelta | None = DEFAULT_TASK_EXECUTION_TIMEOUT
on_execute_callback: Sequence[TaskStateChangeCallback] = ()
on_failure_callback: Sequence[TaskStateChangeCallback] = ()
on_success_callback: Sequence[TaskStateChangeCallback] = ()
on_retry_callback: Sequence[TaskStateChangeCallback] = ()
on_skipped_callback: Sequence[TaskStateChangeCallback] = ()
_pre_execute_hook: TaskPreExecuteHook | None = None
_post_execute_hook: TaskPostExecuteHook | None = None
trigger_rule: TriggerRule = DEFAULT_TRIGGER_RULE
resources: dict[str, Any] | None = None
run_as_user: str | None = None
task_concurrency: int | None = None
map_index_template: str | None = None
max_active_tis_per_dag: int | None = None
max_active_tis_per_dagrun: int | None = None
executor: str | None = None
executor_config: dict | None = None
do_xcom_push: bool = True
multiple_outputs: bool = False
inlets: list[Any] = field(default_factory=list)
outlets: list[Any] = field(default_factory=list)
task_group: TaskGroup | None = None
doc: str | None = None
doc_md: str | None = None
doc_json: str | None = None
doc_yaml: str | None = None
doc_rst: str | None = None
_task_display_name: str | None = None
logger_name: str | None = None
allow_nested_operators: bool = True
is_setup: bool = False
is_teardown: bool = False
# TODO: Task-SDK: Make these ClassVar[]?
template_fields: Collection[str] = ()
template_ext: Sequence[str] = ()
template_fields_renderers: ClassVar[dict[str, str]] = {}
operator_extra_links: Collection[BaseOperatorLink] = ()
# Defines the color in the UI
ui_color: str = "#fff"
ui_fgcolor: str = "#000"
partial: Callable[..., OperatorPartial] = _PartialDescriptor() # type: ignore
_dag: DAG | None = field(init=False, default=None)
# Make this optional so the type matches the one define in LoggingMixin
_log_config_logger_name: str | None = field(default="airflow.task.operators", init=False)
_logger_name: str | None = None
# The _serialized_fields are lazily loaded when get_serialized_fields() method is called
__serialized_fields: ClassVar[frozenset[str] | None] = None
_comps: ClassVar[set[str]] = {
"task_id",
"dag_id",
"owner",
"email",
"email_on_retry",
"retry_delay",
"retry_exponential_backoff",
"max_retry_delay",
"start_date",
"end_date",
"depends_on_past",
"wait_for_downstream",
"priority_weight",
"execution_timeout",
"has_on_execute_callback",
"has_on_failure_callback",
"has_on_success_callback",
"has_on_retry_callback",
"has_on_skipped_callback",
"do_xcom_push",
"multiple_outputs",
"allow_nested_operators",
"executor",
}
# If True, the Rendered Template fields will be overwritten in DB after execution
# This is useful for Taskflow decorators that modify the template fields during execution like
# @task.bash decorator.
overwrite_rtif_after_execution: bool = False
# If True then the class constructor was called
__instantiated: bool = False
# List of args as passed to `init()`, after apply_defaults() has been updated. Used to "recreate" the task
# when mapping
# Set via the metaclass
__init_kwargs: dict[str, Any] = field(init=False)
# Set to True before calling execute method
_lock_for_execution: bool = False
# Set to True for an operator instantiated by a mapped operator.
__from_mapped: bool = False
start_trigger_args: StartTriggerArgs | None = None
start_from_trigger: bool = False
# base list which includes all the attrs that don't need deep copy.
_base_operator_shallow_copy_attrs: Final[tuple[str, ...]] = (
"user_defined_macros",
"user_defined_filters",
"params",
)
# each operator should override this class attr for shallow copy attrs.
shallow_copy_attrs: Sequence[str] = ()
def __setattr__(self: BaseOperator, key: str, value: Any):
if converter := getattr(self, f"_convert_{key}", None):
value = converter(value)
super().__setattr__(key, value)
if self.__from_mapped or self._lock_for_execution:
return # Skip any custom behavior for validation and during execute.
if key in self.__init_kwargs:
self.__init_kwargs[key] = value
if self.__instantiated and key in self.template_fields:
# Resolve upstreams set by assigning an XComArg after initializing
# an operator, example:
# op = BashOperator()
# op.bash_command = "sleep 1"
self._set_xcomargs_dependency(key, value)
def __init__(
    self,
    *,
    task_id: str,
    owner: str = DEFAULT_OWNER,
    email: str | Sequence[str] | None = None,
    email_on_retry: bool = True,
    email_on_failure: bool = True,
    retries: int | None = DEFAULT_RETRIES,
    retry_delay: timedelta | float = DEFAULT_RETRY_DELAY,
    retry_exponential_backoff: float = 0,
    max_retry_delay: timedelta | float | None = None,
    start_date: datetime | None = None,
    end_date: datetime | None = None,
    depends_on_past: bool = False,
    ignore_first_depends_on_past: bool = DEFAULT_IGNORE_FIRST_DEPENDS_ON_PAST,
    wait_for_past_depends_before_skipping: bool = DEFAULT_WAIT_FOR_PAST_DEPENDS_BEFORE_SKIPPING,
    wait_for_downstream: bool = False,
    dag: DAG | None = None,
    params: collections.abc.MutableMapping[str, Any] | None = None,
    default_args: dict | None = None,
    priority_weight: int = DEFAULT_PRIORITY_WEIGHT,
    weight_rule: str | PriorityWeightStrategy = DEFAULT_WEIGHT_RULE,
    queue: str = DEFAULT_QUEUE,
    pool: str | None = None,
    pool_slots: int = DEFAULT_POOL_SLOTS,
    sla: timedelta | None = None,
    execution_timeout: timedelta | None = DEFAULT_TASK_EXECUTION_TIMEOUT,
    on_execute_callback: None | TaskStateChangeCallback | Collection[TaskStateChangeCallback] = None,
    on_failure_callback: None | TaskStateChangeCallback | list[TaskStateChangeCallback] = None,
    on_success_callback: None | TaskStateChangeCallback | Collection[TaskStateChangeCallback] = None,
    on_retry_callback: None | TaskStateChangeCallback | list[TaskStateChangeCallback] = None,
    on_skipped_callback: None | TaskStateChangeCallback | Collection[TaskStateChangeCallback] = None,
    pre_execute: TaskPreExecuteHook | None = None,
    post_execute: TaskPostExecuteHook | None = None,
    trigger_rule: str = DEFAULT_TRIGGER_RULE,
    resources: dict[str, Any] | None = None,
    run_as_user: str | None = None,
    map_index_template: str | None = None,
    max_active_tis_per_dag: int | None = None,
    max_active_tis_per_dagrun: int | None = None,
    executor: str | None = None,
    executor_config: dict | None = None,
    do_xcom_push: bool = True,
    multiple_outputs: bool = False,
    inlets: Any | None = None,
    outlets: Any | None = None,
    task_group: TaskGroup | None = None,
    doc: str | None = None,
    doc_md: str | None = None,
    doc_json: str | None = None,
    doc_yaml: str | None = None,
    doc_rst: str | None = None,
    task_display_name: str | None = None,
    logger_name: str | None = None,
    allow_nested_operators: bool = True,
    **kwargs: Any,
):
    # Note: Metaclass handles passing in the Dag/TaskGroup from active context manager, if any

    # Only apply task_group prefix if this operator was not created from a mapped operator.
    # Mapped operators already have the prefix applied during their creation.
    if task_group and not self.__from_mapped:
        self.task_id = task_group.child_id(task_id)
        task_group.add(self)
    else:
        self.task_id = task_id

    super().__init__()

    self.task_group = task_group
    # Internal flag used by mapped-operator validation; never stored.
    kwargs.pop("_airflow_mapped_validation_only", None)
    if kwargs:
        raise TypeError(
            f"Invalid arguments were passed to {self.__class__.__name__} (task_id: {task_id}). "
            f"Invalid arguments were:\n**kwargs: {redact(kwargs)}",
        )
    validate_key(self.task_id)

    self.owner = owner
    self.email = email
    self.email_on_retry = email_on_retry
    self.email_on_failure = email_on_failure
    if email is not None:
        warnings.warn(
            "Setting email on a task is deprecated; please migrate to SmtpNotifier.",
            RemovedInAirflow4Warning,
            stacklevel=2,
        )
    # NOTE(review): email_on_retry / email_on_failure default to True (a bool),
    # so the `is not None` checks below are always true -- these two warnings
    # fire whenever `email` is set at all. Confirm whether the intent was to
    # detect an *explicitly passed* flag instead.
    if email and email_on_retry is not None:
        warnings.warn(
            "Setting email_on_retry on a task is deprecated; please migrate to SmtpNotifier.",
            RemovedInAirflow4Warning,
            stacklevel=2,
        )
    if email and email_on_failure is not None:
        warnings.warn(
            "Setting email_on_failure on a task is deprecated; please migrate to SmtpNotifier.",
            RemovedInAirflow4Warning,
            stacklevel=2,
        )
    if execution_timeout is not None and not isinstance(execution_timeout, timedelta):
        raise ValueError(
            f"execution_timeout must be timedelta object but passed as type: {type(execution_timeout)}"
        )
    self.execution_timeout = execution_timeout

    # Callbacks are normalized to collections so single callables and lists
    # are handled uniformly downstream.
    self.on_execute_callback = _collect_from_input(on_execute_callback)
    self.on_failure_callback = _collect_from_input(on_failure_callback)
    self.on_success_callback = _collect_from_input(on_success_callback)
    self.on_retry_callback = _collect_from_input(on_retry_callback)
    self.on_skipped_callback = _collect_from_input(on_skipped_callback)
    self._pre_execute_hook = pre_execute
    self._post_execute_hook = post_execute

    self.start_date = timezone.convert_to_utc(start_date)
    self.end_date = timezone.convert_to_utc(end_date)
    self.executor = executor
    self.executor_config = executor_config or {}
    self.run_as_user = run_as_user

    # TODO:
    # self.retries = parse_retries(retries)
    self.retries = retries
    self.queue = queue
    self.pool = DEFAULT_POOL_NAME if pool is None else pool
    self.pool_slots = pool_slots
    if self.pool_slots < 1:
        dag_str = f" in dag {dag.dag_id}" if dag else ""
        raise ValueError(f"pool slots for {self.task_id}{dag_str} cannot be less than 1")
    if sla is not None:
        # `sla` is accepted only so legacy DAG files keep parsing; it has no effect.
        warnings.warn(
            "The SLA feature is removed in Airflow 3.0, replaced with Deadline Alerts in >=3.1",
            stacklevel=2,
        )

    # Validate the trigger rule eagerly so a bad value fails at parse time.
    try:
        TriggerRule(trigger_rule)
    except ValueError:
        raise ValueError(
            f"The trigger_rule must be one of {[rule.value for rule in TriggerRule]},"
            f"'{dag.dag_id if dag else ''}.{task_id}'; received '{trigger_rule}'."
        )

    self.trigger_rule: TriggerRule = TriggerRule(trigger_rule)

    self.depends_on_past: bool = depends_on_past
    self.ignore_first_depends_on_past: bool = ignore_first_depends_on_past
    self.wait_for_past_depends_before_skipping: bool = wait_for_past_depends_before_skipping
    self.wait_for_downstream: bool = wait_for_downstream
    if wait_for_downstream:
        # Waiting on downstream of the previous run implies depends_on_past.
        self.depends_on_past = True

    # Converted by setattr (see the _convert_retry_delay / _convert_max_retry_delay hooks).
    self.retry_delay = retry_delay  # type: ignore[assignment]
    self.retry_exponential_backoff = retry_exponential_backoff
    if max_retry_delay is not None:
        self.max_retry_delay = max_retry_delay

    self.resources = resources

    self.params = ParamsDict(params)

    self.priority_weight = priority_weight
    self.weight_rule = validate_and_load_priority_weight_strategy(weight_rule)

    self.max_active_tis_per_dag: int | None = max_active_tis_per_dag
    self.max_active_tis_per_dagrun: int | None = max_active_tis_per_dagrun
    self.do_xcom_push: bool = do_xcom_push
    self.map_index_template: str | None = map_index_template
    self.multiple_outputs: bool = multiple_outputs

    self.doc_md = doc_md
    self.doc_json = doc_json
    self.doc_yaml = doc_yaml
    self.doc_rst = doc_rst
    self.doc = doc

    self._task_display_name = task_display_name

    self.allow_nested_operators = allow_nested_operators

    self._logger_name = logger_name

    # Lineage
    self.inlets = _collect_from_input(inlets)
    self.outlets = _collect_from_input(outlets)

    if isinstance(self.template_fields, str):
        warnings.warn(
            f"The `template_fields` value for {self.task_type} is a string "
            "but should be a list or tuple of string. Wrapping it in a list for execution. "
            f"Please update {self.task_type} accordingly.",
            UserWarning,
            stacklevel=2,
        )
        self.template_fields = [self.template_fields]

    self.is_setup = False
    self.is_teardown = False
    if SetupTeardownContext.active:
        SetupTeardownContext.update_context_map(self)

    # We set self.dag right at the end as `_convert_dag` calls `dag.add_task` for us, and we need all the
    # other properties to be set at that point.
    if dag is not None:
        self.dag = dag

    validate_instance_args(self, BASEOPERATOR_ARGS_EXPECTED_TYPES)

    # Ensure priority_weight is within the valid range for the DB column.
    self.priority_weight = db_safe_priority(self.priority_weight)
def __eq__(self, other):
    """Equality: same concrete class and equal values for every attribute in ``_comps``."""
    if type(self) is not type(other):
        return False
    # getattr() instead of __dict__ so @property-backed values compare correctly.
    return all(getattr(self, name, None) == getattr(other, name, None) for name in self._comps)

def __ne__(self, other):
    return not self == other

def __hash__(self):
    """Hash derived from the same ``_comps`` attributes that drive ``__eq__``."""
    parts = [type(self)]
    for name in self._comps:
        value = getattr(self, name, None)
        try:
            hash(value)
        except TypeError:
            # Unhashable values fall back to their repr so the task stays hashable.
            parts.append(repr(value))
        else:
            parts.append(value)
    return hash(tuple(parts))
# /Composing Operators ---------------------------------------------

def __gt__(self, other):
    """
    Return [Operator] > [Outlet].

    If other is an attr annotated object it is set as an outlet of this Operator.
    """
    candidates = [other] if not isinstance(other, Iterable) else other
    for obj in candidates:
        if not attrs.has(obj):
            raise TypeError(f"Left hand side ({obj}) is not an outlet")
    self.add_outlets(candidates)

    return self

def __lt__(self, other):
    """
    Return [Inlet] > [Operator] or [Operator] < [Inlet].

    If other is an attr annotated object it is set as an inlet to this operator.
    """
    candidates = [other] if not isinstance(other, Iterable) else other
    for obj in candidates:
        if not attrs.has(obj):
            raise TypeError(f"{obj} cannot be an inlet")
    self.add_inlets(candidates)

    return self
def __deepcopy__(self, memo: dict[int, Any]):
    """Deep-copy the operator, shallow-copying the attrs named in ``shallow_copy_attrs``."""
    # Hack sorting double chained task lists by task_id to avoid hitting
    # max_depth on deepcopy operations.
    sys.setrecursionlimit(5000)  # TODO fix this in a better way
    cls = self.__class__
    result = cls.__new__(cls)
    # Register the clone in memo *before* copying attributes so cyclic
    # references back to self resolve to the clone.
    memo[id(self)] = result

    shallow_copy = tuple(cls.shallow_copy_attrs) + cls._base_operator_shallow_copy_attrs

    for k, v_org in self.__dict__.items():
        if k not in shallow_copy:
            v = copy.deepcopy(v_org, memo)
        else:
            v = copy.copy(v_org)

        # Bypass any setters, and set it on the object directly. This works since we are cloning ourself so
        # we know the type is already fine
        result.__dict__[k] = v
    return result
def __getstate__(self):
    """Pickle support: drop the non-picklable logger handle."""
    state = {**self.__dict__}
    state.pop("_log", None)
    return state

def __setstate__(self, state):
    self.__dict__ = state

def add_inlets(self, inlets: Iterable[Any]):
    """Set inlets to this operator."""
    for inlet in inlets:
        self.inlets.append(inlet)

def add_outlets(self, outlets: Iterable[Any]):
    """Define the outlets of this operator."""
    for outlet in outlets:
        self.outlets.append(outlet)
def get_dag(self) -> DAG | None:
    """Return the assigned Dag, or None when the task is not attached yet."""
    return self._dag

@property
def dag(self) -> DAG:
    """Returns the Operator's Dag if set, otherwise raises an error."""
    assigned = self._dag
    if not assigned:
        raise RuntimeError(f"Operator {self} has not been assigned to a Dag yet")
    return assigned

@dag.setter
def dag(self, dag: DAG | None) -> None:
    """Operators can be assigned to one Dag, one time. Repeat assignments to that same Dag are ok."""
    # Validation and dag.add_task() happen in the _convert__dag hook.
    self._dag = dag
def _convert__dag(self, dag: DAG | None) -> DAG | None:
    # Converter hook dispatched by __setattr__ whenever `_dag` is assigned:
    # validates the value and registers this task on the Dag.
    from airflow.sdk.definitions.dag import DAG

    if dag is None:
        return dag

    if not isinstance(dag, DAG):
        raise TypeError(f"Expected dag; received {dag.__class__.__name__}")
    # A task may belong to exactly one Dag; re-assigning the same Dag is a no-op.
    if self._dag is not None and self._dag is not dag:
        raise ValueError(f"The dag assigned to {self} can not be changed.")

    if self.__from_mapped:
        pass  # Don't add to dag -- the mapped task takes the place.
    elif dag.task_dict.get(self.task_id) is not self:
        dag.add_task(self)
    return dag
@staticmethod
def _convert_retries(retries: Any) -> int | None:
    """Normalize ``retries``: None becomes 0, anything else is coerced to int."""
    if retries is None:
        return 0
    # Exact int passes through untouched (bool is deliberately coerced below).
    if type(retries) == int:  # noqa: E721
        return retries
    try:
        return int(retries)
    except (TypeError, ValueError):
        raise TypeError(f"'retries' type must be int, not {type(retries).__name__}")

@staticmethod
def _convert_timedelta(value: float | timedelta | None) -> timedelta | None:
    """Pass through None/timedelta; interpret a bare number as seconds."""
    if value is None or isinstance(value, timedelta):
        return value
    return timedelta(seconds=value)

# Converter hooks dispatched by __setattr__ for the retry delay attributes.
_convert_retry_delay = _convert_timedelta
_convert_max_retry_delay = _convert_timedelta
@staticmethod
def _convert_resources(resources: dict[str, Any] | None) -> Resources | None:
    """Coerce a plain dict into a ``Resources`` instance; None passes through."""
    if resources is None:
        return None

    from airflow.sdk.definitions.operator_resources import Resources

    return resources if isinstance(resources, Resources) else Resources(**resources)

def _convert_is_setup(self, value: bool) -> bool:
    """
    Setter for is_setup property.

    :meta private:
    """
    # A task cannot be both a setup and a teardown.
    if self.is_teardown and value:
        raise ValueError(f"Cannot mark task '{self.task_id}' as setup; task is already a teardown.")
    return value

def _convert_is_teardown(self, value: bool) -> bool:
    """Setter hook for is_teardown; rejects tasks already marked as setup."""
    if self.is_setup and value:
        raise ValueError(f"Cannot mark task '{self.task_id}' as teardown; task is already a setup.")
    return value
@property
def task_display_name(self) -> str:
    """Human-friendly display name, falling back to ``task_id`` when unset."""
    if self._task_display_name:
        return self._task_display_name
    return self.task_id

def has_dag(self):
    """Return True if the Operator has been assigned to a Dag."""
    return self._dag is not None
def _set_xcomargs_dependencies(self) -> None:
    # Scan every template field and register an upstream relationship for any
    # XComArg value that was assigned during __init__.
    from airflow.sdk.definitions.xcom_arg import XComArg

    for f in self.template_fields:
        arg = getattr(self, f, NOTSET)
        if arg is not NOTSET:
            XComArg.apply_upstream_relationship(self, arg)

def _set_xcomargs_dependency(self, field: str, newvalue: Any) -> None:
    """
    Resolve upstream dependencies of a task.

    In this way passing an ``XComArg`` as value for a template field
    will result in creating upstream relation between two tasks.

    **Example**: ::

        with DAG(...):
            generate_content = GenerateContentOperator(task_id="generate_content")
            send_email = EmailOperator(..., html_content=generate_content.output)

        # This is equivalent to
        with DAG(...):
            generate_content = GenerateContentOperator(task_id="generate_content")
            send_email = EmailOperator(
                ..., html_content="{{ task_instance.xcom_pull('generate_content') }}"
            )
            generate_content >> send_email
    """
    from airflow.sdk.definitions.xcom_arg import XComArg

    # Only template fields participate in implicit XComArg wiring.
    if field not in self.template_fields:
        return
    XComArg.apply_upstream_relationship(self, newvalue)
def on_kill(self) -> None:
    """
    Override this method to clean up subprocesses when a task instance gets killed.

    Any use of the threading, subprocess or multiprocessing module within an
    operator needs to be cleaned up, or it will leave ghost processes behind.
    """

def __repr__(self):
    return f"<Task({self.task_type}): {self.task_id}>"

@property
def operator_class(self) -> type[BaseOperator]:  # type: ignore[override]
    """The concrete operator class of this task."""
    return type(self)

@property
def task_type(self) -> str:
    """@property: type of the task."""
    return type(self).__name__

@property
def operator_name(self) -> str:
    """@property: use a more friendly display name for the operator, if set."""
    try:
        return self.custom_operator_name  # type: ignore
    except AttributeError:
        return self.task_type

@property
def roots(self) -> list[BaseOperator]:
    """Required by DAGNode."""
    return [self]

@property
def leaves(self) -> list[BaseOperator]:
    """Required by DAGNode."""
    return [self]

@property
def output(self) -> XComArg:
    """Returns reference to XCom pushed by current operator."""
    from airflow.sdk.definitions.xcom_arg import XComArg

    return XComArg(operator=self)
@classmethod
def get_serialized_fields(cls):
    """Stringified Dags and operators contain exactly these fields."""
    # Computed once on first use and cached in cls.__serialized_fields.
    if not cls.__serialized_fields:
        from airflow.sdk.definitions._internal.contextmanager import DagContext

        # make sure the following "fake" task is not added to current active
        # dag in context, otherwise, it will result in
        # `RuntimeError: dictionary changed size during iteration`
        # Exception in SerializedDAG.serialize_dag() call.
        DagContext.push(None)
        # Start from the instance attrs of a throwaway task, subtract the
        # internal/non-serializable ones, then add back class-level defaults
        # and @property-backed names.
        cls.__serialized_fields = frozenset(
            vars(BaseOperator(task_id="test")).keys()
            - {
                "upstream_task_ids",
                "default_args",
                "dag",
                "_dag",
                "label",
                "_BaseOperator__instantiated",
                "_BaseOperator__init_kwargs",
                "_BaseOperator__from_mapped",
                "on_failure_fail_dagrun",
                "task_group",
                "_task_type",
                "operator_extra_links",
                "on_execute_callback",
                "on_failure_callback",
                "on_success_callback",
                "on_retry_callback",
                "on_skipped_callback",
            }
            | {  # Class level defaults, or `@property` need to be added to this list
                "start_date",
                "end_date",
                "task_type",
                "ui_color",
                "ui_fgcolor",
                "template_ext",
                "template_fields",
                "template_fields_renderers",
                "params",
                "is_setup",
                "is_teardown",
                "on_failure_fail_dagrun",
                "map_index_template",
                "start_trigger_args",
                "_needs_expansion",
                "start_from_trigger",
                "max_retry_delay",
                "has_on_execute_callback",
                "has_on_failure_callback",
                "has_on_success_callback",
                "has_on_retry_callback",
                "has_on_skipped_callback",
            }
        )
        DagContext.pop()

    return cls.__serialized_fields
def prepare_for_execution(self) -> Self:
    """Lock task for execution to disable custom action in ``__setattr__`` and return a copy."""
    clone = copy.copy(self)
    clone._lock_for_execution = True
    return clone

def serialize_for_task_group(self) -> tuple[DagAttributeTypes, Any]:
    """Serialize; required by DAGNode."""
    from airflow.serialization.enums import DagAttributeTypes

    return DagAttributeTypes.OP, self.task_id

def unmap(self, resolve: None | Mapping[str, Any]) -> Self:
    """
    Get the "normal" operator from the current operator.

    Since a BaseOperator is not mapped to begin with, this simply returns
    the original operator.

    :meta private:
    """
    return self

def expand_start_trigger_args(self, *, context: Context) -> StartTriggerArgs | None:
    """
    Get the start_trigger_args value of the current abstract operator.

    Since a BaseOperator is not mapped to begin with, this simply returns
    the original value of start_trigger_args.

    :meta private:
    """
    return self.start_trigger_args
def render_template_fields(
    self,
    context: Context,
    jinja_env: jinja2.Environment | None = None,
) -> None:
    """
    Template all attributes listed in *self.template_fields*.

    This mutates the attributes in-place and is irreversible.

    :param context: Context dict with values to apply on content.
    :param jinja_env: Jinja's environment to use for rendering.
    """
    env = jinja_env or self.get_template_env()
    self._do_render_template_fields(self, self.template_fields, context, env, set())

def pre_execute(self, context: Any):
    """Execute right before self.execute() is called."""

def execute(self, context: Context) -> Any:
    """
    Derive when creating an operator.

    The main method to execute the task. Context is the same dictionary used
    as when rendering jinja templates.

    Refer to get_template_context for more context.
    """
    raise NotImplementedError()

def post_execute(self, context: Any, result: Any = None):
    """
    Execute right after self.execute() is called.

    It is passed the execution context and any results returned by the operator.
    """
def defer(
    self,
    *,
    trigger: BaseTrigger,
    method_name: str,
    kwargs: dict[str, Any] | None = None,
    timeout: timedelta | int | float | None = None,
) -> NoReturn:
    """
    Mark this Operator "deferred", suspending its execution until the provided trigger fires an event.

    This is achieved by raising a special exception (TaskDeferred)
    which is caught in the main _execute_task wrapper. Triggers can send execution back to task or end
    the task instance directly. If the trigger will end the task instance itself, ``method_name`` should
    be None; otherwise, provide the name of the method that should be used when resuming execution in
    the task.
    """
    from airflow.sdk.exceptions import TaskDeferred

    # Always raises: control is handed back to the runner via the exception.
    raise TaskDeferred(trigger=trigger, method_name=method_name, kwargs=kwargs, timeout=timeout)
def resume_execution(self, next_method: str, next_kwargs: dict[str, Any] | None, context: Context):
    """Entrypoint method called by the Task Runner (instead of execute) when this task is resumed."""
    from airflow.sdk.exceptions import TaskDeferralError, TaskDeferralTimeout

    if next_kwargs is None:
        next_kwargs = {}
    # __fail__ is a special signal value for next_method that indicates
    # this task was scheduled specifically to fail.
    if next_method == TRIGGER_FAIL_REPR:
        next_kwargs = next_kwargs or {}
        traceback = next_kwargs.get("traceback")
        if traceback is not None:
            self.log.error("Trigger failed:\n%s", "\n".join(traceback))
        # A trigger timeout gets its own exception type so callers can treat
        # it differently from a plain trigger failure.
        if (error := next_kwargs.get("error", "Unknown")) == TriggerFailureReason.TRIGGER_TIMEOUT:
            raise TaskDeferralTimeout(error)
        raise TaskDeferralError(error)
    # Grab the callable off the Operator/Task and add in any kwargs
    execute_callable = getattr(self, next_method)

    return execute_callable(context, **next_kwargs)
def dry_run(self) -> None:
    """Perform dry run for the operator - just render template fields."""
    self.log.info("Dry run")
    for field_name in self.template_fields:
        try:
            content = getattr(self, field_name)
        except AttributeError:
            raise AttributeError(
                f"{field_name!r} is configured as a template field "
                f"but {self.task_type} does not have this attribute."
            )

        if content and isinstance(content, str):
            self.log.info("Rendering template for %s", field_name)
            self.log.info(content)
@property
def has_on_execute_callback(self) -> bool:
    """Whether any execute callbacks are registered."""
    return bool(self.on_execute_callback)

@property
def has_on_failure_callback(self) -> bool:
    """Whether any failure callbacks are registered."""
    return bool(self.on_failure_callback)

@property
def has_on_success_callback(self) -> bool:
    """Whether any success callbacks are registered."""
    return bool(self.on_success_callback)

@property
def has_on_retry_callback(self) -> bool:
    """Whether any retry callbacks are registered."""
    return bool(self.on_retry_callback)

@property
def has_on_skipped_callback(self) -> bool:
    """Whether any skipped callbacks are registered."""
    return bool(self.on_skipped_callback)
def chain(*tasks: DependencyMixin | Sequence[DependencyMixin]) -> None:
    r"""
    Given a number of tasks, builds a dependency chain.

    Accepts BaseOperator (aka tasks), EdgeModifiers (aka Labels), XComArg,
    TaskGroups, or lists containing any mix of these types. Consecutive
    arguments are wired upstream -> downstream; when two adjacent arguments
    are both lists they are linked pairwise and must have the same length.

    For example::

        chain(t1, [t2, t3], [t4, t5], t6)

    is equivalent to::

         / -> t2 -> t4 \
        t1               -> t6
         \ -> t3 -> t5 /

    i.e. ``t1 >> t2 >> t4 >> t6`` and ``t1 >> t3 >> t5 >> t6``. The same works
    with task-decorated functions (XComArgs), TaskGroups, and Labels, e.g.::

        chain(t1, [Label("branch one"), Label("branch two")], [x1(), x2()], task_group1, x3())

    :param tasks: Individual and/or list of tasks, EdgeModifiers, XComArgs, or TaskGroups to set dependencies
    """
    for upstream, downstream in zip(tasks, tasks[1:]):
        # A single dependency-capable object on either side links directly.
        if isinstance(upstream, DependencyMixin):
            upstream.set_downstream(downstream)
        elif isinstance(downstream, DependencyMixin):
            downstream.set_upstream(upstream)
        elif isinstance(upstream, Sequence) and isinstance(downstream, Sequence):
            # Two adjacent lists are linked element-wise.
            if len(upstream) != len(downstream):
                raise ValueError(
                    f"Chain not supported for different length Iterable. "
                    f"Got {len(upstream)} and {len(downstream)}."
                )
            for up, down in zip(upstream, downstream):
                up.set_downstream(down)
        else:
            raise TypeError(
                f"Chain not supported between instances of {type(upstream)} and {type(downstream)}"
            )
def cross_downstream(
    from_tasks: Sequence[DependencyMixin],
    to_tasks: DependencyMixin | Sequence[DependencyMixin],
):
    r"""
    Set downstream dependencies for all tasks in from_tasks to all tasks in to_tasks.

    Works with classic operators/sensors, XComArgs, or any mix of the two. E.g.::

        cross_downstream(from_tasks=[t1, t2, t3], to_tasks=[t4, t5, t6])

    is equivalent to::

        t1 ---> t4
           \ /
        t2 -X -> t5
           / \
        t3 ---> t6

    i.e. every task in ``from_tasks`` becomes an upstream of every task in
    ``to_tasks``.

    :param from_tasks: List of tasks or XComArgs to start from.
    :param to_tasks: List of tasks or XComArgs to set as downstream dependencies.
    """
    # set_downstream already fans out over a list, so one call per source suffices.
    for source in from_tasks:
        source.set_downstream(to_tasks)
def chain_linear(*elements: DependencyMixin | Sequence[DependencyMixin]):
    """
    Simplify task dependency definition.

    E.g.: suppose you want precedence like so::

            ╭─op2─╮ ╭─op4─╮
        op1─┤     ├─├─op5─┤─op7
            ╰-op3─╯ ╰-op6─╯

    Then you can accomplish like so::

        chain_linear(op1, [op2, op3], [op4, op5, op6], op7)

    :param elements: a list of operators / lists of operators
    """
    if not elements:
        raise ValueError("No tasks provided; nothing to do.")

    previous = None
    made_any_dependency = False
    for element in elements:
        if isinstance(element, EdgeModifier):
            raise ValueError("Labels are not supported by chain_linear")
        if previous is not None:
            for upstream in previous:
                upstream >> element
                made_any_dependency = True
        # Normalize: a bare task becomes a one-element group for the next hop.
        previous = [element] if isinstance(element, DependencyMixin) else element

    if not made_any_dependency:
        raise ValueError("No dependencies were set. Did you forget to expand with `*`?")
| BaseOperator |
python | falconry__falcon | falcon/bench/bench.py | {
"start": 1775,
"end": 11334
class StartResponseMockLite:
    """Mock object representing a WSGI `start_response` callable (PEP 3333)."""

    def __init__(self):
        # Invocation counter plus the most recent arguments received.
        self._num_calls = 0
        self.status = None
        self.headers = None
        self.exc_info = None

    def __call__(self, status, headers, exc_info=None):
        """Implement the PEP-3333 `start_response` protocol."""
        self._num_calls += 1
        self.status = status
        self.headers = headers
        self.exc_info = exc_info

    @property
    def call_count(self):
        """Number of times this mock has been invoked."""
        return self._num_calls
def bench(func, iterations, stat_memory):
    """Time ``func`` over ``iterations`` runs, optionally measuring heap growth.

    Returns ``(sec_per_req, heap_diff)`` where ``heap_diff`` is None unless
    guppy (heapy) is available and ``stat_memory`` is truthy.
    """
    gc.collect()

    measure_heap = heapy and stat_memory
    heap_before = heapy.heap() if measure_heap else None

    total_sec = timeit.timeit(func, setup=gc.enable, number=iterations)

    heap_diff = (heapy.heap() - heap_before) if measure_heap else None

    # Decimal via str() keeps full precision of the float measurements.
    sec_per_req = Decimal(str(total_sec)) / Decimal(str(iterations))

    return (sec_per_req, heap_diff)
def determine_iterations(func):
    """Find an iteration count for ``func`` so a timing run lands in the target window."""
    # NOTE(kgriffs): Algorithm adapted from IPython's magic timeit
    # function to determine iterations so that 0.2 <= total time < 2.0
    iterations = ITER_DETECTION_STARTING
    for __ in range(1, ITER_DETECTION_MAX_ATTEMPTS):
        gc.collect()

        total_sec = timeit.timeit(func, setup=gc.enable, number=int(iterations))
        if total_sec >= ITER_DETECTION_DURATION_MIN:
            # Sanity check only (dev tool); stripped under `python -O`.
            assert total_sec < ITER_DETECTION_DURATION_MAX
            break

        iterations *= ITER_DETECTION_MULTIPLIER

    return int(iterations)
def profile(name, env, filename=None, verbose=False):
    """Profile one framework's app under cProfile (or pprofile when verbose).

    :param name: Framework name understood by ``create_bench``.
    :param env: WSGI environ dict to exercise the app with.
    :param filename: Optional stats output file; stats print to stdout when None.
    :param verbose: Use line-granular pprofile instead of cProfile (much slower).
    """
    if filename:
        filename = name + '-' + filename
        print('Profiling %s ==> %s' % (name, filename))
    else:
        filename = None

        title = name + ' profile'
        print()
        print('=' * len(title))
        print(title)
        print('=' * len(title))

    func = create_bench(name, env)

    gc.collect()

    num_iterations = 100000

    if PYPY:
        print('JIT warmup...')

        # TODO(kgriffs): Measure initial time, and keep iterating until
        # performance increases and then steadies
        for x in range(num_iterations * JIT_WARMING_MULTIPLIER):
            func()

        print('Ready.')

    # The loop is expressed as source so the profiler attributes time to func().
    code = 'for x in range({0}): func()'.format(num_iterations)

    if verbose:
        if pprofile is None:
            print('pprofile not found. Please install pprofile and try again.')
            return

        pprofile.runctx(code, locals(), globals(), filename=filename)
    else:
        cProfile.runctx(code, locals(), globals(), sort='tottime', filename=filename)
def profile_vmprof(name, env):
    """Profile one framework's app under vmprof and upload the result to vmprof.com.

    :param name: Framework name understood by ``create_bench``.
    :param env: WSGI environ dict to exercise the app with.
    """
    if vmprof is None:
        print('vmprof not found. Please install vmprof and try again.')
        return

    func = create_bench(name, env)
    gc.collect()

    #
    # Based on: https://github.com/vmprof/vmprof-python/blob/master/vmprof/__main__.py
    #
    # NOTE(review): the profile file is created with delete=False and never
    # removed; presumably intentional so the upload artifact persists -- confirm.
    prof_file = tempfile.NamedTemporaryFile(delete=False)
    filename = prof_file.name

    vmprof.enable(prof_file.fileno())

    try:
        for __ in range(1000000):
            func()
    except (KeyboardInterrupt, SystemExit):
        # Equivalent to the previous "catch BaseException, re-raise everything
        # except KI/SE" dance: allow the operator to interrupt a long run and
        # still disable the profiler and upload the partial profile below.
        pass

    vmprof.disable()

    service = Service('vmprof.com')
    service.post(
        {
            Service.FILE_CPU_PROFILE: filename,
            Service.FILE_JIT_PROFILE: filename + '.jit',
            'argv': ' '.join(sys.argv[:]),
            'VM': platform.python_implementation(),
        }
    )

    prof_file.close()
def exhaust(iterator_or_generator):
    """Consume and discard every item of the given iterable."""
    for _ in iterator_or_generator:
        pass
def create_bench(name, env):
    """Return a zero-arg callable that runs one request against framework *name*.

    :param name: Framework name, e.g. 'falcon' or 'falcon-ext'; mapped to a
        factory in the ``create`` module ('falcon-ext' -> ``create.falcon_ext``).
    :param env: WSGI environ dict passed to the app on every invocation.
    :raises ImportError: If the framework's library is not installed (raised
        lazily by the factory); callers rely on this to skip frameworks.
    """
    srmock = StartResponseMockLite()

    function = name.lower().replace('-', '_')
    # Look the factory up directly rather than eval()-ing a constructed
    # expression string -- same behavior, no code-injection surface.
    app = getattr(create, function)(BODY, HEADERS)

    def bench():
        app(env, srmock)
        assert srmock.status == '200 OK'

    def bench_generator():
        # Generator-style WSGI apps only set the status once fully consumed.
        exhaust(app(env, srmock))
        assert srmock.status == '200 OK'

    if inspect.isgeneratorfunction(app):
        return bench_generator
    else:
        return bench
def consolidate_datasets(datasets):
    """Collapse per-trial datasets to the fastest recorded time per framework."""
    best = {}
    for trial in datasets:
        for name, sec_per_req, _ in trial:
            best.setdefault(name, []).append(sec_per_req)

    return [(name, min(values)) for name, values in best.items()]
def round_to_int(dec):
    """Round a Decimal to the nearest int (Decimal's default banker's rounding)."""
    rounded = dec.to_integral_value()
    return int(rounded)

def avg(array):
    """Arithmetic mean of a non-empty sequence of numbers."""
    total = sum(array)
    return total / len(array)
def hello_env():
    """WSGI environ for the simple '/hello' route benchmarks."""
    headers = {'Content-Type': 'application/json'}
    return helpers.create_environ(
        '/hello/584/test', query_string='limit=10&thing=ab', headers=headers
    )

def queues_env():
    """WSGI environ approximating a realistic queues-API request (falcon-ext)."""
    headers = {'Content-Type': 'application/json'}
    path = '/v1/852809/queues/0fd4c8c6-bd72-11e2-8e47-db5ebd4c8125/claims/db5ebd4c8125'
    query = 'limit=10&thing=a+b&x=%23%24'

    return helpers.create_environ(path, query_string=query, headers=headers)

def get_env(framework):
    """Pick the environ that matches the framework under test."""
    if framework == 'falcon-ext':
        return queues_env()
    return hello_env()
def run(frameworks, trials, iterations, stat_memory):
    """Benchmark each framework over the requested number of trials.

    :param frameworks: Mutable list of framework names; entries whose library
        fails to import are removed in place.
    :param trials: Number of shuffled benchmark passes to run.
    :param iterations: Fixed iteration count, or falsy to auto-detect per framework.
    :param stat_memory: Collect heap stats via guppy when available.
    :returns: One dataset per trial, each a list of (name, sec_per_req, heap_diff).
    """
    # Skip any frameworks that are not installed.
    # NOTE: iterate over a snapshot -- removing entries from `frameworks`
    # while iterating it would silently skip the element following each
    # removed one (the bug in the previous `del` + live-iteration version).
    for name in list(frameworks):
        try:
            create_bench(name, hello_env())
        except ImportError as ex:
            print(ex)
            print('Skipping missing library: ' + name)
            frameworks.remove(name)

    print()

    datasets = []
    if not frameworks:
        print('Nothing to do.\n')
        return datasets

    benchmarks = []
    for name in frameworks:
        bm = create_bench(name, get_env(name))
        bm_iterations = iterations if iterations else determine_iterations(bm)

        if PYPY:
            print('{}: JIT warmup'.format(name))

            # TODO(kgriffs): Measure initial time, and keep iterating until
            # performance increases and then steadies
            bench(bm, bm_iterations * JIT_WARMING_MULTIPLIER, False)
            # Re-detect after warmup since the JIT changes per-call cost.
            bm_iterations = iterations if iterations else determine_iterations(bm)

        benchmarks.append((name, bm_iterations, bm))
        print('{}: {} iterations'.format(name, bm_iterations))

    print()

    for r in range(trials):
        # Shuffle to average out ordering effects (cache warmth, GC pressure).
        random.shuffle(frameworks)

        sys.stdout.write('Benchmarking, Trial %d of %d' % (r + 1, trials))
        sys.stdout.flush()

        dataset = []
        for name, bm_iterations, bm in benchmarks:
            sec_per_req, heap_diff = bench(bm, bm_iterations, stat_memory)
            dataset.append((name, sec_per_req, heap_diff))

            sys.stdout.write('.')
            sys.stdout.flush()

        datasets.append(dataset)
        print('done.')

    return datasets
def main():
frameworks = [
'bottle',
'django',
'falcon',
'falcon-ext',
'flask',
'pecan',
'werkzeug',
]
parser = argparse.ArgumentParser(description='Falcon benchmark runner')
parser.add_argument(
'-b',
'--benchmark',
type=str,
action='append',
choices=frameworks,
dest='frameworks',
nargs='+',
)
parser.add_argument('-i', '--iterations', type=int, default=0)
parser.add_argument('-t', '--trials', type=int, default=10)
parser.add_argument(
'-p', '--profile', type=str, choices=['standard', 'verbose', 'vmprof']
)
parser.add_argument('-o', '--profile-output', type=str, default=None)
parser.add_argument('-m', '--stat-memory', action='store_true')
args = parser.parse_args()
if args.stat_memory and heapy is None:
print('WARNING: Guppy not installed; memory stats are unavailable.\n')
if args.frameworks:
frameworks = args.frameworks
# Normalize frameworks type
normalized_frameworks = []
for one_or_many in frameworks:
if isinstance(one_or_many, list):
normalized_frameworks.extend(one_or_many)
else:
normalized_frameworks.append(one_or_many)
frameworks = normalized_frameworks
# Profile?
if args.profile:
framework = 'falcon-ext'
if args.profile == 'vmprof':
profile_vmprof(framework, get_env(framework))
else:
profile(
framework,
get_env(framework),
filename=args.profile_output,
verbose=(args.profile == 'verbose'),
)
print()
return
# Otherwise, benchmark
datasets = run(frameworks, args.trials, args.iterations, args.stat_memory)
if not datasets:
return
dataset = consolidate_datasets(datasets)
dataset = sorted(dataset, key=lambda r: r[1])
baseline = dataset[-1][1]
print('\nResults:\n')
for i, (name, sec_per_req) in enumerate(dataset):
req_per_sec = round_to_int(Decimal(1) / sec_per_req)
us_per_req = sec_per_req * Decimal(10**6)
factor = round_to_int(baseline / sec_per_req)
print(
'{3}. {0:.<20s}{1:.>06d} req/sec or {2: >3.2f} μs/req ({4}x)'.format(
name, req_per_sec, us_per_req, i + 1, factor
)
)
if heapy and args.stat_memory:
print()
for name, _, heap_diff in datasets[0]:
title = 'Memory change induced by ' + name
print()
print('=' * len(title))
print(title)
print('=' * len(title))
print(heap_diff)
print()
if __name__ == '__main__':
main()
| StartResponseMockLite |
python | walkccc__LeetCode | solutions/132. Palindrome Partitioning II/132.py | {
"start": 0,
"end": 685
} | class ____:
def minCut(self, s: str) -> int:
n = len(s)
# isPalindrome[i][j] := True if s[i..j] is a palindrome
isPalindrome = [[True] * n for _ in range(n)]
# dp[i] := the minimum cuts needed for a palindrome partitioning of s[0..i]
dp = [n] * n
for l in range(2, n + 1):
i = 0
for j in range(l - 1, n):
isPalindrome[i][j] = s[i] == s[j] and isPalindrome[i + 1][j - 1]
i += 1
for i in range(n):
if isPalindrome[0][i]:
dp[i] = 0
continue
# Try all the possible partitions.
for j in range(i):
if isPalindrome[j + 1][i]:
dp[i] = min(dp[i], dp[j] + 1)
return dp[-1]
| Solution |
python | pandas-dev__pandas | pandas/tests/indexes/categorical/test_indexing.py | {
"start": 11558,
"end": 12585
} | class ____:
def test_where(self, listlike_box):
klass = listlike_box
i = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False)
cond = [True] * len(i)
expected = i
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * (len(i) - 1)
expected = CategoricalIndex([np.nan] + i[1:].tolist(), categories=i.categories)
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_where_non_categories(self):
ci = CategoricalIndex(["a", "b", "c", "d"])
mask = np.array([True, False, True, False])
result = ci.where(mask, 2)
expected = Index(["a", 2, "c", 2], dtype=object)
tm.assert_index_equal(result, expected)
msg = "Cannot setitem on a Categorical with a new category"
with pytest.raises(TypeError, match=msg):
# Test the Categorical method directly
ci._data._where(mask, 2)
| TestWhere |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 88234,
"end": 89177
} | class ____(GeneratedAirbyteSource):
@public
def __init__(self, name: str, api_token: str, workspace: str, start_date: Optional[str] = None):
"""Airbyte Source for Orbit.
Documentation can be found at https://docs.airbyte.com/integrations/sources/orbit
Args:
name (str): The name of the destination.
api_token (str): Authorizes you to work with Orbit workspaces associated with the token.
workspace (str): The unique name of the workspace that your API token is associated with.
start_date (Optional[str]): Date in the format 2022-06-26. Only load members whose last activities are after this date.
"""
self.api_token = check.str_param(api_token, "api_token")
self.workspace = check.str_param(workspace, "workspace")
self.start_date = check.opt_str_param(start_date, "start_date")
super().__init__("Orbit", name)
| OrbitSource |
python | Netflix__metaflow | metaflow/tutorials/04-playlist-plus/playlist.py | {
"start": 506,
"end": 5021
} | class ____(FlowSpec):
"""
The next version of our playlist generator that adds a 'hint' parameter to
choose a bonus movie closest to the 'hint'.
The flow performs the following steps:
1) Load the genre-specific statistics from the MovieStatsFlow.
2) In parallel branches:
- A) Build a playlist from the top films in the requested genre.
- B) Choose a bonus movie that has the closest string edit distance to
the user supplied hint.
3) Join the two to create a movie playlist and display it.
"""
genre = Parameter(
"genre", help="Filter movies for a particular genre.", default="Sci-Fi"
)
hint = Parameter(
"hint",
help="Give a hint to the bonus movie algorithm.",
default="Metaflow Release",
)
recommendations = Parameter(
"recommendations",
help="The number of movies recommended for the playlist.",
default=5,
)
@step
def start(self):
"""
Use the Metaflow client to retrieve the latest successful run from our
MovieStatsFlow and assign them as data artifacts in this flow.
"""
# Load the analysis from the MovieStatsFlow.
from metaflow import Flow, get_metadata
# Print metadata provider
print("Using metadata provider: %s" % get_metadata())
# Load the analysis from the MovieStatsFlow.
run = Flow("MovieStatsFlow").latest_successful_run
print("Using analysis from '%s'" % str(run))
# Get the dataframe from the start step before we sliced into into
# genre-specific dataframes.
self.dataframe = run["start"].task.data.dataframe
# Also grab the summary statistics.
self.genre_stats = run.data.genre_stats
# Compute our two recommendation types in parallel.
self.next(self.bonus_movie, self.genre_movies)
@conda(libraries={"editdistance": "0.5.3"})
@step
def bonus_movie(self):
"""
Use the user supplied 'hint' argument to choose a bonus movie that has
the closest string edit distance to the hint.
This step uses 'conda' to isolate the environment. Note that the
package 'editdistance' need not be installed in your python
environment.
"""
import editdistance
# Define a helper function to compute the similarity between two
# strings.
def _edit_distance(movie_title):
return editdistance.eval(self.hint, movie_title)
# Compute the distance and take the argmin to find the closest title.
distance = [
_edit_distance(movie_title) for movie_title in self.dataframe["movie_title"]
]
index = distance.index(min(distance))
self.bonus = (
self.dataframe["movie_title"][index],
self.dataframe["genres"][index],
)
self.next(self.join)
@step
def genre_movies(self):
"""
Select the top performing movies from the use specified genre.
"""
from random import shuffle
# For the genre of interest, generate a potential playlist using only
# highest gross box office titles (i.e. those in the last quartile).
genre = self.genre.lower()
if genre not in self.genre_stats:
self.movies = []
else:
df = self.genre_stats[genre]["dataframe"]
quartiles = self.genre_stats[genre]["quartiles"]
self.movies = [
df["movie_title"][i]
for i, g in enumerate(df["gross"])
if g >= quartiles[-1]
]
# Shuffle the content.
shuffle(self.movies)
self.next(self.join)
@step
def join(self, inputs):
"""
Join our parallel branches and merge results.
"""
self.playlist = inputs.genre_movies.movies
self.bonus = inputs.bonus_movie.bonus
self.next(self.end)
@step
def end(self):
"""
This step simply prints out the playlist.
"""
# Print the playlist.
print("Playlist for movies in genre '%s'" % self.genre)
for pick, movie in enumerate(self.playlist, start=1):
print("Pick %d: '%s'" % (pick, movie))
if pick >= self.recommendations:
break
print("Bonus Pick: '%s' from '%s'" % (self.bonus[0], self.bonus[1]))
if __name__ == "__main__":
PlayListFlow()
| PlayListFlow |
python | great-expectations__great_expectations | great_expectations/expectations/core/expect_column_values_to_not_be_in_set.py | {
"start": 2136,
"end": 15347
} | class ____(ColumnMapExpectation):
__doc__ = f"""{EXPECTATION_SHORT_DESCRIPTION}
ExpectColumnValuesToNotBeInSet is a \
Column Map Expectation.
Column Map Expectations are one of the most common types of Expectation.
They are evaluated for a single column and ask a yes/no question for every row in that column.
Based on the result, they then calculate the percentage of rows that gave a positive answer. If the percentage is high enough, the Expectation considers that data valid.
Args:
column (str): \
{COLUMN_DESCRIPTION}
value_set (set-like): \
{VALUE_SET_DESCRIPTION}
Other Parameters:
mostly (None or a float between 0 and 1): \
{MOSTLY_DESCRIPTION} \
For more detail, see [mostly](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#mostly).
result_format (str or None): \
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
severity (str or None): \
{FAILURE_SEVERITY_DESCRIPTION} \
For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity).
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta.
See Also:
[ExpectColumnValuesToBeInSet](https://greatexpectations.io/expectations/expect_column_values_to_be_in_set)
Supported Data Sources:
[{SUPPORTED_DATA_SOURCES[0]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[1]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[2]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[3]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[4]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[5]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[6]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[7]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[8]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[9]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[10]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[11]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[12]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[13]}](https://docs.greatexpectations.io/docs/application_integration_support/)
Data Quality Issues:
{DATA_QUALITY_ISSUES[0]}
Example Data:
test test2
0 1 1
1 2 1
2 4 1
Code Examples:
Passing Case:
Input:
ExpectColumnValuesToNotBeInSet(
column="test2",
value_set=[2, 4]
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"element_count": 3,
"unexpected_count": 0,
"unexpected_percent": 0.0,
"partial_unexpected_list": [],
"missing_count": 0,
"missing_percent": 0.0,
"unexpected_percent_total": 0.0,
"unexpected_percent_nonmissing": 0.0
}},
"meta": {{}},
"success": true
}}
Failing Case:
Input:
ExpectColumnValuesToNotBeInSet(
column="test",
value_set=[2, 4],
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"element_count": 3,
"unexpected_count": 2,
"unexpected_percent": 66.66666666666666,
"partial_unexpected_list": [
2,
4
],
"missing_count": 0,
"missing_percent": 0.0,
"unexpected_percent_total": 66.66666666666666,
"unexpected_percent_nonmissing": 66.66666666666666
}},
"meta": {{}},
"success": false
}}
""" # noqa: E501 # FIXME CoP
value_set: ValueSetField
library_metadata: ClassVar[Dict[str, Union[str, list, bool]]] = {
"maturity": "production",
"tags": ["core expectation", "column map expectation"],
"contributors": [
"@great_expectations",
],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
_library_metadata = library_metadata
map_metric = "column_values.not_in_set"
success_keys = (
"value_set",
"mostly",
)
args_keys = (
"column",
"value_set",
)
class Config:
title = "Expect column values to not be in set"
@staticmethod
def schema_extra(
schema: Dict[str, Any], model: Type[ExpectColumnValuesToNotBeInSet]
) -> None:
ColumnMapExpectation.Config.schema_extra(schema, model)
schema["properties"]["metadata"]["properties"].update(
{
"data_quality_issues": {
"title": "Data Quality Issues",
"type": "array",
"const": DATA_QUALITY_ISSUES,
},
"library_metadata": {
"title": "Library Metadata",
"type": "object",
"const": model._library_metadata,
},
"short_description": {
"title": "Short Description",
"type": "string",
"const": EXPECTATION_SHORT_DESCRIPTION,
},
"supported_data_sources": {
"title": "Supported Data Sources",
"type": "array",
"const": SUPPORTED_DATA_SOURCES,
},
}
)
@classmethod
def _prescriptive_template(
cls,
renderer_configuration: RendererConfiguration,
) -> RendererConfiguration:
add_param_args: AddParamArgs = (
("column", RendererValueType.STRING),
("value_set", RendererValueType.ARRAY),
("mostly", RendererValueType.NUMBER),
)
for name, param_type in add_param_args:
renderer_configuration.add_param(name=name, param_type=param_type)
params = renderer_configuration.params
template_str = ""
if params.value_set:
array_param_name = "value_set"
param_prefix = "v__"
renderer_configuration = cls._add_array_params(
array_param_name=array_param_name,
param_prefix=param_prefix,
renderer_configuration=renderer_configuration,
)
value_set_str: str = cls._get_array_string(
array_param_name=array_param_name,
param_prefix=param_prefix,
renderer_configuration=renderer_configuration,
)
template_str += f"values must not belong to this set: {value_set_str}"
if params.mostly and params.mostly.value < 1.0:
renderer_configuration = cls._add_mostly_pct_param(
renderer_configuration=renderer_configuration
)
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
if renderer_configuration.include_column_name:
template_str = f"$column {template_str}"
renderer_configuration.template_str = template_str
return renderer_configuration
@classmethod
@renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)
@render_suite_parameter_string
def _prescriptive_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
):
renderer_configuration = RendererConfiguration(
configuration=configuration,
result=result,
runtime_configuration=runtime_configuration,
)
params = substitute_none_for_missing(
renderer_configuration.kwargs,
[
"column",
"value_set",
"mostly",
"row_condition",
"condition_parser",
],
)
if params["value_set"] is None or len(params["value_set"]) == 0:
values_string = "[ ]"
else:
for i, v in enumerate(params["value_set"]):
params[f"v__{i!s}"] = v
values_string = " ".join([f"$v__{i!s}" for i, v in enumerate(params["value_set"])])
template_str = f"values must not belong to this set: {values_string}"
if params["mostly"] is not None:
if isinstance(params["mostly"], (int, float)) and params["mostly"] < 1.0:
params["mostly_pct"] = num_to_str(params["mostly"] * 100, no_scientific=True)
# params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".") # noqa: E501 # FIXME CoP
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
if renderer_configuration.include_column_name:
template_str = f"$column {template_str}"
styling = runtime_configuration.get("styling", {}) if runtime_configuration else {}
if params["row_condition"] is not None:
conditional_template_str = parse_row_condition_string(params["row_condition"])
template_str, styling = _style_row_condition(
conditional_template_str,
template_str,
params,
styling,
)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
def _pandas_column_values_not_in_set( # noqa: PLR0913 # FIXME CoP
self,
series: pd.Series,
metrics: Dict,
metric_domain_kwargs: dict,
metric_value_kwargs: dict,
runtime_configuration: Optional[dict] = None,
filter_column_isnull: bool = True,
):
from great_expectations.execution_engine import PandasExecutionEngine
value_set = metric_value_kwargs["value_set"]
if value_set is None:
# Vacuously true
return np.ones(len(series), dtype=np.bool_)
if pd.api.types.is_datetime64_any_dtype(series):
parsed_value_set = PandasExecutionEngine.parse_value_set(value_set=value_set)
else:
parsed_value_set = value_set
return pd.DataFrame({"column_values.not_in_set": ~series.isin(parsed_value_set)})
| ExpectColumnValuesToNotBeInSet |
python | sphinx-doc__sphinx | sphinx/builders/_epub_base.py | {
"start": 2183,
"end": 2263
} | class ____(NamedTuple):
href: str
id: str
media_type: str
| ManifestItem |
python | coleifer__peewee | playhouse/apsw_ext.py | {
"start": 4965,
"end": 5018
} | class ____(_DecimalField):
db_value = nh
| DecimalField |
python | Netflix__metaflow | metaflow/plugins/cards/exception.py | {
"start": 163,
"end": 638
} | class ____(MetaflowException):
"""
This exception is raised with MetaflowCard class is not present for a particular card type.
"""
headline = "MetaflowCard not found"
def __init__(self, card_name):
exc = traceback.format_exc()
msg = (
"MetaflowCard named %s not found. Check the `type` "
"attribute in @card" % (card_name)
)
super(CardClassFoundException, self).__init__(msg)
| CardClassFoundException |
python | astropy__astropy | astropy/io/fits/column.py | {
"start": 15223,
"end": 15613
} | class ____(_FormatP):
"""Carries type description of the Q format for variable length arrays.
The Q format is like the P format but uses 64-bit integers in the array
descriptors, allowing for heaps stored beyond 2GB into a file.
"""
_format_code = "Q"
_format_re = re.compile(_FormatP._format_re_template.format(_format_code))
_descriptor_format = "2i8"
| _FormatQ |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_reflection.py | {
"start": 6939,
"end": 9593
} | class ____(fixtures.TablesTest):
__sparse_driver_backend__ = True
__requires__ = ("index_reflection",)
@classmethod
def define_tables(cls, metadata):
tt = Table(
"test_table",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
Column("data2", String(50)),
)
Index("my_idx", tt.c.data)
if testing.requires.schemas.enabled:
tt = Table(
"test_table",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(50)),
schema=config.test_schema,
)
Index("my_idx_s", tt.c.data)
kind = testing.combinations("dialect", "inspector", argnames="kind")
def _has_index(self, kind, conn):
if kind == "dialect":
return lambda *a, **k: config.db.dialect.has_index(conn, *a, **k)
else:
return inspect(conn).has_index
@kind
def test_has_index(self, kind, connection, metadata):
meth = self._has_index(kind, connection)
assert meth("test_table", "my_idx")
assert not meth("test_table", "my_idx_s")
assert not meth("nonexistent_table", "my_idx")
assert not meth("test_table", "nonexistent_idx")
assert not meth("test_table", "my_idx_2")
assert not meth("test_table_2", "my_idx_3")
idx = Index("my_idx_2", self.tables.test_table.c.data2)
tbl = Table(
"test_table_2",
metadata,
Column("foo", Integer),
Index("my_idx_3", "foo"),
)
idx.create(connection)
tbl.create(connection)
try:
if kind == "inspector":
assert not meth("test_table", "my_idx_2")
assert not meth("test_table_2", "my_idx_3")
meth.__self__.clear_cache()
assert meth("test_table", "my_idx_2") is True
assert meth("test_table_2", "my_idx_3") is True
finally:
tbl.drop(connection)
idx.drop(connection)
@testing.requires.schemas
@kind
def test_has_index_schema(self, kind, connection):
meth = self._has_index(kind, connection)
assert meth("test_table", "my_idx_s", schema=config.test_schema)
assert not meth("test_table", "my_idx", schema=config.test_schema)
assert not meth(
"nonexistent_table", "my_idx_s", schema=config.test_schema
)
assert not meth(
"test_table", "nonexistent_idx_s", schema=config.test_schema
)
| HasIndexTest |
python | pytorch__pytorch | test/inductor/test_triton_syntax.py | {
"start": 179,
"end": 1898
} | class ____(TestCase):
@requires_gpu()
def test_triton_sqrt(self):
# https://github.com/pytorch/pytorch/issues/142328
import math
import torch.nn as nn
def newtonschulz5(G, steps: int, eps=1e-7):
assert len(G.shape) == 2
a, b, c = (3.4445, -4.7750, 2.0315)
X = G.to(
torch.bfloat16
if torch.cuda.is_bf16_supported(including_emulation=False)
else torch.float16
)
X /= X.norm() + eps # ensure top singular value <= 1
if G.size(0) > G.size(1):
X = X.T
for _ in range(steps):
A = X @ X.T
B = b * A + c * A @ A
X = a * X + B @ X
if G.size(0) > G.size(1):
X = X.T
return X
@torch.compile(backend="inductor")
def scaled_newton_schulz(G, steps: int):
shape = G.shape
dtype = G.dtype
G = G.reshape(shape[0], -1)
G = newtonschulz5(G, steps)
G = G.reshape(shape).type(dtype)
G = G * math.sqrt(max(1, shape[0] / G[0].numel()))
return G
model = nn.Sequential(
nn.Linear(16, 16, bias=False),
nn.Linear(16, 32, bias=False),
).to(device=torch.device(GPU_TYPE))
loss = model(torch.randn(4, 16, device=torch.device(GPU_TYPE))).sum()
loss.backward()
scaled_newton_schulz(model[0].weight.grad, 6)
scaled_newton_schulz(model[1].weight.grad, 6)
if __name__ == "__main__":
from torch._inductor.test_case import run_tests
if HAS_GPU:
run_tests()
| TestTritonSyntacticallyValid |
python | joke2k__faker | tests/providers/test_ssn.py | {
"start": 23211,
"end": 23317
} | class ____(TestEsES):
def setUp(self):
self.fake = Faker("es_CA")
Faker.seed(0)
| TestEsCA |
python | kamyu104__LeetCode-Solutions | Python/count-subarrays-with-score-less-than-k.py | {
"start": 60,
"end": 489
} | class ____(object):
def countSubarrays(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
result = total = left = 0
for right in xrange(len(nums)):
total += nums[right]
while total*(right-left+1) >= k:
total -= nums[left]
left += 1
result += right-left+1
return result
| Solution |
python | jmcnamara__XlsxWriter | xlsxwriter/test/styles/test_write_style_sheet.py | {
"start": 295,
"end": 835
} | class ____(unittest.TestCase):
"""
Test the Styles _write_style_sheet() method.
"""
def setUp(self):
self.fh = StringIO()
self.styles = Styles()
self.styles._set_filehandle(self.fh)
def test_write_style_sheet(self):
"""Test the _write_style_sheet() method"""
self.styles._write_style_sheet()
exp = """<styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main">"""
got = self.fh.getvalue()
self.assertEqual(exp, got)
| TestWriteStyleSheet |
python | django__django | tests/admin_views/admin.py | {
"start": 19743,
"end": 20036
} | class ____(admin.ModelAdmin):
list_display = ("name", "age", "is_employee", "colored_name")
ordering = ("name",)
@admin.display(ordering="name")
def colored_name(self, obj):
return format_html('<span style="color: #ff00ff;">{}</span>', obj.name)
| ComplexSortedPersonAdmin |
python | pexpect__pexpect | tests/test_unicode.py | {
"start": 294,
"end": 6531
} | class ____(PexpectTestCase.PexpectTestCase):
def test_expect_basic (self):
p = pexpect.spawnu('cat')
p.sendline('Hello')
p.sendline('there')
p.sendline('Mr. þython') # þ is more like th than p, but never mind
p.expect('Hello')
p.expect('there')
p.expect('Mr. þython')
p.sendeof ()
p.expect (pexpect.EOF)
def test_expect_exact_basic (self):
p = pexpect.spawnu('cat')
p.sendline('Hello')
p.sendline('there')
p.sendline('Mr. þython')
p.expect_exact('Hello')
p.expect_exact('there')
p.expect_exact('Mr. þython')
p.sendeof()
p.expect_exact (pexpect.EOF)
def test_expect_setecho_toggle(self):
'''This tests that echo may be toggled off.
'''
p = pexpect.spawnu('cat', timeout=5)
try:
self._expect_echo_toggle_off(p)
except IOError:
if sys.platform.lower().startswith('sunos'):
if hasattr(unittest, 'SkipTest'):
raise unittest.SkipTest("Not supported on this platform.")
return 'skip'
raise
self._expect_echo_toggle_on(p)
def test_expect_echo_exact (self):
'''Like test_expect_echo(), but using expect_exact().
'''
p = pexpect.spawnu('cat', timeout=5)
p.expect = p.expect_exact
self._expect_echo(p)
def test_expect_setecho_toggle_exact(self):
p = pexpect.spawnu('cat', timeout=5)
p.expect = p.expect_exact
try:
self._expect_echo_toggle_off(p)
except IOError:
if sys.platform.lower().startswith('sunos'):
if hasattr(unittest, 'SkipTest'):
raise unittest.SkipTest("Not supported on this platform.")
return 'skip'
raise
self._expect_echo_toggle_on(p)
def _expect_echo (self, p):
p.sendline('1234') # Should see this twice (once from tty echo and again from cat).
index = p.expect (['1234', 'abcdé', 'wxyz', pexpect.EOF, pexpect.TIMEOUT])
assert index == 0, (index, p.before)
index = p.expect (['1234', 'abcdé', 'wxyz', pexpect.EOF])
assert index == 0, index
def _expect_echo_toggle_off(self, p):
p.setecho(0) # Turn off tty echo
p.waitnoecho()
p.sendline('abcdé') # Now, should only see this once.
p.sendline('wxyz') # Should also be only once.
index = p.expect ([pexpect.EOF,pexpect.TIMEOUT, 'abcdé', 'wxyz', '1234'])
assert index == 2, index
index = p.expect ([pexpect.EOF, 'abcdé', 'wxyz', '7890'])
assert index == 2, index
def _expect_echo_toggle_on(self, p):
p.setecho(1) # Turn on tty echo
time.sleep(0.2) # there is no waitecho() !
p.sendline('7890') # Should see this twice.
index = p.expect ([pexpect.EOF, 'abcdé', 'wxyz', '7890'])
assert index == 3, index
index = p.expect ([pexpect.EOF, 'abcdé', 'wxyz', '7890'])
assert index == 3, index
p.sendeof()
def test_log_unicode(self):
msg = "abcΩ÷"
filename_send = tempfile.mktemp()
filename_read = tempfile.mktemp()
p = pexpect.spawnu('cat')
if platform.python_version_tuple() < ('3', '0', '0'):
import codecs
def open(fname, mode, **kwargs):
if 'newline' in kwargs:
del kwargs['newline']
return codecs.open(fname, mode, **kwargs)
else:
import io
open = io.open
p.logfile_send = open(filename_send, 'w', encoding='utf-8')
p.logfile_read = open(filename_read, 'w', encoding='utf-8')
p.sendline(msg)
p.sendeof()
p.expect(pexpect.EOF)
p.close()
p.logfile_send.close()
p.logfile_read.close()
# ensure the 'send' log is correct,
with open(filename_send, 'r', encoding='utf-8') as f:
self.assertEqual(f.read(), msg + '\n\x04')
os.unlink(filename_send)
# ensure the 'read' log is correct,
with open(filename_read, 'r', encoding='utf-8', newline='') as f:
output = f.read().replace(_CAT_EOF, '')
self.assertEqual(output, (msg + '\r\n')*2 )
os.unlink(filename_read)
def test_spawn_expect_ascii_unicode(self):
# A bytes-based spawn should be able to handle ASCII-only unicode, for
# backwards compatibility.
p = pexpect.spawn('cat')
p.sendline('Camelot')
p.expect('Camelot')
p.sendline('Aargh')
p.sendline('Aårgh')
p.expect_exact('Aargh')
p.sendeof()
p.expect(pexpect.EOF)
def test_spawn_send_unicode(self):
# A bytes-based spawn should be able to send arbitrary unicode
p = pexpect.spawn('cat')
p.sendline('3½')
p.sendeof()
p.expect(pexpect.EOF)
def test_spawn_utf8_incomplete(self):
# This test case ensures correct incremental decoding, which
# otherwise fails when the stream inspected by os.read()
# does not align exactly at a utf-8 multibyte boundary:
# UnicodeDecodeError: 'utf8' codec can't decode byte 0xe2 in
# position 0: unexpected end of data
p = pexpect.spawnu('cat', maxread=1)
p.sendline('▁▂▃▄▅▆▇█')
p.sendeof()
p.expect('▁▂▃▄▅▆▇█')
def test_readline_bin_echo(self):
# Test using readline() with spawnu objects. pexpect 3.2 had threw
# a TypeError when concatenating a bytestring to a unicode type.
# given,
child = pexpect.spawnu('echo', ['input', ])
# exercise,
assert child.readline() == 'input' + child.crlf
def test_unicode_argv(self):
""" Ensure a program can be executed with unicode arguments. """
p = pexpect.spawn(u'echo ǝpoɔıun', timeout=5, encoding='utf8')
p.expect(u'ǝpoɔıun')
p.expect(pexpect.EOF)
assert not p.isalive()
assert p.exitstatus == 0
if __name__ == '__main__':
unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(UnicodeTests)
| UnicodeTests |
python | django__django | django/db/models/functions/datetime.py | {
"start": 5140,
"end": 5195
} | class ____(Extract):
lookup_name = "hour"
| ExtractHour |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/definition_config_schema.py | {
"start": 1552,
"end": 2727
} | class ____(ABC):
@abstractmethod
def as_field(self) -> Field:
raise NotImplementedError()
@property
def config_type(self) -> Optional[ConfigType]:
field = self.as_field()
return field.config_type if field else None
@property
def is_required(self) -> bool:
field = self.as_field()
return field.is_required if field else False
@property
def default_provided(self) -> bool:
field = self.as_field()
return field.default_provided if field else False
@property
def default_value(self) -> Any:
field = self.as_field()
check.invariant(self.default_provided, "Asking for default value when none was provided")
return field.default_value if field else None
@property
def default_value_as_json_str(self) -> str:
field = self.as_field()
check.invariant(self.default_provided, "Asking for default value when none was provided")
return field.default_value_as_json_str
@property
def description(self) -> Optional[str]:
field = self.as_field()
return field.description if field else None
| IDefinitionConfigSchema |
python | pytorch__pytorch | torch/distributed/optim/functional_adamw.py | {
"start": 812,
"end": 7511
class ____:
    """Functional-API AdamW optimizer.

    Unlike the stateful ``torch.optim.AdamW``, gradients are passed
    explicitly to :meth:`step` / :meth:`step_param` rather than read from
    ``param.grad``. Per-parameter state (``step``, ``exp_avg``,
    ``exp_avg_sq`` and, when ``amsgrad`` is set, ``max_exp_avg_sq``) lives
    in ``self.state``, a dict keyed by the parameter tensor itself. The
    dict is created via ``torch.jit.annotate``, presumably so the class
    remains TorchScript-scriptable — confirm before restructuring.

    Only a single param group is supported (see NOTE in ``__init__``).
    """

    def __init__(
        self,
        params: list[Tensor],
        lr: float = 1e-3,
        betas: tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-8,
        weight_decay: float = 1e-2,
        amsgrad: bool = False,
        maximize: bool = False,
        foreach: bool = False,
        fused: bool = False,
        _allow_empty_param_list: bool = False,
    ):
        """Validate hyperparameters and initialize optimizer bookkeeping.

        Raises:
            ValueError: if ``lr``, ``eps``, ``betas`` or ``weight_decay``
                are out of range, or if ``params`` is empty and
                ``_allow_empty_param_list`` is False.
        """
        # Warn that scripted functional optimizers are deprecated;
        # stacklevel=2 points the warning at the caller's frame.
        _scripted_functional_optimizer_deprecation_warning(stacklevel=2)
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")
        # Hyperparameters consumed later by F.adamw in step()/step_param().
        self.defaults = {
            "lr": lr,
            "eps": eps,
            "beta1": betas[0],
            "beta2": betas[1],
            "weight_decay": weight_decay,
        }
        self.amsgrad = amsgrad
        self.maximize = maximize
        self.foreach = foreach
        self.fused = fused
        # Per-parameter optimizer state, keyed by the parameter tensor.
        # torch.jit.annotate fixes the dict's type for TorchScript.
        self.state = torch.jit.annotate(dict[torch.Tensor, dict[str, torch.Tensor]], {})

        if len(params) == 0 and not _allow_empty_param_list:
            raise ValueError("optimizer got an empty parameter list")

        # NOTE: we only have one param_group and don't allow user to add additional
        # param group as it's not a common use case.
        self.param_group = {"params": params}

    def step_param(self, param: Tensor, grad: Tensor | None):
        """Apply one AdamW update to a single parameter.

        Lazily initializes this parameter's state on first use. When
        ``grad`` is None, the param/grad lists stay empty but F.adamw is
        still invoked with the (possibly empty) lists.
        """
        params_with_grad = []
        grads = []
        exp_avgs = []
        exp_avg_sqs = []
        max_exp_avg_sqs = []
        state_steps: list[Tensor] = []
        has_complex = torch.is_complex(param)
        if grad is not None:
            params_with_grad.append(param)
            grads.append(grad)
        # Lazy state initialization
        if param not in self.state:
            self.state[param] = {}
            state = self.state[param]
            # Step count is stored as a tensor so it can live in `state`.
            state["step"] = torch.tensor(0.0)
            # Exponential moving average of gradient values
            state["exp_avg"] = torch.zeros_like(
                param, memory_format=torch.preserve_format
            )
            # Exponential moving average of squared gradient values
            state["exp_avg_sq"] = torch.zeros_like(
                param, memory_format=torch.preserve_format
            )
            if self.amsgrad:
                # Maintains max of all exp. moving avg. of sq. grad. values
                state["max_exp_avg_sq"] = torch.zeros_like(
                    param, memory_format=torch.preserve_format
                )

        # Re-fetch (harmless when the init branch above already bound it).
        state = self.state[param]

        exp_avgs.append(state["exp_avg"])
        exp_avg_sqs.append(state["exp_avg_sq"])

        if self.amsgrad:
            max_exp_avg_sqs.append(state["max_exp_avg_sq"])

        state_steps.append(state["step"])
        # no_grad: the update mutates params/state and must not be traced.
        with torch.no_grad():
            F.adamw(
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                max_exp_avg_sqs,
                state_steps,
                amsgrad=self.amsgrad,
                maximize=self.maximize,
                beta1=self.defaults["beta1"],
                beta2=self.defaults["beta2"],
                lr=self.defaults["lr"],
                weight_decay=self.defaults["weight_decay"],
                eps=self.defaults["eps"],
                foreach=self.foreach,
                fused=self.fused,
                grad_scale=None,
                found_inf=None,
                has_complex=has_complex,
            )

    def step(self, gradients: list[Tensor | None]):
        """Apply one AdamW update to every parameter in the group.

        ``gradients`` must be positionally aligned with
        ``self.param_group["params"]``; a None entry skips that parameter.

        Raises:
            ValueError: if ``gradients`` and the parameter list differ in
                length.

        NOTE(review): the lazy-init block below duplicates the one in
        step_param(); kept inline — factoring it into a helper may affect
        TorchScript scripting, confirm before refactoring.
        """
        params = self.param_group["params"]
        params_with_grad = []
        grads = []
        exp_avgs = []
        exp_avg_sqs = []
        max_exp_avg_sqs = []
        state_steps: list[Tensor] = []

        if len(params) != len(gradients):
            raise ValueError(
                "the gradients passed in does not equal to the size of the parameters!"
                + f"Params length: {len(params)}. "
                + f"Gradients length: {len(gradients)}"
            )
        has_complex = False
        for param, gradient in zip(self.param_group["params"], gradients):
            if gradient is not None:
                has_complex |= torch.is_complex(param)
                params_with_grad.append(param)
                grads.append(gradient)
                # Lazy state initialization
                if param not in self.state:
                    self.state[param] = {}
                    state = self.state[param]
                    # Step count is stored as a tensor so it can live in `state`.
                    state["step"] = torch.tensor(0.0)
                    # Exponential moving average of gradient values
                    state["exp_avg"] = torch.zeros_like(
                        param, memory_format=torch.preserve_format
                    )
                    # Exponential moving average of squared gradient values
                    state["exp_avg_sq"] = torch.zeros_like(
                        param, memory_format=torch.preserve_format
                    )
                    if self.amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state["max_exp_avg_sq"] = torch.zeros_like(
                            param, memory_format=torch.preserve_format
                        )

                state = self.state[param]

                exp_avgs.append(state["exp_avg"])
                exp_avg_sqs.append(state["exp_avg_sq"])

                if self.amsgrad:
                    max_exp_avg_sqs.append(state["max_exp_avg_sq"])

                state_steps.append(state["step"])

        # no_grad: the update mutates params/state and must not be traced.
        with torch.no_grad():
            F.adamw(
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                max_exp_avg_sqs,
                state_steps,
                amsgrad=self.amsgrad,
                maximize=self.maximize,
                beta1=self.defaults["beta1"],
                beta2=self.defaults["beta2"],
                lr=self.defaults["lr"],
                weight_decay=self.defaults["weight_decay"],
                eps=self.defaults["eps"],
                foreach=self.foreach,
                fused=self.fused,
                grad_scale=None,
                found_inf=None,
                has_complex=has_complex,
            )
| _FunctionalAdamW |
python | pennersr__django-allauth | allauth/headless/base/response.py | {
"start": 5297,
"end": 5433
class ____(APIResponse):
    """API response carrying an HTTP 403 Forbidden status."""

    def __init__(self, request):
        # Delegate everything to APIResponse, pinning the status to 403.
        forbidden = HTTPStatus.FORBIDDEN
        super().__init__(request, status=forbidden)
| ForbiddenResponse |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.