language | repo | path | class_span | source | target |
|---|---|---|---|---|---|
python | yaml__pyyaml | lib/yaml/emitter.py | {
"start": 967,
"end": 43006
} | class ____:
DEFAULT_TAG_PREFIXES = {
'!' : '!',
'tag:yaml.org,2002:' : '!!',
}
def __init__(self, stream, canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None):
# The stream should have the methods `write` and possibly `flush`.
self.stream = stream
# Encoding can be overridden by STREAM-START.
self.encoding = None
# Emitter is a state machine with a stack of states to handle nested
# structures.
self.states = []
self.state = self.expect_stream_start
# Current event and the event queue.
self.events = []
self.event = None
# The current indentation level and the stack of previous indents.
self.indents = []
self.indent = None
# Flow level.
self.flow_level = 0
# Contexts.
self.root_context = False
self.sequence_context = False
self.mapping_context = False
self.simple_key_context = False
# Characteristics of the last emitted character:
# - current position.
# - is it a whitespace?
# - is it an indention character
# (indentation space, '-', '?', or ':')?
self.line = 0
self.column = 0
self.whitespace = True
self.indention = True
# Whether the document requires an explicit document indicator
self.open_ended = False
# Formatting details.
self.canonical = canonical
self.allow_unicode = allow_unicode
self.best_indent = 2
if indent and 1 < indent < 10:
self.best_indent = indent
self.best_width = 80
if width and width > self.best_indent*2:
self.best_width = width
self.best_line_break = '\n'
if line_break in ['\r', '\n', '\r\n']:
self.best_line_break = line_break
# Tag prefixes.
self.tag_prefixes = None
# Prepared anchor and tag.
self.prepared_anchor = None
self.prepared_tag = None
# Scalar analysis and style.
self.analysis = None
self.style = None
def dispose(self):
# Reset the state attributes (to clear self-references)
self.states = []
self.state = None
def emit(self, event):
self.events.append(event)
while not self.need_more_events():
self.event = self.events.pop(0)
self.state()
self.event = None
    # In some cases, we wait for the next few events before emitting.
def need_more_events(self):
if not self.events:
return True
event = self.events[0]
if isinstance(event, DocumentStartEvent):
return self.need_events(1)
elif isinstance(event, SequenceStartEvent):
return self.need_events(2)
elif isinstance(event, MappingStartEvent):
return self.need_events(3)
else:
return False
def need_events(self, count):
level = 0
for event in self.events[1:]:
if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
level += 1
elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
level -= 1
elif isinstance(event, StreamEndEvent):
level = -1
if level < 0:
return False
return (len(self.events) < count+1)
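    # The lookahead counts above (one event after a document start, two
    # after a sequence start, three after a mapping start) buffer just
    # enough of the following events for check_empty_document,
    # check_empty_sequence, check_empty_mapping, and check_simple_key to
    # peek at self.events before a style is committed to.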
def increase_indent(self, flow=False, indentless=False):
self.indents.append(self.indent)
if self.indent is None:
if flow:
self.indent = self.best_indent
else:
self.indent = 0
elif not indentless:
self.indent += self.best_indent
# States.
# Stream handlers.
def expect_stream_start(self):
if isinstance(self.event, StreamStartEvent):
if self.event.encoding and not hasattr(self.stream, 'encoding'):
self.encoding = self.event.encoding
self.write_stream_start()
self.state = self.expect_first_document_start
else:
raise EmitterError("expected StreamStartEvent, but got %s"
% self.event)
def expect_nothing(self):
raise EmitterError("expected nothing, but got %s" % self.event)
# Document handlers.
def expect_first_document_start(self):
return self.expect_document_start(first=True)
def expect_document_start(self, first=False):
if isinstance(self.event, DocumentStartEvent):
if (self.event.version or self.event.tags) and self.open_ended:
self.write_indicator('...', True)
self.write_indent()
if self.event.version:
version_text = self.prepare_version(self.event.version)
self.write_version_directive(version_text)
self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
if self.event.tags:
handles = sorted(self.event.tags.keys())
for handle in handles:
prefix = self.event.tags[handle]
self.tag_prefixes[prefix] = handle
handle_text = self.prepare_tag_handle(handle)
prefix_text = self.prepare_tag_prefix(prefix)
self.write_tag_directive(handle_text, prefix_text)
implicit = (first and not self.event.explicit and not self.canonical
and not self.event.version and not self.event.tags
and not self.check_empty_document())
if not implicit:
self.write_indent()
self.write_indicator('---', True)
if self.canonical:
self.write_indent()
self.state = self.expect_document_root
elif isinstance(self.event, StreamEndEvent):
if self.open_ended:
self.write_indicator('...', True)
self.write_indent()
self.write_stream_end()
self.state = self.expect_nothing
else:
raise EmitterError("expected DocumentStartEvent, but got %s"
% self.event)
def expect_document_end(self):
if isinstance(self.event, DocumentEndEvent):
self.write_indent()
if self.event.explicit:
self.write_indicator('...', True)
self.write_indent()
self.flush_stream()
self.state = self.expect_document_start
else:
raise EmitterError("expected DocumentEndEvent, but got %s"
% self.event)
def expect_document_root(self):
self.states.append(self.expect_document_end)
self.expect_node(root=True)
# Node handlers.
def expect_node(self, root=False, sequence=False, mapping=False,
simple_key=False):
self.root_context = root
self.sequence_context = sequence
self.mapping_context = mapping
self.simple_key_context = simple_key
if isinstance(self.event, AliasEvent):
self.expect_alias()
elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
self.process_anchor('&')
self.process_tag()
if isinstance(self.event, ScalarEvent):
self.expect_scalar()
elif isinstance(self.event, SequenceStartEvent):
if self.flow_level or self.canonical or self.event.flow_style \
or self.check_empty_sequence():
self.expect_flow_sequence()
else:
self.expect_block_sequence()
elif isinstance(self.event, MappingStartEvent):
if self.flow_level or self.canonical or self.event.flow_style \
or self.check_empty_mapping():
self.expect_flow_mapping()
else:
self.expect_block_mapping()
else:
raise EmitterError("expected NodeEvent, but got %s" % self.event)
def expect_alias(self):
if self.event.anchor is None:
raise EmitterError("anchor is not specified for alias")
self.process_anchor('*')
self.state = self.states.pop()
def expect_scalar(self):
self.increase_indent(flow=True)
self.process_scalar()
self.indent = self.indents.pop()
self.state = self.states.pop()
# Flow sequence handlers.
def expect_flow_sequence(self):
self.write_indicator('[', True, whitespace=True)
self.flow_level += 1
self.increase_indent(flow=True)
self.state = self.expect_first_flow_sequence_item
def expect_first_flow_sequence_item(self):
if isinstance(self.event, SequenceEndEvent):
self.indent = self.indents.pop()
self.flow_level -= 1
self.write_indicator(']', False)
self.state = self.states.pop()
else:
if self.canonical or self.column > self.best_width:
self.write_indent()
self.states.append(self.expect_flow_sequence_item)
self.expect_node(sequence=True)
def expect_flow_sequence_item(self):
if isinstance(self.event, SequenceEndEvent):
self.indent = self.indents.pop()
self.flow_level -= 1
if self.canonical:
self.write_indicator(',', False)
self.write_indent()
self.write_indicator(']', False)
self.state = self.states.pop()
else:
self.write_indicator(',', False)
if self.canonical or self.column > self.best_width:
self.write_indent()
self.states.append(self.expect_flow_sequence_item)
self.expect_node(sequence=True)
# Flow mapping handlers.
def expect_flow_mapping(self):
self.write_indicator('{', True, whitespace=True)
self.flow_level += 1
self.increase_indent(flow=True)
self.state = self.expect_first_flow_mapping_key
def expect_first_flow_mapping_key(self):
if isinstance(self.event, MappingEndEvent):
self.indent = self.indents.pop()
self.flow_level -= 1
self.write_indicator('}', False)
self.state = self.states.pop()
else:
if self.canonical or self.column > self.best_width:
self.write_indent()
if not self.canonical and self.check_simple_key():
self.states.append(self.expect_flow_mapping_simple_value)
self.expect_node(mapping=True, simple_key=True)
else:
self.write_indicator('?', True)
self.states.append(self.expect_flow_mapping_value)
self.expect_node(mapping=True)
def expect_flow_mapping_key(self):
if isinstance(self.event, MappingEndEvent):
self.indent = self.indents.pop()
self.flow_level -= 1
if self.canonical:
self.write_indicator(',', False)
self.write_indent()
self.write_indicator('}', False)
self.state = self.states.pop()
else:
self.write_indicator(',', False)
if self.canonical or self.column > self.best_width:
self.write_indent()
if not self.canonical and self.check_simple_key():
self.states.append(self.expect_flow_mapping_simple_value)
self.expect_node(mapping=True, simple_key=True)
else:
self.write_indicator('?', True)
self.states.append(self.expect_flow_mapping_value)
self.expect_node(mapping=True)
def expect_flow_mapping_simple_value(self):
self.write_indicator(':', False)
self.states.append(self.expect_flow_mapping_key)
self.expect_node(mapping=True)
def expect_flow_mapping_value(self):
if self.canonical or self.column > self.best_width:
self.write_indent()
self.write_indicator(':', True)
self.states.append(self.expect_flow_mapping_key)
self.expect_node(mapping=True)
# Block sequence handlers.
def expect_block_sequence(self):
indentless = (self.mapping_context and not self.indention)
self.increase_indent(flow=False, indentless=indentless)
self.state = self.expect_first_block_sequence_item
def expect_first_block_sequence_item(self):
return self.expect_block_sequence_item(first=True)
def expect_block_sequence_item(self, first=False):
if not first and isinstance(self.event, SequenceEndEvent):
self.indent = self.indents.pop()
self.state = self.states.pop()
else:
self.write_indent()
self.write_indicator('-', True, indention=True)
self.states.append(self.expect_block_sequence_item)
self.expect_node(sequence=True)
# Block mapping handlers.
def expect_block_mapping(self):
self.increase_indent(flow=False)
self.state = self.expect_first_block_mapping_key
def expect_first_block_mapping_key(self):
return self.expect_block_mapping_key(first=True)
def expect_block_mapping_key(self, first=False):
if not first and isinstance(self.event, MappingEndEvent):
self.indent = self.indents.pop()
self.state = self.states.pop()
else:
self.write_indent()
if self.check_simple_key():
self.states.append(self.expect_block_mapping_simple_value)
self.expect_node(mapping=True, simple_key=True)
else:
self.write_indicator('?', True, indention=True)
self.states.append(self.expect_block_mapping_value)
self.expect_node(mapping=True)
def expect_block_mapping_simple_value(self):
self.write_indicator(':', False)
self.states.append(self.expect_block_mapping_key)
self.expect_node(mapping=True)
def expect_block_mapping_value(self):
self.write_indent()
self.write_indicator(':', True, indention=True)
self.states.append(self.expect_block_mapping_key)
self.expect_node(mapping=True)
# Checkers.
def check_empty_sequence(self):
return (isinstance(self.event, SequenceStartEvent) and self.events
and isinstance(self.events[0], SequenceEndEvent))
def check_empty_mapping(self):
return (isinstance(self.event, MappingStartEvent) and self.events
and isinstance(self.events[0], MappingEndEvent))
def check_empty_document(self):
if not isinstance(self.event, DocumentStartEvent) or not self.events:
return False
event = self.events[0]
return (isinstance(event, ScalarEvent) and event.anchor is None
and event.tag is None and event.implicit and event.value == '')
def check_simple_key(self):
length = 0
if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
if self.prepared_anchor is None:
self.prepared_anchor = self.prepare_anchor(self.event.anchor)
length += len(self.prepared_anchor)
if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
and self.event.tag is not None:
if self.prepared_tag is None:
self.prepared_tag = self.prepare_tag(self.event.tag)
length += len(self.prepared_tag)
if isinstance(self.event, ScalarEvent):
if self.analysis is None:
self.analysis = self.analyze_scalar(self.event.value)
length += len(self.analysis.scalar)
return (length < 128 and (isinstance(self.event, AliasEvent)
or (isinstance(self.event, ScalarEvent)
and not self.analysis.empty and not self.analysis.multiline)
or self.check_empty_sequence() or self.check_empty_mapping()))
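    # (The 128-character cap above is an emitter-side heuristic; the YAML
    # spec itself allows simple keys of up to 1024 characters.)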
# Anchor, Tag, and Scalar processors.
def process_anchor(self, indicator):
if self.event.anchor is None:
self.prepared_anchor = None
return
if self.prepared_anchor is None:
self.prepared_anchor = self.prepare_anchor(self.event.anchor)
if self.prepared_anchor:
self.write_indicator(indicator+self.prepared_anchor, True)
self.prepared_anchor = None
def process_tag(self):
tag = self.event.tag
if isinstance(self.event, ScalarEvent):
if self.style is None:
self.style = self.choose_scalar_style()
if ((not self.canonical or tag is None) and
((self.style == '' and self.event.implicit[0])
or (self.style != '' and self.event.implicit[1]))):
self.prepared_tag = None
return
if self.event.implicit[0] and tag is None:
tag = '!'
self.prepared_tag = None
else:
if (not self.canonical or tag is None) and self.event.implicit:
self.prepared_tag = None
return
if tag is None:
raise EmitterError("tag is not specified")
if self.prepared_tag is None:
self.prepared_tag = self.prepare_tag(tag)
if self.prepared_tag:
self.write_indicator(self.prepared_tag, True)
self.prepared_tag = None
def choose_scalar_style(self):
if self.analysis is None:
self.analysis = self.analyze_scalar(self.event.value)
if self.event.style == '"' or self.canonical:
return '"'
if not self.event.style and self.event.implicit[0]:
if (not (self.simple_key_context and
(self.analysis.empty or self.analysis.multiline))
and (self.flow_level and self.analysis.allow_flow_plain
or (not self.flow_level and self.analysis.allow_block_plain))):
return ''
if self.event.style and self.event.style in '|>':
if (not self.flow_level and not self.simple_key_context
and self.analysis.allow_block):
return self.event.style
if not self.event.style or self.event.style == '\'':
if (self.analysis.allow_single_quoted and
not (self.simple_key_context and self.analysis.multiline)):
return '\''
return '"'
def process_scalar(self):
if self.analysis is None:
self.analysis = self.analyze_scalar(self.event.value)
if self.style is None:
self.style = self.choose_scalar_style()
split = (not self.simple_key_context)
#if self.analysis.multiline and split \
# and (not self.style or self.style in '\'\"'):
# self.write_indent()
if self.style == '"':
self.write_double_quoted(self.analysis.scalar, split)
elif self.style == '\'':
self.write_single_quoted(self.analysis.scalar, split)
elif self.style == '>':
self.write_folded(self.analysis.scalar)
elif self.style == '|':
self.write_literal(self.analysis.scalar)
else:
self.write_plain(self.analysis.scalar, split)
self.analysis = None
self.style = None
# Analyzers.
def prepare_version(self, version):
major, minor = version
if major != 1:
raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
return '%d.%d' % (major, minor)
def prepare_tag_handle(self, handle):
if not handle:
raise EmitterError("tag handle must not be empty")
if handle[0] != '!' or handle[-1] != '!':
raise EmitterError("tag handle must start and end with '!': %r" % handle)
for ch in handle[1:-1]:
if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
or ch in '-_'):
raise EmitterError("invalid character %r in the tag handle: %r"
% (ch, handle))
return handle
def prepare_tag_prefix(self, prefix):
if not prefix:
raise EmitterError("tag prefix must not be empty")
chunks = []
start = end = 0
if prefix[0] == '!':
end = 1
while end < len(prefix):
ch = prefix[end]
if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
or ch in '-;/?!:@&=+$,_.~*\'()[]':
end += 1
else:
if start < end:
chunks.append(prefix[start:end])
start = end = end+1
data = ch.encode('utf-8')
for ch in data:
                    chunks.append('%%%02X' % ch)  # bytes yield ints when iterated in Python 3
if start < end:
chunks.append(prefix[start:end])
return ''.join(chunks)
def prepare_tag(self, tag):
if not tag:
raise EmitterError("tag must not be empty")
if tag == '!':
return tag
handle = None
suffix = tag
prefixes = sorted(self.tag_prefixes.keys())
for prefix in prefixes:
if tag.startswith(prefix) \
and (prefix == '!' or len(prefix) < len(tag)):
handle = self.tag_prefixes[prefix]
suffix = tag[len(prefix):]
chunks = []
start = end = 0
while end < len(suffix):
ch = suffix[end]
if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
or ch in '-;/?:@&=+$,_.~*\'()[]' \
or (ch == '!' and handle != '!'):
end += 1
else:
if start < end:
chunks.append(suffix[start:end])
start = end = end+1
data = ch.encode('utf-8')
for ch in data:
chunks.append('%%%02X' % ch)
if start < end:
chunks.append(suffix[start:end])
suffix_text = ''.join(chunks)
if handle:
return '%s%s' % (handle, suffix_text)
else:
return '!<%s>' % suffix_text
def prepare_anchor(self, anchor):
if not anchor:
raise EmitterError("anchor must not be empty")
for ch in anchor:
if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
or ch in '-_'):
raise EmitterError("invalid character %r in the anchor: %r"
% (ch, anchor))
return anchor
def analyze_scalar(self, scalar):
# Empty scalar is a special case.
if not scalar:
return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
allow_flow_plain=False, allow_block_plain=True,
allow_single_quoted=True, allow_double_quoted=True,
allow_block=False)
# Indicators and special characters.
block_indicators = False
flow_indicators = False
line_breaks = False
special_characters = False
# Important whitespace combinations.
leading_space = False
leading_break = False
trailing_space = False
trailing_break = False
break_space = False
space_break = False
# Check document indicators.
if scalar.startswith('---') or scalar.startswith('...'):
block_indicators = True
flow_indicators = True
# First character or preceded by a whitespace.
preceded_by_whitespace = True
# Last character or followed by a whitespace.
followed_by_whitespace = (len(scalar) == 1 or
scalar[1] in '\0 \t\r\n\x85\u2028\u2029')
# The previous character is a space.
previous_space = False
# The previous character is a break.
previous_break = False
index = 0
while index < len(scalar):
ch = scalar[index]
# Check for indicators.
if index == 0:
# Leading indicators are special characters.
if ch in '#,[]{}&*!|>\'\"%@`':
flow_indicators = True
block_indicators = True
if ch in '?:':
flow_indicators = True
if followed_by_whitespace:
block_indicators = True
if ch == '-' and followed_by_whitespace:
flow_indicators = True
block_indicators = True
else:
# Some indicators cannot appear within a scalar as well.
if ch in ',?[]{}':
flow_indicators = True
if ch == ':':
flow_indicators = True
if followed_by_whitespace:
block_indicators = True
if ch == '#' and preceded_by_whitespace:
flow_indicators = True
block_indicators = True
# Check for line breaks, special, and unicode characters.
if ch in '\n\x85\u2028\u2029':
line_breaks = True
if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF'
or '\uE000' <= ch <= '\uFFFD'
or '\U00010000' <= ch < '\U0010ffff') and ch != '\uFEFF':
unicode_characters = True
if not self.allow_unicode:
special_characters = True
else:
special_characters = True
# Detect important whitespace combinations.
if ch == ' ':
if index == 0:
leading_space = True
if index == len(scalar)-1:
trailing_space = True
if previous_break:
break_space = True
previous_space = True
previous_break = False
elif ch in '\n\x85\u2028\u2029':
if index == 0:
leading_break = True
if index == len(scalar)-1:
trailing_break = True
if previous_space:
space_break = True
previous_space = False
previous_break = True
else:
previous_space = False
previous_break = False
# Prepare for the next character.
index += 1
preceded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029')
followed_by_whitespace = (index+1 >= len(scalar) or
scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029')
# Let's decide what styles are allowed.
allow_flow_plain = True
allow_block_plain = True
allow_single_quoted = True
allow_double_quoted = True
allow_block = True
# Leading and trailing whitespaces are bad for plain scalars.
if (leading_space or leading_break
or trailing_space or trailing_break):
allow_flow_plain = allow_block_plain = False
# We do not permit trailing spaces for block scalars.
if trailing_space:
allow_block = False
# Spaces at the beginning of a new line are only acceptable for block
# scalars.
if break_space:
allow_flow_plain = allow_block_plain = allow_single_quoted = False
        # Spaces followed by breaks, as well as special characters, are only
        # allowed for double quoted scalars.
if space_break or special_characters:
allow_flow_plain = allow_block_plain = \
allow_single_quoted = allow_block = False
# Although the plain scalar writer supports breaks, we never emit
# multiline plain scalars.
if line_breaks:
allow_flow_plain = allow_block_plain = False
# Flow indicators are forbidden for flow plain scalars.
if flow_indicators:
allow_flow_plain = False
# Block indicators are forbidden for block plain scalars.
if block_indicators:
allow_block_plain = False
return ScalarAnalysis(scalar=scalar,
empty=False, multiline=line_breaks,
allow_flow_plain=allow_flow_plain,
allow_block_plain=allow_block_plain,
allow_single_quoted=allow_single_quoted,
allow_double_quoted=allow_double_quoted,
allow_block=allow_block)
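    # For example: 'hello world' leaves every allow_* flag True; ' hello'
    # (leading space) forbids both plain styles; 'a,b' (flow indicator ',')
    # forbids only flow plain.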
# Writers.
def flush_stream(self):
if hasattr(self.stream, 'flush'):
self.stream.flush()
def write_stream_start(self):
# Write BOM if needed.
if self.encoding and self.encoding.startswith('utf-16'):
self.stream.write('\uFEFF'.encode(self.encoding))
def write_stream_end(self):
self.flush_stream()
def write_indicator(self, indicator, need_whitespace,
whitespace=False, indention=False):
if self.whitespace or not need_whitespace:
data = indicator
else:
data = ' '+indicator
self.whitespace = whitespace
self.indention = self.indention and indention
self.column += len(data)
self.open_ended = False
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
def write_indent(self):
indent = self.indent or 0
if not self.indention or self.column > indent \
or (self.column == indent and not self.whitespace):
self.write_line_break()
if self.column < indent:
self.whitespace = True
data = ' '*(indent-self.column)
self.column = indent
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
def write_line_break(self, data=None):
if data is None:
data = self.best_line_break
self.whitespace = True
self.indention = True
self.line += 1
self.column = 0
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
def write_version_directive(self, version_text):
data = '%%YAML %s' % version_text
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.write_line_break()
def write_tag_directive(self, handle_text, prefix_text):
data = '%%TAG %s %s' % (handle_text, prefix_text)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.write_line_break()
# Scalar streams.
def write_single_quoted(self, text, split=True):
self.write_indicator('\'', True)
spaces = False
breaks = False
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if spaces:
if ch is None or ch != ' ':
if start+1 == end and self.column > self.best_width and split \
and start != 0 and end != len(text):
self.write_indent()
else:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
elif breaks:
if ch is None or ch not in '\n\x85\u2028\u2029':
if text[start] == '\n':
self.write_line_break()
for br in text[start:end]:
if br == '\n':
self.write_line_break()
else:
self.write_line_break(br)
self.write_indent()
start = end
else:
if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'':
if start < end:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
if ch == '\'':
data = '\'\''
self.column += 2
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end + 1
if ch is not None:
spaces = (ch == ' ')
breaks = (ch in '\n\x85\u2028\u2029')
end += 1
self.write_indicator('\'', False)
ESCAPE_REPLACEMENTS = {
'\0': '0',
'\x07': 'a',
'\x08': 'b',
'\x09': 't',
'\x0A': 'n',
'\x0B': 'v',
'\x0C': 'f',
'\x0D': 'r',
'\x1B': 'e',
'\"': '\"',
'\\': '\\',
'\x85': 'N',
'\xA0': '_',
'\u2028': 'L',
'\u2029': 'P',
}
def write_double_quoted(self, text, split=True):
self.write_indicator('"', True)
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \
or not ('\x20' <= ch <= '\x7E'
or (self.allow_unicode
and ('\xA0' <= ch <= '\uD7FF'
or '\uE000' <= ch <= '\uFFFD'))):
if start < end:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
if ch is not None:
if ch in self.ESCAPE_REPLACEMENTS:
data = '\\'+self.ESCAPE_REPLACEMENTS[ch]
elif ch <= '\xFF':
data = '\\x%02X' % ord(ch)
elif ch <= '\uFFFF':
data = '\\u%04X' % ord(ch)
else:
data = '\\U%08X' % ord(ch)
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end+1
if 0 < end < len(text)-1 and (ch == ' ' or start >= end) \
and self.column+(end-start) > self.best_width and split:
data = text[start:end]+'\\'
if start < end:
start = end
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.write_indent()
self.whitespace = False
self.indention = False
if text[start] == ' ':
data = '\\'
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
end += 1
self.write_indicator('"', False)
def determine_block_hints(self, text):
hints = ''
if text:
if text[0] in ' \n\x85\u2028\u2029':
hints += str(self.best_indent)
if text[-1] not in '\n\x85\u2028\u2029':
hints += '-'
elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
hints += '+'
return hints
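    # Examples of the hints computed above (with best_indent == 2):
    #   'text'     -> '-'  (no trailing break: strip chomping indicator)
    #   'text\n'   -> ''   (single trailing break: default clipping)
    #   'text\n\n' -> '+'  (extra trailing break: keep chomping indicator)
    #   ' text\n'  -> '2'  (leading space forces an explicit indent hint)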
def write_folded(self, text):
hints = self.determine_block_hints(text)
self.write_indicator('>'+hints, True)
if hints[-1:] == '+':
self.open_ended = True
self.write_line_break()
leading_space = True
spaces = False
breaks = True
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if breaks:
if ch is None or ch not in '\n\x85\u2028\u2029':
if not leading_space and ch is not None and ch != ' ' \
and text[start] == '\n':
self.write_line_break()
leading_space = (ch == ' ')
for br in text[start:end]:
if br == '\n':
self.write_line_break()
else:
self.write_line_break(br)
if ch is not None:
self.write_indent()
start = end
elif spaces:
if ch != ' ':
if start+1 == end and self.column > self.best_width:
self.write_indent()
else:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
else:
if ch is None or ch in ' \n\x85\u2028\u2029':
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
if ch is None:
self.write_line_break()
start = end
if ch is not None:
breaks = (ch in '\n\x85\u2028\u2029')
spaces = (ch == ' ')
end += 1
def write_literal(self, text):
hints = self.determine_block_hints(text)
self.write_indicator('|'+hints, True)
if hints[-1:] == '+':
self.open_ended = True
self.write_line_break()
breaks = True
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if breaks:
if ch is None or ch not in '\n\x85\u2028\u2029':
for br in text[start:end]:
if br == '\n':
self.write_line_break()
else:
self.write_line_break(br)
if ch is not None:
self.write_indent()
start = end
else:
if ch is None or ch in '\n\x85\u2028\u2029':
data = text[start:end]
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
if ch is None:
self.write_line_break()
start = end
if ch is not None:
breaks = (ch in '\n\x85\u2028\u2029')
end += 1
def write_plain(self, text, split=True):
if self.root_context:
self.open_ended = True
if not text:
return
if not self.whitespace:
data = ' '
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
self.whitespace = False
self.indention = False
spaces = False
breaks = False
start = end = 0
while end <= len(text):
ch = None
if end < len(text):
ch = text[end]
if spaces:
if ch != ' ':
if start+1 == end and self.column > self.best_width and split:
self.write_indent()
self.whitespace = False
self.indention = False
else:
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
elif breaks:
if ch not in '\n\x85\u2028\u2029':
if text[start] == '\n':
self.write_line_break()
for br in text[start:end]:
if br == '\n':
self.write_line_break()
else:
self.write_line_break(br)
self.write_indent()
self.whitespace = False
self.indention = False
start = end
else:
if ch is None or ch in ' \n\x85\u2028\u2029':
data = text[start:end]
self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
if ch is not None:
spaces = (ch == ' ')
breaks = (ch in '\n\x85\u2028\u2029')
end += 1
| Emitter |
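A minimal usage sketch of the Emitter above, driven through PyYAML's public `yaml.emit()` wrapper (the event sequence and its output are sample data; the event classes come from `yaml.events`):

```python
import io
import yaml
from yaml.events import (StreamStartEvent, DocumentStartEvent, ScalarEvent,
                         DocumentEndEvent, StreamEndEvent)

buf = io.StringIO()
yaml.emit([
    StreamStartEvent(),
    DocumentStartEvent(explicit=False),
    ScalarEvent(anchor=None, tag=None, implicit=(True, True), value='hello'),
    DocumentEndEvent(explicit=False),
    StreamEndEvent(),
], stream=buf)
print(buf.getvalue())  # 'hello\n...\n' -- the plain root scalar leaves the
                       # stream open-ended, so '...' is written at stream end
```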
python | kamyu104__LeetCode-Solutions | Python/online-majority-element-in-subarray.py | {
"start": 1170,
"end": 2769
} | class ____(object):
def __init__(self, arr):
"""
:type arr: List[int]
"""
self.__arr = arr
self.__inv_idx = collections.defaultdict(list)
for i, x in enumerate(self.__arr):
self.__inv_idx[x].append(i)
self.__bound = int(round((len(arr)**0.5)))
        self.__majorities = [i for i, group in self.__inv_idx.items() if len(group) >= self.__bound]
def query(self, left, right, threshold):
"""
:type left: int
:type right: int
:type threshold: int
:rtype: int
"""
def count(inv_idx, m, left, right):
return bisect.bisect_right(inv_idx[m], right) - \
bisect.bisect_left(inv_idx[m], left)
def boyer_moore_majority_vote(nums, left, right):
m, cnt = nums[left], 1
            for i in range(left+1, right+1):
if m == nums[i]:
cnt += 1
else:
cnt -= 1
if cnt == 0:
m = nums[i]
cnt = 1
return m
if right-left+1 < self.__bound:
m = boyer_moore_majority_vote(self.__arr, left, right)
if count(self.__inv_idx, m, left, right) >= threshold:
return m
else:
for m in self.__majorities:
if count(self.__inv_idx, m, left, right) >= threshold:
return m
return -1
# Time: ctor: O(nlogn)
# query: O((logn)^2)
# Space: O(n)
import functools
| MajorityChecker2 |
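A quick usage sketch, assuming the masked name resolves to the target `MajorityChecker2`, the module-level `import bisect` / `import collections` from the original file, and Python 3; the array and queries are made-up sample data:

```python
import bisect
import collections

checker = MajorityChecker2([1, 1, 2, 2, 1, 1])
print(checker.query(0, 5, 4))  # 1: value 1 occurs 4 times in arr[0..5]
print(checker.query(2, 3, 3))  # -1: nothing reaches threshold 3 in arr[2..3]
```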
python | geekcomputers__Python | venv/Lib/site-packages/pip/_internal/network/download.py | {
"start": 3794,
"end": 4825
} | class ____:
def __init__(
self,
session: PipSession,
progress_bar: str,
) -> None:
self._session = session
self._progress_bar = progress_bar
def __call__(self, link: Link, location: str) -> Tuple[str, str]:
"""Download the file given by link into location."""
try:
resp = _http_get_download(self._session, link)
except NetworkConnectionError as e:
assert e.response is not None
logger.critical(
"HTTP error %s while getting %s", e.response.status_code, link
)
raise
filename = _get_http_response_filename(resp, link)
filepath = os.path.join(location, filename)
chunks = _prepare_download(resp, link, self._progress_bar)
with open(filepath, "wb") as content_file:
for chunk in chunks:
content_file.write(chunk)
content_type = resp.headers.get("Content-Type", "")
return filepath, content_type
| Downloader |
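A hedged sketch of invoking this callable; pip's `_internal` API is unstable, and the URL and destination below are made-up placeholders:

```python
from pip._internal.models.link import Link
from pip._internal.network.session import PipSession

download = Downloader(PipSession(), progress_bar="on")
filepath, content_type = download(
    Link("https://example.com/packages/demo-1.0.tar.gz"),  # placeholder URL
    "/tmp",  # destination directory
)
print(filepath, content_type)
```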
python | huggingface__transformers | tests/models/videomae/test_modeling_videomae.py | {
"start": 6592,
"end": 16495
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as VideoMAE does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
pipeline_model_mapping = (
{"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
    # Additional keys that are required for forward, used in tests where we manipulate and create new input dict from scratch
additional_model_inputs = ["bool_masked_pos"]
test_resize_embeddings = False
test_torch_exportable = True
def setUp(self):
self.model_tester = VideoMAEModelTester(self)
self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = copy.deepcopy(inputs_dict)
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
mask = torch.ones((self.model_tester.num_masks,))
mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
batch_size = inputs_dict["pixel_values"].shape[0]
bool_masked_pos = mask.expand(batch_size, -1).bool()
inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)
if return_labels:
if model_class in [
*get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
]:
inputs_dict["labels"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
return inputs_dict
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="VideoMAE does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "MCG-NJU/videomae-base"
model = VideoMAEModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_attention_outputs(self):
if not self.has_attentions:
self.skipTest(reason="Model does not have attentions")
else:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
for model_class in self.all_model_classes:
num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
seq_len = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + 1, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_len, seq_len],
)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.hidden_states
expected_num_layers = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(hidden_states), expected_num_layers)
num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
@require_flash_attn
@require_torch_gpu
@mark.flash_attn_test
@slow
@is_flaky()
def test_flash_attn_2_inference_equivalence(self):
if not self.has_attentions:
self.skipTest(reason="Model architecture does not support attentions")
for model_class in self.all_model_classes:
if not model_class._supports_flash_attn:
self.skipTest(f"{model_class.__name__} does not support Flash Attention 2")
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
inputs_dict = self._prepare_for_class(inputs_dict, model_class)
inputs_dict["pixel_values"] = inputs_dict["pixel_values"].to(torch.bfloat16)
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_fa = model_class.from_pretrained(
tmpdirname, dtype=torch.bfloat16, attn_implementation="flash_attention_2"
)
model_fa.to(torch_device)
model = model_class.from_pretrained(tmpdirname, dtype=torch.bfloat16)
model.to(torch_device)
outputs = model(**inputs_dict, output_hidden_states=True)
outputs_fa = model_fa(**inputs_dict, output_hidden_states=True)
logits = (
outputs.hidden_states[-1]
if not model.config.is_encoder_decoder
else outputs.decoder_hidden_states[-1]
)
logits_fa = (
outputs_fa.hidden_states[-1]
if not model.config.is_encoder_decoder
else outputs_fa.decoder_hidden_states[-1]
)
assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2)
# check with inference + dropout
model.train()
_ = model_fa(**inputs_dict)
@unittest.skip("Not applicable for VideoMAE")
def test_flash_attn_2_inference_equivalence_right_padding(self):
pass
# We will verify our results on a video of eating spaghetti
# Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227]
def prepare_video():
file = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
)
video = np.load(file)
return list(video)
@require_torch
@require_vision
| VideoMAEModelTest |
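The shared-mask trick in `_prepare_for_class` above is easy to see in isolation; a standalone sketch with made-up shape values:

```python
import torch

seq_length, num_masks, batch_size = 14, 5, 2
# one shared mask: `num_masks` ones followed by zeros, so every video
# in the batch gets the same number of masked patches
mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
bool_masked_pos = mask.expand(batch_size, -1).bool()
print(bool_masked_pos.shape)     # torch.Size([2, 14])
print(bool_masked_pos[0].sum())  # tensor(5)
```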
python | scrapy__scrapy | scrapy/exceptions.py | {
"start": 326,
"end": 564
} | class ____(TypeError):
"""
Indicates an invalid value has been returned by a middleware's processing method.
Internal and undocumented, it should not be raised or caught by user code.
"""
# HTTP and crawling
| _InvalidOutput |
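A hypothetical illustration of the kind of internal guard that raises it (`_check_spider_output` is an invented name, not Scrapy's actual middleware code):

```python
def _check_spider_output(result):
    # middleware processing methods must return an iterable
    if not hasattr(result, "__iter__"):
        raise _InvalidOutput(
            f"expected an iterable, got {type(result).__name__}"
        )
    return result
```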
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/dtuse/package.py | {
"start": 217,
"end": 459
} | class ____(Package):
"""Simple package which uses dttop"""
homepage = "http://www.example.com"
url = "http://www.example.com/dtuse-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
depends_on("dttop")
| Dtuse |
python | pytorch__pytorch | test/test_dispatch.py | {
"start": 1559,
"end": 39166
} | class ____(TestCase):
namespace_index = 0
def test_all_invariants(self):
# Check that the regular stuff is OK!
C._dispatch_check_all_invariants()
# You probably don't want to call this directly; if your constructors
# don't commute, you can still run commute with a fixed ctor_order
# so that you can test that the destructors still commute
def run_ops(
self,
name,
ops,
ctor_order=None,
dtor_order=None,
results=None,
expect_raises=False,
):
"""
Given a list of operator registrations, run the registrations in the
order specified by ctor_order, and then run the deregistrations in
dtor_order.
If results is specified, intermediate results are checked for consistency
with results stored in results (and stored in results if this is the
first time we've seen them). Results are expected to be equivalent
modulo commutativity and inverses (thus, results is keyed on a frozenset
of in effect registrations from ops). Results stores namedtuple
Result[state, table, provenance], where state is a string that contains
non-derived kernel registered or error message if it doesn't pass;
table is a string that contains computed dispatch table entries;
provenance is a string that describes how exactly we got this string.
If expect_raises is True, it is not an error to raise an exception. Instead,
we'll store the exception string (instead of the dispatcher state)
in results. In principle we should flag these differently, but it's
very obvious when you get an error in one case but not another.
"""
# By allocating every test into a fresh namespace, this makes it less
# likely that a bug in the testing framework will result in tests
# interfering with each other
self.__class__.namespace_index += 1
if results is None:
results = {}
if ctor_order is None:
ctor_order = list(range(len(ops)))
if dtor_order is None:
dtor_order = list(reversed(ctor_order))
# Refs which retain the c10::Module object so we can explicitly control
# when each deregistration happens (deregistration occurs when the
# object gets deallocated).
refs = [None] * len(ops)
# Keep track of the set "in effect" registrations
active_ops = set()
# double underscore to make it less likely we conflict with something
# else
test_namespace = f"__test{self.namespace_index}__"
def check_invariants(actual_provenance):
C._dispatch_check_invariants(name)
# Normalize the test namespace so that expected outputs are stable
actual_state = C._dispatch_dump(f"{test_namespace}::{name}").replace(
test_namespace, "test"
)
actual_table = C._dispatch_dump_table(f"{test_namespace}::{name}").replace(
test_namespace, "test"
)
expected_state, expected_table, expected_provenance = results.setdefault(
frozenset(active_ops),
Result(actual_state, actual_table, actual_provenance),
)
self.assertMultiLineEqual(
expected_state,
actual_state,
f"expected from {expected_provenance}; actual from {actual_provenance}",
)
self.assertMultiLineEqual(
expected_table,
actual_table,
f"expected from {expected_provenance}; actual from {actual_provenance}",
)
results.setdefault(frozenset(), Result("", "", "hardcoded initial state"))
check_invariants("initial state")
# In the order specified by ctor_order, run registrations
set_to_report = frozenset(range(len(ops)))
for i, op_ix in enumerate(ctor_order):
# It would be better to DEF here, but because we manage
# lifetime of multiple registrations with multiple Library
# references (refs), we can't deal with the strict checking
# from DEF.
refs[op_ix] = C._dispatch_library("FRAGMENT", test_namespace, "")
active_ops.add(op_ix)
try:
ops[op_ix](refs[op_ix])
check_invariants(f"running ctors {ctor_order[: i + 1]}")
except RuntimeError as e:
if not expect_raises:
raise
actual = str(e).replace(test_namespace, "test")
actual = actual.split("\nException raised from ")[0]
expected, _, expected_provenance = results.setdefault(
frozenset(active_ops),
Result(
actual, "", f"error after running ctors {ctor_order[: i + 1]}"
),
)
self.assertMultiLineEqual(expected, actual, expected_provenance)
set_to_report = frozenset(active_ops)
active_ops.remove(op_ix)
# NB: this finally test asserts that if a registrations fails,
# the dispatcher is left in the same state *that it was before*!
check_invariants(
f"running ctors {ctor_order[:i]} and then failing to run ctor {op_ix} "
"(did this failure leave the dispatcher in a wedged state? "
"it shouldn't!)"
)
break
last_ctor = i
if expect_raises and len(active_ops) == len(ops):
# Destroy references first, as some test frameworks (like pytest)
# will retain references in the exception raised by assertTrue! EW!
refs = None
self.assertTrue(
False,
"expected exception to be raised, but nothing was raised "
f"(after running ctors {ctor_order})",
)
# In the order specified by dtor_order, run deregistrations
for i, op_ix in enumerate(dtor_order):
# Trigger a destruction
refs[op_ix] = None
# discard not remove, since we may not have actually deregistered
# anything if there was an error raised
if expect_raises:
active_ops.discard(op_ix)
else:
active_ops.remove(op_ix)
check_invariants(
f"running ctors {ctor_order[: last_ctor + 1]}, then running dtors {dtor_order[: i + 1]}"
)
return results[set_to_report][0]
# Operator registrations are commutative (as static initializers can
# run in any order) and invertible (by deregistration). (Subject
    # to some caveats: some legacy behaviors in the system are not commutative--
# we want to get rid of these!)
#
# So while in principle we could simply test a set of operations
# by just running them one by one in the order specified by the user,
# we can get more assurance about these extra properties by doing
# more work:
#
# 1. Don't run the registrations once in a fixed order: run every possible
# permutation. Similarly, run every permutation of deregistration order.
#
# 2. Don't just check the end state of the dispatcher: for every
# subset of operator registrations, ensure that the computed
# intermediate state is path independent. One thing to note:
# in this function, we assume each operation is unique. In general,
# there may be duplicated registrations, but these are usually
# idempotent or legacy. We test for behavior here separately.
#
# NB: checking all permutations means this function is exponential in
# the length of ops! So don't pass too many ops to this function!
def commute(self, name, ops, ctor_order=None, expect_raises=False):
results = {}
def go(ctor_order):
for dtor_order in itertools.permutations(range(len(ops))):
self.run_ops(
name,
ops,
ctor_order,
dtor_order,
results=results,
expect_raises=expect_raises,
)
if ctor_order is not None:
go(ctor_order)
else:
for ctor_order in itertools.permutations(range(len(ops))):
go(ctor_order)
# Return the "full" Result namedtuple after all operations are run.
# If this KeyErrors, that means that there did not exist any
# ordering of ctors which got us to the "end". That's an
# error in test construction: it means you could have
# factored the test into two smaller ones.
return results[frozenset(range(len(ops)))]
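    # Illustrative example (not from the original file): with two
    # registrations, commute() exercises all 2! ctor orders x 2! dtor
    # orders = 4 schedules, asserting every intermediate dispatcher state
    # is path-independent, e.g.:
    #
    #   self.commute("foo", [
    #       lambda m: m.def_("foo(Tensor x) -> Tensor"),
    #       lambda m: m.impl_t_t("foo", "CPU"),
    #   ])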
def test_def(self):
state = self.commute(
"foo",
[
# m.def("foo(Tensor x) -> Tensor")
lambda m: m.def_("foo(Tensor x) -> Tensor"),
# m.impl("test_def", [](const Tensor& x) { return x })
lambda m: m.impl_t_t("foo"),
# m.impl("test_def", kCPU, [](const Tensor& x) { return x })
lambda m: m.impl_t_t("foo", dispatch="CPU"),
# m.impl("test_def", kAutograd, [](const Tensor& x) { return x })
lambda m: m.impl_t_t("foo", dispatch="Autograd"),
# m.impl("test_def", kAutogradCPU, [](const Tensor& x) { return x })
lambda m: m.impl_t_t("foo", dispatch="AutogradCPU"),
],
).state
self.assertExpectedInline(
state,
"""\
name: test::foo
schema: test::foo(Tensor x) -> Tensor
debug: registered at /dev/null:0
alias analysis kind: FROM_SCHEMA
CPU: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
AutogradCPU: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
Autograd[alias]: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeImplicitAutograd[alias]: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
""",
)
def test_def_impl_schema_mismatch(self):
# NB: an impl-impl mismatch is not reported eagerly; you'll find out
# about it because one of them won't match with def
state = self.commute(
"foo",
[
# m.def("foo(Tensor x, Tensor y) -> Tensor")
lambda m: m.def_("foo(Tensor x, Tensor y) -> Tensor"),
# m.impl("foo", [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo"),
],
expect_raises=True,
).state
self.assertExpectedInline(
state,
"""\
Inferred operator schema for a C++ kernel function doesn't match the expected function schema.
operator: test::foo
expected schema: test::foo(Tensor x, Tensor y) -> Tensor
registered at /dev/null:0
inferred schema: (Tensor _0) -> Tensor _0
impl_t_t
reason: The number of arguments is different. 2 vs 1.""",
)
def test_def_with_inference(self):
state = self.commute(
"foo",
[
# m.def("foo", [](const Tensor & x) { return x })
lambda m: m.def_name_t_t("foo"),
# m.impl("foo", torch::kCPU, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CPU"),
# m.impl("foo", torch::kAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "Autograd"),
# m.impl("foo", torch::kAutogradCPU, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "AutogradCPU"),
],
).state
self.assertExpectedInline(
state,
"""\
name: test::foo
schema: test::foo(Tensor _0) -> Tensor _0
debug: registered at /dev/null:0
alias analysis kind: CONSERVATIVE
CPU: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
AutogradCPU: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
Autograd[alias]: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeImplicitAutograd[alias]: default_def_name_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
""",
)
def test_def_only(self):
state = self.commute(
"foo",
[
# m.def("foo(Tensor x, Tensor y) -> Tensor")
lambda m: m.def_("foo(Tensor x, Tensor y) -> Tensor"),
],
).state
self.assertExpectedInline(
state,
"""\
name: test::foo
schema: test::foo(Tensor x, Tensor y) -> Tensor
debug: registered at /dev/null:0
alias analysis kind: FROM_SCHEMA
""",
)
def test_impl_only(self):
state = self.commute(
"foo",
[
# m.impl("foo", [](const Tensor& x) { return x })
lambda m: m.impl_t_t("foo"),
# m.impl("foo", torch::kCPU, [](const Tensor& x) { return x })
lambda m: m.impl_t_t("foo", "CPU"),
# m.impl("foo", torch::kAutograd, [](const Tensor& x) { return x })
lambda m: m.impl_t_t("foo", "Autograd"),
# m.impl("foo", torch::kAutogradCPU, [](const Tensor& x) { return x })
lambda m: m.impl_t_t("foo", "AutogradCPU"),
],
).state
self.assertExpectedInline(
state,
"""\
name: test::foo
schema: (none)
CPU: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
AutogradCPU: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
Autograd[alias]: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeImplicitAutograd[alias]: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
""",
)
def test_computed_table(self):
result = self.commute(
"foo",
[
# m.def("foo", [](const Tensor & x) { return x })
lambda m: m.def_name_t_t("foo"),
# m.impl("foo", torch::kCPU, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CPU", debug="fn_cpu"),
# m.impl("foo", torch::kCUDA, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "XLA", debug="fn_xla"),
# m.impl("foo", torch::kAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "Autograd", debug="fn_autograd"),
# m.impl("foo", torch::kAutogradCPU, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "AutogradCPU", debug="fn_autogradcpu"),
],
)
state, table = result.state, result.table
self.assertExpectedInline(
state,
"""\
name: test::foo
schema: test::foo(Tensor _0) -> Tensor _0
debug: registered at /dev/null:0
alias analysis kind: CONSERVATIVE
CPU: fn_cpu :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
XLA: fn_xla :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
AutogradCPU: fn_autogradcpu :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
Autograd[alias]: fn_autograd :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeImplicitAutograd[alias]: default_def_name_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
""",
)
# computed dispatch table is too big, so we only check on a few entries we're interested in.
extracted_table = extract_dispatch_table_with_keys(
table, dispatch_keys_to_check
)
self.assertExpectedInline(
extracted_table,
"""\
Undefined: default_def_name_t_t [math kernel]
CPU: fn_cpu [kernel]
CUDA: default_def_name_t_t [math kernel]
XLA: fn_xla [kernel]
AutogradOther: default_def_name_t_t [math kernel]
AutogradCPU: fn_autogradcpu [kernel]
AutogradCUDA: default_def_name_t_t [math kernel]
AutogradXLA: fn_autograd [autograd kernel]
""",
)
def test_computed_table_with_cpu_math_autogradcpu_fallthrough(self):
global_m = C._dispatch_library("IMPL", "_", "AutogradCPU")
result = self.commute(
"foo",
[
# m.def("foo", [](const Tensor & x) { return x })
lambda m: m.def_name_t_t("foo"),
# m.impl("foo", torch::kCPU, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CPU"),
],
)
state, table = result.state, result.table
self.assertExpectedInline(
state,
"""\
name: test::foo
schema: test::foo(Tensor _0) -> Tensor _0
debug: registered at /dev/null:0
alias analysis kind: CONSERVATIVE
CPU: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeImplicitAutograd[alias]: default_def_name_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
""",
)
# computed dispatch table is too big, so we only check on a few entries we're interested in.
extracted_table = extract_dispatch_table_with_keys(
table, dispatch_keys_to_check
)
self.assertExpectedInline(
extracted_table,
"""\
Undefined: default_def_name_t_t [math kernel]
CPU: impl_t_t [kernel]
CUDA: default_def_name_t_t [math kernel]
XLA: default_def_name_t_t [math kernel]
AutogradOther: default_def_name_t_t [math kernel]
AutogradCPU: registered in pytorch framework [backend fallback]
AutogradCUDA: default_def_name_t_t [math kernel]
AutogradXLA: default_def_name_t_t [math kernel]
""",
)
def test_computed_table_with_math(self):
global_m = C._dispatch_library("IMPL", "_", "AutogradCPU")
result = self.commute(
"foo",
[
# m.def("foo(Tensor x) -> Tensor")
lambda m: m.def_("foo(Tensor x) -> Tensor"),
# m.impl("foo", torch::kCompositeImplicitAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CompositeImplicitAutograd"),
],
)
state, table = result.state, result.table
self.assertExpectedInline(
state,
"""\
name: test::foo
schema: test::foo(Tensor x) -> Tensor
debug: registered at /dev/null:0
alias analysis kind: FROM_SCHEMA
CompositeImplicitAutograd[alias]: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
""",
)
# computed dispatch table is too big, so we only check on a few entries we're interested in.
extracted_table = extract_dispatch_table_with_keys(
table, dispatch_keys_to_check
)
self.assertExpectedInline(
extracted_table,
"""\
Undefined: impl_t_t [math kernel]
CPU: impl_t_t [math kernel]
CUDA: impl_t_t [math kernel]
XLA: impl_t_t [math kernel]
AutogradOther: impl_t_t [math kernel]
AutogradCPU: impl_t_t [math kernel]
AutogradCUDA: impl_t_t [math kernel]
AutogradXLA: impl_t_t [math kernel]
""",
)
def test_computed_table_with_cpu_math(self):
global_m = C._dispatch_library("IMPL", "_", "AutogradCPU")
result = self.commute(
"foo",
[
# m.def("foo(Tensor x) -> Tensor")
lambda m: m.def_("foo(Tensor x) -> Tensor"),
# m.impl("foo", torch::kCPU, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CPU", debug="fn_cpu"),
# m.impl("foo", torch::kCompositeImplicitAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t(
"foo", "CompositeImplicitAutograd", debug="fn_math"
),
],
)
state, table = result.state, result.table
self.assertExpectedInline(
state,
"""\
name: test::foo
schema: test::foo(Tensor x) -> Tensor
debug: registered at /dev/null:0
alias analysis kind: FROM_SCHEMA
CPU: fn_cpu :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeImplicitAutograd[alias]: fn_math :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
""",
)
# computed dispatch table is too big, so we only check on a few entries we're interested in.
extracted_table = extract_dispatch_table_with_keys(
table, dispatch_keys_to_check
)
self.assertExpectedInline(
extracted_table,
"""\
Undefined: fn_math [math kernel]
CPU: fn_cpu [kernel]
CUDA: fn_math [math kernel]
XLA: fn_math [math kernel]
AutogradOther: fn_math [math kernel]
AutogradCPU: registered in pytorch framework [backend fallback]
AutogradCUDA: fn_math [math kernel]
AutogradXLA: fn_math [math kernel]
""",
)
def test_computed_table_with_autograd(self):
global_m = C._dispatch_library("IMPL", "_", "AutogradCPU")
result = self.commute(
"foo",
[
# m.def("foo(Tensor x) -> Tensor")
lambda m: m.def_("foo(Tensor x) -> Tensor"),
# m.impl("foo", torch::kAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "Autograd"),
],
)
state, table = result.state, result.table
self.assertExpectedInline(
state,
"""\
name: test::foo
schema: test::foo(Tensor x) -> Tensor
debug: registered at /dev/null:0
alias analysis kind: FROM_SCHEMA
Autograd[alias]: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
""",
)
# computed dispatch table is too big, so we only check on a few entries we're interested in.
extracted_table = extract_dispatch_table_with_keys(
table, dispatch_keys_to_check
)
self.assertExpectedInline(
extracted_table,
"""\
AutogradOther: impl_t_t [autograd kernel]
AutogradCPU: impl_t_t [autograd kernel]
AutogradCUDA: impl_t_t [autograd kernel]
AutogradXLA: impl_t_t [autograd kernel]
""",
)
# Now that catchAll maps to CompositeImplicitAutograd, registering to both
# catchAll and CompositeImplicitAutograd breaks commutativity.
def test_computed_table_with_cpu_autograd_math(self):
result = self.commute(
"foo",
[
# m.def("foo(Tensor x) -> Tensor")
lambda m: m.def_("foo(Tensor x) -> Tensor"),
# m.impl("foo", torch::kCPU, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CPU", debug="fn_cpu"),
# m.impl("foo", torch::kAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "Autograd", debug="fn_autograd"),
# m.impl("foo", torch::kCompositeImplicitAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t(
"foo", "CompositeImplicitAutograd", debug="fn_math"
),
],
)
state, table = result.state, result.table
self.assertExpectedInline(
state,
"""\
name: test::foo
schema: test::foo(Tensor x) -> Tensor
debug: registered at /dev/null:0
alias analysis kind: FROM_SCHEMA
CPU: fn_cpu :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
Autograd[alias]: fn_autograd :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeImplicitAutograd[alias]: fn_math :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
""",
)
# computed dispatch table is too big, so we only check on a few entries we're interested in.
extracted_table = extract_dispatch_table_with_keys(
table, dispatch_keys_to_check
)
self.assertExpectedInline(
extracted_table,
"""\
Undefined: fn_math [math kernel]
CPU: fn_cpu [kernel]
CUDA: fn_math [math kernel]
XLA: fn_math [math kernel]
AutogradOther: fn_math [math kernel]
AutogradCPU: fn_autograd [autograd kernel]
AutogradCUDA: fn_math [math kernel]
AutogradXLA: fn_math [math kernel]
""",
)
def test_computed_table_with_ambiguous_autogradother(self):
result = self.commute(
"foo",
[
# m.def("foo(Tensor x) -> Tensor")
lambda m: m.def_("foo(Tensor x) -> Tensor"),
# m.impl("foo", torch::kCompositeImplicitAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t(
"foo", "CompositeImplicitAutograd", debug="fn_math"
),
# m.impl("foo", torch::kFPGA, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "FPGA", debug="fn_fpga"),
],
)
state, table = result.state, result.table
self.assertExpectedInline(
state,
"""\
name: test::foo
schema: test::foo(Tensor x) -> Tensor
debug: registered at /dev/null:0
alias analysis kind: FROM_SCHEMA
FPGA: fn_fpga :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeImplicitAutograd[alias]: fn_math :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
""",
)
# computed dispatch table is too big, so we only check on a few entries we're interested in.
extracted_table = extract_dispatch_table_with_keys(
table, dispatch_keys_to_check + ("FPGA",)
)
self.assertExpectedInline(
extracted_table,
"""\
Undefined: fn_math [math kernel]
CPU: fn_math [math kernel]
CUDA: fn_math [math kernel]
XLA: fn_math [math kernel]
AutogradOther: ambiguous_autogradother [ambiguous autogradother]
AutogradCPU: fn_math [math kernel]
AutogradCUDA: fn_math [math kernel]
AutogradXLA: fn_math [math kernel]
FPGA: fn_fpga [kernel]
""",
)
def test_computed_table_with_cpu_defaultbackend(self):
result = self.commute(
"foo",
[
# m.def("foo(Tensor x) -> Tensor")
lambda m: m.def_("foo(Tensor x) -> Tensor"),
# m.impl("foo", torch::kCPU, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CPU", debug="fn_cpu"),
# m.impl("foo", torch::kCompositeExplicitAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t(
"foo", "CompositeExplicitAutograd", debug="fn_defaultbackend"
),
],
)
state, table = result.state, result.table
self.assertExpectedInline(
state,
"""\
name: test::foo
schema: test::foo(Tensor x) -> Tensor
debug: registered at /dev/null:0
alias analysis kind: FROM_SCHEMA
CPU: fn_cpu :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeExplicitAutograd[alias]: fn_defaultbackend :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
""",
)
# computed dispatch table is too big, so we only check on a few entries we're interested in.
extracted_table = extract_dispatch_table_with_keys(
table, dispatch_keys_to_check
)
self.assertExpectedInline(
extracted_table,
"""\
Undefined: fn_defaultbackend [default backend kernel]
CPU: fn_cpu [kernel]
CUDA: fn_defaultbackend [default backend kernel]
XLA: fn_defaultbackend [default backend kernel]
AutogradOther: registered in pytorch framework [backend fallback]
AutogradCPU: registered in pytorch framework [backend fallback]
AutogradCUDA: registered in pytorch framework [backend fallback]
AutogradXLA: registered in pytorch framework [backend fallback]
""",
)
def test_computed_table_with_cpu_autograd_defaultbackend(self):
result = self.commute(
"foo",
[
# m.def("foo(Tensor x) -> Tensor")
lambda m: m.def_("foo(Tensor x) -> Tensor"),
# m.impl("foo", torch::kCPU, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CPU", debug="fn_cpu"),
# m.impl("foo", torch::kAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "Autograd", debug="fn_autograd"),
# m.impl("foo", torch::kCompositeExplicitAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t(
"foo", "CompositeExplicitAutograd", debug="fn_defaultbackend"
),
],
)
state, table = result.state, result.table
self.assertExpectedInline(
state,
"""\
name: test::foo
schema: test::foo(Tensor x) -> Tensor
debug: registered at /dev/null:0
alias analysis kind: FROM_SCHEMA
CPU: fn_cpu :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
Autograd[alias]: fn_autograd :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeExplicitAutograd[alias]: fn_defaultbackend :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
""",
)
# computed dispatch table is too big, so we only check on a few entries we're interested in.
extracted_table = extract_dispatch_table_with_keys(
table, dispatch_keys_to_check + ("FPGA",)
)
self.assertExpectedInline(
extracted_table,
"""\
Undefined: fn_defaultbackend [default backend kernel]
CPU: fn_cpu [kernel]
CUDA: fn_defaultbackend [default backend kernel]
XLA: fn_defaultbackend [default backend kernel]
AutogradOther: fn_autograd [autograd kernel]
AutogradCPU: fn_autograd [autograd kernel]
AutogradCUDA: fn_autograd [autograd kernel]
AutogradXLA: fn_autograd [autograd kernel]
FPGA: fn_defaultbackend [default backend kernel]
""",
)
def test_computed_table_with_cpu_autograd_math_defaultbackend(self):
result = self.commute(
"foo",
[
# m.def("foo(Tensor x) -> Tensor")
lambda m: m.def_("foo(Tensor x) -> Tensor"),
# m.impl("foo", torch::kCPU, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "CPU", debug="fn_cpu"),
# m.impl("foo", torch::kAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t("foo", "Autograd", debug="fn_autograd"),
# m.impl("foo", torch::kCompositeImplicitAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t(
"foo", "CompositeImplicitAutograd", debug="fn_math"
),
# m.impl("foo", torch::kCompositeExplicitAutograd, [](const Tensor & x) { return x })
lambda m: m.impl_t_t(
"foo", "CompositeExplicitAutograd", debug="fn_defaultbackend"
),
],
)
state, table = result.state, result.table
self.assertExpectedInline(
state,
"""\
name: test::foo
schema: test::foo(Tensor x) -> Tensor
debug: registered at /dev/null:0
alias analysis kind: FROM_SCHEMA
CPU: fn_cpu :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
Autograd[alias]: fn_autograd :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeImplicitAutograd[alias]: fn_math :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeExplicitAutograd[alias]: fn_defaultbackend :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
""",
)
# computed dispatch table is too big, so we only check on a few entries we're interested in.
extracted_table = extract_dispatch_table_with_keys(
table, dispatch_keys_to_check
)
self.assertExpectedInline(
extracted_table,
"""\
Undefined: fn_defaultbackend [default backend kernel]
CPU: fn_cpu [kernel]
CUDA: fn_defaultbackend [default backend kernel]
XLA: fn_defaultbackend [default backend kernel]
AutogradOther: fn_autograd [autograd kernel]
AutogradCPU: fn_autograd [autograd kernel]
AutogradCUDA: fn_autograd [autograd kernel]
AutogradXLA: fn_autograd [autograd kernel]
""",
)
def test_multiple_def_error(self):
ops = [
# m.def("foo(Tensor x, Tensor y) -> Tensor")
lambda m: m.def_("foo(Tensor x, Tensor y) -> Tensor"),
# m.def("foo(Tensor x, Tensor y) -> Tensor")
lambda m: m.def_("foo(Tensor x, Tensor y) -> Tensor"),
]
self.assertExpectedInline(
self.commute("foo", ops, expect_raises=True).state,
"""Tried to register an operator (test::foo(Tensor x, Tensor y) -> Tensor) with the same name and overload """
"""name multiple times. Each overload's schema should only be registered with a single call to def(). """
"""Duplicate registration: registered at /dev/null:0. Original registration: registered at /dev/null:0""",
)
def test_def_with_explicit_alias(self):
state = self.commute(
"foo",
[
# m.def(torch::schema(
# "foo(Tensor x, Tensor y) -> Tensor",
# AliasAnalysisKind::PURE))
lambda m: m.def_(
"foo(Tensor x, Tensor y) -> Tensor", alias="PURE_FUNCTION"
)
],
).state
self.assertExpectedInline(
state,
"""\
name: test::foo
schema: test::foo(Tensor x, Tensor y) -> Tensor
debug: registered at /dev/null:0
alias analysis kind: PURE_FUNCTION
""",
)
def test_multiple_def_alias_defaulting(self):
ops = [
# m.def(torch::schema("foo(Tensor x) -> Tensor",
# c10::AliasAnalysisKind::PURE_FUNCTION))
lambda m: m.def_("foo(Tensor x) -> Tensor", alias="PURE_FUNCTION"),
# RegisterOperators().op("foo(Tensor x) -> Tensor")
lambda m: m.def_legacy("foo(Tensor x) -> Tensor"),
]
self.assertExpectedInline(
self.commute("foo", ops, expect_raises=True).state,
"""Tried to register an operator (test::foo(Tensor x) -> Tensor) with the same name and overload """
"""name multiple times. Each overload's schema should only be registered with a single call to def(). """
"""Duplicate registration: registered at /dev/null:0. Original registration: registered at /dev/null:0""",
)
def test_multiple_def_alias_mismatch(self):
ops = [
# m.def(torch::schema("foo(Tensor x) -> Tensor",
# c10::AliasAnalysisKind::PURE_FUNCTION))
lambda m: m.def_("foo(Tensor x) -> Tensor", alias="PURE_FUNCTION"),
# m.def(torch::schema("foo(Tensor x) -> Tensor",
# c10::AliasAnalysisKind::CONSERVATIVE))
lambda m: m.def_("foo(Tensor x) -> Tensor", alias="CONSERVATIVE"),
]
self.assertExpectedInline(
self.commute("foo", ops, expect_raises=True).state,
"""Tried to register an operator (test::foo(Tensor x) -> Tensor) with the same name and overload """
"""name multiple times. Each overload's schema should only be registered with a single call to def(). """
"""Duplicate registration: registered at /dev/null:0. Original registration: registered at /dev/null:0""",
)
def test_multiple_fallback(self):
global_m = C._dispatch_library("IMPL", "_", "XLA")
global_m.fallback_fallthrough()
try:
global_m.fallback_fallthrough()
except RuntimeError as e:
self.assertExpectedInline(
str(e),
"""Tried to register multiple backend fallbacks for the same dispatch key XLA; previous registration """
"""registered at /dev/null:0, new registration registered at /dev/null:0""",
)
else:
self.assertTrue(False)
def test_overwrite_math(self):
ops = [
lambda m: m.impl_t_t("foo", debug="fn1"),
lambda m: m.impl_t_t("foo", debug="fn2"),
]
# Not commutative
self.assertExpectedInline(
self.commute("foo", ops, ctor_order=(0, 1)).state,
"""\
name: test::foo
schema: (none)
CompositeImplicitAutograd[alias]: fn2 :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
CompositeImplicitAutograd[alias] (inactive): fn1 :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
""",
)
# Definition: a dangling impl happens when someone does an impl() on a
# function but not a def() for it. This is usually a bug, e.g. someone
# misspelled an operator name, or someone registered an impl for an op that
    # no longer exists.
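    # A minimal sketch of how such a dangling impl can arise (hypothetical,
    # not part of this suite; it assumes the public torch.library API):
    #
    #   lib = torch.library.Library("_hypothetical_ns", "IMPL")
    #   lib.impl("misspelled_op", lambda x: x, "CPU")  # impl() with no def()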
def test_find_dangling_impls(self):
dangling_impls = C._dispatch_find_dangling_impls()
self.assertEqual(
0,
len(dangling_impls),
msg=f"Expect zero dangling impls, but found: {dangling_impls}",
)
def test_find_dangling_impls_ext(self):
extension_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"cpp_extensions",
"dangling_impl_extension.cpp",
)
module = torch.utils.cpp_extension.load(
name="dangling_impl_extension",
sources=[extension_path],
extra_cflags=["-g"],
verbose=True,
)
impls = C._dispatch_find_dangling_impls()
self.assertEqual(1, len(impls))
self.assertEqual(
f"""\
name: __test::foo
schema: (none)
CPU: registered at {extension_path}:5 :: () -> () [ boxed unboxed ]
""",
impls[0],
)
def test_dispatch_print_registrations_for_dispatch_key_invalid(self):
with self.assertRaisesRegex(
RuntimeError, "could not parse dispatch key: invalid_key"
):
C._dispatch_print_registrations_for_dispatch_key("invalid_key")
| TestDispatch |
python | airbytehq__airbyte | airbyte-ci/connectors/connectors_qa/tests/unit_tests/test_checks/test_security.py | {
"start": 3713,
"end": 5281
} | class ____:
def test_fail_when_dockerfile_exists(self, mocker, tmp_path):
# Arrange
connector = mocker.MagicMock(code_directory=tmp_path)
dockerfile = tmp_path / "Dockerfile"
dockerfile.touch()
# Act
result = security.CheckConnectorUsesPythonBaseImage()._run(connector)
# Assert
assert result.status == CheckStatus.FAILED
assert (
result.message
== f"{consts.DOCKERFILE_NAME} file exists. Please remove it and declare the base image in {consts.METADATA_FILE_NAME} file with the `connectorBuildOptions.baseImage` key"
)
def test_fail_when_base_image_is_missing(self, mocker, tmp_path):
# Arrange
connector = mocker.MagicMock(code_directory=tmp_path, metadata={})
# Act
result = security.CheckConnectorUsesPythonBaseImage()._run(connector)
# Assert
assert result.status == CheckStatus.FAILED
assert result.message == f"connectorBuildOptions.baseImage key is missing in {consts.METADATA_FILE_NAME} file"
def test_pass_when_no_dockerfile_and_base_image(self, mocker, tmp_path):
# Arrange
connector = mocker.MagicMock(code_directory=tmp_path, metadata={"connectorBuildOptions": {"baseImage": "test"}})
# Act
result = security.CheckConnectorUsesPythonBaseImage()._run(connector)
# Assert
assert result.status == CheckStatus.PASSED
assert result.message == "Connector uses the Python connector base image"
| TestCheckConnectorUsesPythonBaseImage |
python | django-extensions__django-extensions | tests/management/commands/test_sync_s3.py | {
"start": 8335,
"end": 8979
} | class ____(SyncS3TestsMixin, TestCase):
@override_settings(
AWS_ACCESS_KEY_ID="access_key_id",
AWS_SECRET_ACCESS_KEY="secret_access_key",
AWS_BUCKET_NAME="bucket_name",
)
@patch("sys.stdout", new_callable=StringIO)
def test_should_raise_CommandError_when_medi(self, m_stdout):
self.m_boto.connect_s3.return_value.get_bucket.return_value = Mock()
self.m_boto.s3.key.Key.return_value = "bucket_key"
call_command("sync_s3", "--media-only")
self.assertIn("0 files uploaded.", m_stdout.getvalue())
self.assertIn("0 files skipped.", m_stdout.getvalue())
| SyncS3CommandTests |
python | donnemartin__interactive-coding-challenges | math_probability/check_prime/test_check_prime.py | {
"start": 18,
"end": 536
} | class ____(unittest.TestCase):
def test_check_prime(self):
math = Math()
self.assertRaises(TypeError, math.check_prime, None)
self.assertRaises(TypeError, math.check_prime, 98.6)
self.assertEqual(math.check_prime(0), False)
self.assertEqual(math.check_prime(1), False)
self.assertEqual(math.check_prime(97), True)
print('Success: test_check_prime')
def main():
test = TestMath()
test.test_check_prime()
if __name__ == '__main__':
main()
| TestMath |
python | PyCQA__pydocstyle | src/tests/test_cases/expected.py | {
"start": 0,
"end": 617
} | class ____:
"""Hold expectation for pep257 violations in tests."""
def __init__(self):
self.expected = set()
def expect(self, *args, arg_count=0, func_name=""):
"""Decorator that expects a certain PEP 257 violation."""
# The `arg_count` parameter helps the decorator
# with functions that have positional arguments.
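        # For example (hypothetical usage), a function taking two positional
        # arguments would be registered as
        #   @expectation.expect('D4xx: some message', arg_count=2)
        # so that `f` can be called below with two placeholder arguments.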
if len(args) == 1:
def decorate(f):
self.expected.add((func_name or f.__name__, args[0]))
f(*[None]*arg_count)
return f
return decorate
self.expected.add(args)
| Expectation |
python | langchain-ai__langchain | libs/langchain/langchain_classic/base_memory.py | {
"start": 702,
"end": 3599
} | class ____(Serializable, ABC):
"""Abstract base class for memory in Chains.
Memory refers to state in Chains. Memory can be used to store information about
past executions of a Chain and inject that information into the inputs of
future executions of the Chain. For example, for conversational Chains Memory
can be used to store conversations and automatically add them to future model
prompts so that the model has the necessary context to respond coherently to
the latest input.
Example:
```python
class SimpleMemory(BaseMemory):
memories: dict[str, Any] = dict()
@property
def memory_variables(self) -> list[str]:
return list(self.memories.keys())
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
return self.memories
def save_context(
self, inputs: dict[str, Any], outputs: dict[str, str]
) -> None:
pass
def clear(self) -> None:
pass
```
"""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
@property
@abstractmethod
def memory_variables(self) -> list[str]:
"""The string keys this memory class will add to chain inputs."""
@abstractmethod
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
"""Return key-value pairs given the text input to the chain.
Args:
inputs: The inputs to the chain.
Returns:
A dictionary of key-value pairs.
"""
async def aload_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
"""Async return key-value pairs given the text input to the chain.
Args:
inputs: The inputs to the chain.
Returns:
A dictionary of key-value pairs.
"""
return await run_in_executor(None, self.load_memory_variables, inputs)
@abstractmethod
def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
"""Save the context of this chain run to memory.
Args:
inputs: The inputs to the chain.
outputs: The outputs of the chain.
"""
async def asave_context(
self, inputs: dict[str, Any], outputs: dict[str, str]
) -> None:
"""Async save the context of this chain run to memory.
Args:
inputs: The inputs to the chain.
outputs: The outputs of the chain.
"""
await run_in_executor(None, self.save_context, inputs, outputs)
@abstractmethod
def clear(self) -> None:
"""Clear memory contents."""
async def aclear(self) -> None:
"""Async clear memory contents."""
await run_in_executor(None, self.clear)
| BaseMemory |
python | getsentry__sentry | tests/sentry/hybridcloud/tasks/test_deliver_webhooks.py | {
"start": 22067,
"end": 31321
} | class ____(TestCase):
@responses.activate
def test_drain_missing_payload(self) -> None:
drain_mailbox_parallel(99)
assert len(responses.calls) == 0
@responses.activate
def test_drain_unknown_region(self) -> None:
webhook_one = self.create_webhook_payload(
mailbox_name="github:123",
region_name="lolnope",
)
with pytest.raises(RegionResolutionError):
drain_mailbox_parallel(webhook_one.id)
assert len(responses.calls) == 0
@responses.activate
@override_regions(region_config)
def test_drain_success_partial(self) -> None:
responses.add(
responses.POST,
"http://us.testserver/extensions/github/webhook/",
status=200,
body="",
)
responses.add(
responses.POST,
"http://us.testserver/extensions/github/webhook/",
status=500,
body="",
)
records = create_payloads(5, "github:123")
drain_mailbox_parallel(records[0].id)
worker_threads = options.get("hybridcloud.webhookpayload.worker_threads")
# We'll attempt one thread batch, but the second+ will fail
assert len(responses.calls) == worker_threads
# Mailbox should have 4 records left
assert WebhookPayload.objects.count() == 4
# Remaining record should be scheduled to run later.
first = WebhookPayload.objects.order_by("id").first()
assert first
assert first.attempts == 1
assert first.schedule_for > timezone.now()
@responses.activate
@override_regions(region_config)
def test_drain_success(self) -> None:
responses.add(
responses.POST,
"http://us.testserver/extensions/github/webhook/",
status=200,
body="",
)
records = create_payloads(3, "github:123")
drain_mailbox_parallel(records[0].id)
# Mailbox should be empty
assert not WebhookPayload.objects.filter().exists()
@responses.activate
@override_regions(region_config)
def test_drain_time_limit(self) -> None:
responses.add(
responses.POST,
"http://us.testserver/extensions/github/webhook/",
status=200,
body="",
)
records = create_payloads(1, "github:123")
with patch.object(
deliver_webhooks,
"BATCH_SCHEDULE_OFFSET",
new_callable=PropertyMock(return_value=timedelta(minutes=0)),
):
drain_mailbox_parallel(records[0].id)
# Once start time + batch offset is in the past we stop delivery
assert WebhookPayload.objects.count() == 1
@responses.activate
@override_regions(region_config)
def test_drain_discard_old_messages(self) -> None:
responses.add(
responses.POST,
"http://us.testserver/extensions/github/webhook/",
status=200,
body="",
)
records = create_payloads(20, "github:123")
# Make old records
for record in records:
record.date_added = timezone.now() - timedelta(days=4)
record.save()
drain_mailbox_parallel(records[0].id)
# Mailbox should be empty
assert not WebhookPayload.objects.filter().exists()
# No requests sent because records are too old
assert len(responses.calls) == 0
@responses.activate
@override_regions(region_config)
def test_drain_too_many_attempts(self) -> None:
responses.add(
responses.POST,
"http://us.testserver/extensions/github/webhook/",
status=500,
body="",
)
webhook_one = self.create_webhook_payload(
mailbox_name="github:123",
region_name="us",
attempts=MAX_ATTEMPTS,
)
drain_mailbox_parallel(webhook_one.id)
assert not WebhookPayload.objects.filter(id=webhook_one.id).exists()
assert len(responses.calls) == 1
@responses.activate
@override_regions(region_config)
def test_drain_more_than_max_attempts(self) -> None:
webhook_one = self.create_webhook_payload(
mailbox_name="github:123",
region_name="us",
attempts=MAX_ATTEMPTS + 1,
)
drain_mailbox_parallel(webhook_one.id)
assert not WebhookPayload.objects.filter(id=webhook_one.id).exists()
assert len(responses.calls) == 1
@responses.activate
@override_regions(region_config)
def test_drain_fatality(self) -> None:
responses.add(
responses.POST,
"http://us.testserver/extensions/github/webhook/",
# While this specific scenario won't happen, the client libraries could fail
body=ValueError(),
)
webhook_one = self.create_webhook_payload(
mailbox_name="github:123",
region_name="us",
)
with pytest.raises(ValueError):
drain_mailbox_parallel(webhook_one.id)
hook = WebhookPayload.objects.filter(id=webhook_one.id).first()
assert hook
assert hook.attempts == 1
assert hook.schedule_for >= timezone.now()
assert len(responses.calls) == 1
@responses.activate
@override_regions(region_config)
def test_drain_host_error(self) -> None:
responses.add(
responses.POST,
"http://us.testserver/extensions/github/webhook/",
body=ConnectionError(),
)
webhook_one = self.create_webhook_payload(
mailbox_name="github:123",
region_name="us",
)
drain_mailbox_parallel(webhook_one.id)
hook = WebhookPayload.objects.filter(id=webhook_one.id).first()
assert hook
assert len(responses.calls) == 1
@responses.activate
@override_regions(region_config)
def test_drain_conflict(self) -> None:
# Getting a conflict back from the region silo means
# we should drop the hook.
responses.add(
responses.POST,
"http://us.testserver/extensions/github/webhook/",
status=409,
body="",
)
webhook_one = self.create_webhook_payload(
mailbox_name="github:123",
region_name="us",
)
drain_mailbox_parallel(webhook_one.id)
assert not WebhookPayload.objects.filter(id=webhook_one.id).exists()
assert len(responses.calls) == 1
@responses.activate
@override_regions(region_config)
def test_drain_api_error_unauthorized(self) -> None:
responses.add(
responses.POST,
"http://us.testserver/extensions/github/webhook/",
status=401,
body="",
)
webhook_one = self.create_webhook_payload(
mailbox_name="github:123",
region_name="us",
)
drain_mailbox_parallel(webhook_one.id)
hook = WebhookPayload.objects.filter(id=webhook_one.id).first()
# We don't retry 401
assert hook is None
assert len(responses.calls) == 1
@responses.activate
@override_regions(region_config)
def test_drain_api_error_bad_request(self) -> None:
responses.add(
responses.POST,
"http://us.testserver/extensions/github/webhook/",
status=400,
body="",
)
webhook_one = self.create_webhook_payload(
mailbox_name="github:123",
region_name="us",
)
drain_mailbox_parallel(webhook_one.id)
hook = WebhookPayload.objects.filter(id=webhook_one.id).first()
# We don't retry 400
assert hook is None
assert len(responses.calls) == 1
@responses.activate
@override_regions(region_config)
def test_drain_not_found(self) -> None:
responses.add(
responses.POST,
"http://us.testserver/plugins/github/organizations/123/webhook/",
status=404,
body="<html><title>lol nope</title></html>",
)
webhook_one = self.create_webhook_payload(
mailbox_name="plugins:123",
region_name="us",
request_path="/plugins/github/organizations/123/webhook/",
)
drain_mailbox_parallel(webhook_one.id)
# We don't retry if the region 404s
hook = WebhookPayload.objects.filter(id=webhook_one.id).first()
assert hook is None
assert len(responses.calls) == 1
@responses.activate
@override_regions(region_config)
def test_drain_timeout(self) -> None:
responses.add(
responses.POST, "http://us.testserver/extensions/github/webhook/", body=ReadTimeout()
)
webhook_one = self.create_webhook_payload(
mailbox_name="github:123",
region_name="us",
)
drain_mailbox_parallel(webhook_one.id)
hook = WebhookPayload.objects.filter(id=webhook_one.id).first()
assert hook
assert hook.schedule_for > timezone.now()
assert hook.attempts == 1
assert len(responses.calls) == 1
| DrainMailboxParallelTest |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 34205,
"end": 34431
} | class ____(BaseModel, extra="forbid"):
left: "Expression" = Field(..., description="")
right: "Expression" = Field(..., description="")
by_zero_default: Optional[float] = Field(default=None, description="")
| DivParams |
python | pypa__warehouse | warehouse/manage/forms.py | {
"start": 17376,
"end": 18428
} | class ____(wtforms.Form):
__params__ = ["organization"]
organization = wtforms.SelectField(
"Select organization",
choices=[("", "Select organization")],
validators=[
wtforms.validators.InputRequired(message="Select organization"),
],
)
def __init__(self, *args, organization_choices, **kwargs):
super().__init__(*args, **kwargs)
self.organization.choices += [
(
str(org.id),
org.get_billing_status_display(),
)
for org in sorted(organization_choices, key=lambda x: x.name)
]
self.disabled_organizations = [
str(org.id) for org in organization_choices if not org.good_standing
]
def validate_organization(self, field):
if self.organization.data in self.disabled_organizations:
raise wtforms.validators.ValidationError(
_("Cannot transfer to Company Organization with inactive billing")
)
| TransferOrganizationProjectForm |
python | ray-project__ray | python/ray/tests/test_placement_group_3.py | {
"start": 5189,
"end": 23886
} | class ____:
def ready(self):
return True
for bundle_index in range(2):
actor = Actor.options(lifetime="detached",
scheduling_strategy=PlacementGroupSchedulingStrategy(placement_group=pg,
placement_group_bundle_index=bundle_index)).remote()
ray.get(actor.ready.remote())
ray.shutdown()
"""
run_string_as_driver(driver_code)
# Wait until the driver is reported as dead by GCS.
def is_job_done():
jobs = ray._private.state.jobs()
for job in jobs:
if job["IsDead"]:
return True
return False
def assert_alive_num_pg(expected_num_pg):
alive_num_pg = 0
for _, placement_group_info in ray.util.placement_group_table().items():
if placement_group_info["state"] == "CREATED":
alive_num_pg += 1
return alive_num_pg == expected_num_pg
def assert_alive_num_actor(expected_num_actor):
alive_num_actor = 0
for actor_info in ray.util.state.list_actors():
if actor_info.state == "ALIVE":
alive_num_actor += 1
return alive_num_actor == expected_num_actor
wait_for_condition(is_job_done)
assert assert_alive_num_pg(1)
assert assert_alive_num_actor(2)
    # Make sure a detached placement group stays alive when its creator,
    # a detached actor, dies.
# Test actors first.
@ray.remote(num_cpus=1)
class NestedActor:
def ready(self):
return True
@ray.remote(num_cpus=1)
class Actor:
def __init__(self):
self.actors = []
def ready(self):
return True
def schedule_nested_actor_with_detached_pg(self):
# Create placement group which is detached.
pg = ray.util.placement_group(
[{"CPU": 1} for _ in range(2)],
strategy="STRICT_SPREAD",
lifetime="detached",
name="detached_pg",
)
ray.get(pg.ready())
# Schedule nested actor with the placement group.
for bundle_index in range(2):
actor = NestedActor.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=pg, placement_group_bundle_index=bundle_index
),
lifetime="detached",
).remote()
ray.get(actor.ready.remote())
self.actors.append(actor)
a = Actor.options(lifetime="detached").remote()
ray.get(a.ready.remote())
    # 1 parent actor and 2 child actors.
ray.get(a.schedule_nested_actor_with_detached_pg.remote())
# Kill an actor and wait until it is killed.
kill_actor_and_wait_for_failure(a)
with pytest.raises(ray.exceptions.RayActorError):
ray.get(a.ready.remote())
# We should have 2 alive pgs and 4 alive actors.
assert assert_alive_num_pg(2)
assert assert_alive_num_actor(4)
def test_named_placement_group(ray_start_cluster):
cluster = ray_start_cluster
for _ in range(2):
cluster.add_node(num_cpus=3)
cluster.wait_for_nodes()
info = ray.init(address=cluster.address, namespace="default_test_namespace")
global_placement_group_name = "named_placement_group"
# Create a detached placement group with name.
driver_code = f"""
import ray
ray.init(address="{info["address"]}", namespace="default_test_namespace")
pg = ray.util.placement_group(
[{{"CPU": 1}} for _ in range(2)],
strategy="STRICT_SPREAD",
name="{global_placement_group_name}",
lifetime="detached")
ray.get(pg.ready())
ray.shutdown()
"""
run_string_as_driver(driver_code)
# Wait until the driver is reported as dead by GCS.
def is_job_done():
jobs = ray._private.state.jobs()
for job in jobs:
if job["IsDead"]:
return True
return False
wait_for_condition(is_job_done)
@ray.remote(num_cpus=1)
class Actor:
def ping(self):
return "pong"
# Get the named placement group and schedule a actor.
placement_group = ray.util.get_placement_group(global_placement_group_name)
assert placement_group is not None
assert placement_group.wait(5)
actor = Actor.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=placement_group, placement_group_bundle_index=0
)
).remote()
ray.get(actor.ping.remote())
    # Create another placement group and make sure its creation fails.
error_creation_count = 0
try:
ray.util.placement_group(
[{"CPU": 1} for _ in range(2)],
strategy="STRICT_SPREAD",
name=global_placement_group_name,
)
except RaySystemError:
error_creation_count += 1
assert error_creation_count == 1
    # Remove the named placement group and make sure the second creation
    # succeeds.
ray.util.remove_placement_group(placement_group)
same_name_pg = ray.util.placement_group(
[{"CPU": 1} for _ in range(2)],
strategy="STRICT_SPREAD",
name=global_placement_group_name,
)
assert same_name_pg.wait(10)
    # Get a named placement group with a name that doesn't exist
    # and make sure it raises a ValueError.
error_count = 0
try:
ray.util.get_placement_group("inexistent_pg")
except ValueError:
error_count = error_count + 1
assert error_count == 1
def test_placement_group_synchronous_registration(ray_start_cluster):
cluster = ray_start_cluster
# One node which only has one CPU.
cluster.add_node(num_cpus=1)
cluster.wait_for_nodes()
ray.init(address=cluster.address)
    # Create a placement group that has two bundles and the `STRICT_PACK`
    # strategy so that its registration succeeds but its scheduling fails.
placement_group = ray.util.placement_group(
name="name",
strategy="STRICT_PACK",
bundles=[
{
"CPU": 1,
},
{"CPU": 1},
],
)
# Make sure we can properly remove it immediately
# as its registration is synchronous.
ray.util.remove_placement_group(placement_group)
wait_for_condition(lambda: is_placement_group_removed(placement_group))
def test_placement_group_gpu_set(ray_start_cluster):
cluster = ray_start_cluster
# One node which only has one CPU.
cluster.add_node(num_cpus=1, num_gpus=1)
cluster.add_node(num_cpus=1, num_gpus=1)
cluster.wait_for_nodes()
ray.init(address=cluster.address)
placement_group = ray.util.placement_group(
name="name",
strategy="PACK",
bundles=[{"CPU": 1, "GPU": 1}, {"CPU": 1, "GPU": 1}],
)
@ray.remote(num_gpus=1)
def get_gpus():
return ray.get_gpu_ids()
result = get_gpus.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=placement_group, placement_group_bundle_index=0
)
).remote()
result = ray.get(result)
assert result == [0]
result = get_gpus.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=placement_group, placement_group_bundle_index=1
)
).remote()
result = ray.get(result)
assert result == [0]
def test_placement_group_gpu_assigned(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_gpus=2)
ray.init(address=cluster.address)
gpu_ids_res = set()
@ray.remote(num_gpus=1, num_cpus=0)
def f():
return os.environ["CUDA_VISIBLE_DEVICES"]
pg1 = ray.util.placement_group([{"GPU": 1}])
pg2 = ray.util.placement_group([{"GPU": 1}])
assert pg1.wait(10)
assert pg2.wait(10)
gpu_ids_res.add(
ray.get(
f.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=pg1
)
).remote()
)
)
gpu_ids_res.add(
ray.get(
f.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=pg2
)
).remote()
)
)
assert len(gpu_ids_res) == 2
def test_incremental_pg_and_actor_scheduling(ray_start_cluster):
"""Tests that actors in pending PGs are scheduled as resources become available.
For more detailed information please refer to:
https://github.com/ray-project/ray/issues/15801.
"""
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
ray.init(address=cluster.address)
@ray.remote(num_cpus=1)
class A:
def ready(self):
pass
# Schedule a large number of placement groups and actors that should be placed in
# those groups. Initially, none are schedulable.
pgs = [ray.util.placement_group([{"CPU": 1}]) for _ in range(1000)]
pg_refs = [pg.ready() for pg in pgs]
actors = [
A.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(placement_group=pg)
).remote()
for pg in pgs
]
actor_refs = [actor.ready.remote() for actor in actors]
ready_pgs, _ = ray.wait(pg_refs, timeout=0.1)
assert len(ready_pgs) == 0
ready_actors, _ = ray.wait(actor_refs, timeout=0.1)
assert len(ready_actors) == 0
def check_num_refs_ready(refs: List[ObjectRef], expected: int) -> bool:
ready, _ = ray.wait(refs, num_returns=expected, timeout=1)
return len(ready) == expected
# Iteratively add nodes to the cluster so that some of the placement groups (and
# therefore actors) can be scheduled. Verify that the PGs and actors are scheduled
# incrementally as their required resources become available.
for i in range(5):
cluster.add_node(num_cpus=1)
wait_for_condition(lambda: check_num_refs_ready(pg_refs, i + 1), timeout=30)
wait_for_condition(lambda: check_num_refs_ready(actor_refs, i + 1), timeout=30)
def test_placement_group_gpu_unique_assigned(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_gpus=4, num_cpus=4)
ray.init(address=cluster.address)
gpu_ids_res = set()
# Create placement group with 4 bundles using 1 GPU each.
num_gpus = 4
bundles = [{"GPU": 1, "CPU": 1} for _ in range(num_gpus)]
pg = placement_group(bundles)
ray.get(pg.ready())
# Actor using 1 GPU that has a method to get
# $CUDA_VISIBLE_DEVICES env variable.
@ray.remote(num_gpus=1, num_cpus=1)
class Actor:
def get_gpu(self):
import os
return os.environ["CUDA_VISIBLE_DEVICES"]
# Create actors out of order.
actors = []
actors.append(
Actor.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=pg, placement_group_bundle_index=0
)
).remote()
)
actors.append(
Actor.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=pg, placement_group_bundle_index=3
)
).remote()
)
actors.append(
Actor.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=pg, placement_group_bundle_index=2
)
).remote()
)
actors.append(
Actor.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=pg, placement_group_bundle_index=1
)
).remote()
)
for actor in actors:
gpu_ids = ray.get(actor.get_gpu.remote())
assert len(gpu_ids) == 1
gpu_ids_res.add(gpu_ids)
assert len(gpu_ids_res) == 4
@pytest.mark.parametrize("enable_v2", [True, False])
def test_placement_group_status_no_bundle_demand(ray_start_cluster, enable_v2):
reset_autoscaler_v2_enabled_cache()
cluster = ray_start_cluster
cluster.add_node(num_cpus=4, _system_config={"enable_autoscaler_v2": enable_v2})
ray.init(address=cluster.address)
@ray.remote
def f():
pass
pg = ray.util.placement_group([{"CPU": 1}])
ray.get(pg.ready())
ray.util.remove_placement_group(pg)
wait_for_condition(lambda: is_placement_group_removed(pg))
    # Create a ready task after the placement group is removed.
    # This shouldn't be reported in the resource demand.
r = pg.ready() # noqa
# Wait until the usage is updated, which is
# when the demand is also updated.
def is_usage_updated():
demand_output = get_ray_status_output(cluster.address)
return demand_output["usage"] != ""
wait_for_condition(is_usage_updated)
# The output shouldn't include the pg.ready task demand.
demand_output = get_ray_status_output(cluster.address)
assert demand_output["demand"] == "(no resource demands)"
@pytest.mark.parametrize("enable_v2", [True, False])
def test_placement_group_status(ray_start_cluster, enable_v2):
cluster = ray_start_cluster
cluster.add_node(num_cpus=4, _system_config={"enable_autoscaler_v2": enable_v2})
ray.init(cluster.address)
@ray.remote(num_cpus=1)
class A:
def ready(self):
pass
pg = ray.util.placement_group([{"CPU": 1}])
ray.get(pg.ready())
    # Wait until the usage is updated to the expected value, which is
    # when the demand is also updated.
def is_usage_updated():
demand_output = get_ray_status_output(cluster.address)
cpu_usage = demand_output["usage"]
if cpu_usage == "":
return False
cpu_usage = cpu_usage.split("\n")[0]
expected = "0.0/4.0 CPU (0.0 used of 1.0 reserved in placement groups)"
if cpu_usage != expected:
assert cpu_usage == "0.0/4.0 CPU"
return False
return True
wait_for_condition(is_usage_updated, AUTOSCALER_UPDATE_INTERVAL_S)
# 2 CPU + 1 PG CPU == 3.0/4.0 CPU (1 used by pg)
actors = [A.remote() for _ in range(2)]
actors_in_pg = [
A.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(placement_group=pg)
).remote()
for _ in range(1)
]
ray.get([actor.ready.remote() for actor in actors])
ray.get([actor.ready.remote() for actor in actors_in_pg])
# Wait long enough until the usage is propagated to GCS.
time.sleep(AUTOSCALER_UPDATE_INTERVAL_S)
demand_output = get_ray_status_output(cluster.address)
cpu_usage = demand_output["usage"].split("\n")[0]
expected = "3.0/4.0 CPU (1.0 used of 1.0 reserved in placement groups)"
assert cpu_usage == expected
def test_placement_group_removal_leak_regression(ray_start_cluster):
"""Related issue:
https://github.com/ray-project/ray/issues/19131
"""
cluster = ray_start_cluster
cluster.add_node(num_cpus=5)
ray.init(address=cluster.address)
TOTAL_CPUS = 8
bundles = [{"CPU": 1, "GPU": 1}]
bundles += [{"CPU": 1} for _ in range(TOTAL_CPUS - 1)]
pg = placement_group(bundles, strategy="PACK")
# Here, we simulate that the ready task is queued and
# the new node is up. As soon as the new node is up,
# the ready task is scheduled.
# See https://github.com/ray-project/ray/pull/19138
# for more details about the test.
o = pg.ready()
# Add an artificial delay until the new node is up.
time.sleep(3)
cluster.add_node(num_cpus=5, num_gpus=1)
ray.get(o)
bundle_resource_name = f"bundle_group_{pg.id.hex()}"
expected_bundle_wildcard_val = TOTAL_CPUS * 1000
# This should fail if there's a leakage
# because the bundle resources are never returned properly.
def check_bundle_leaks():
bundle_resources = ray.available_resources()[bundle_resource_name]
return expected_bundle_wildcard_val == bundle_resources
wait_for_condition(check_bundle_leaks)
def test_placement_group_local_resource_view(monkeypatch, ray_start_cluster):
"""Please refer to https://github.com/ray-project/ray/pull/19911
for more details.
"""
with monkeypatch.context() as m:
        # Increase the broadcasting interval so that node resource updates
        # arrive at the raylet only after all local resources have been
        # allocated.
m.setenv("RAY_raylet_report_resources_period_milliseconds", "2000")
cluster = ray_start_cluster
cluster.add_node(num_cpus=16, object_store_memory=1e9)
cluster.wait_for_nodes()
        # We need to init here so that the driver connects to the raylet
        # that only has CPU resources.
        # This is a hacky way to keep scheduling from hanging: otherwise a
        # <CPU:1> job could be scheduled onto the node with the GPU, leaving
        # no node with enough resources for a <GPU:1, CPU:1> task.
ray.init(address="auto")
cluster.add_node(num_cpus=16, num_gpus=1)
cluster.wait_for_nodes()
NUM_CPU_BUNDLES = 30
@ray.remote(num_cpus=1)
class Worker(object):
def __init__(self, i):
self.i = i
def work(self):
time.sleep(0.1)
print("work ", self.i)
@ray.remote(num_cpus=1, num_gpus=1)
class Trainer(object):
def __init__(self, i):
self.i = i
def train(self):
time.sleep(0.2)
print("train ", self.i)
bundles = [{"CPU": 1, "GPU": 1}]
bundles += [{"CPU": 1} for _ in range(NUM_CPU_BUNDLES)]
pg = placement_group(bundles, strategy="PACK")
ray.get(pg.ready())
        # Local resources will be allocated; here we ensure that the local
        # view stays consistent and that stale node resource updates are
        # discarded.
workers = [
Worker.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(placement_group=pg)
).remote(i)
for i in range(NUM_CPU_BUNDLES)
]
trainer = Trainer.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(placement_group=pg)
).remote(0)
ray.get([workers[i].work.remote() for i in range(NUM_CPU_BUNDLES)])
ray.get(trainer.train.remote())
def test_fractional_resources_handle_correct(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=1000)
ray.init(address=cluster.address)
bundles = [{"CPU": 0.01} for _ in range(5)]
pg = placement_group(bundles, strategy="SPREAD")
ray.get(pg.ready(), timeout=10)
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| Actor |
python | allegroai__clearml | clearml/backend_api/services/v2_20/tasks.py | {
"start": 381435,
"end": 383088
} | class ____(Request):
"""
Set the script requirements for a task
:param task: Task ID
:type task: str
:param requirements: A JSON object containing requirements strings by key
:type requirements: dict
"""
_service = "tasks"
_action = "set_requirements"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"requirements": {
"description": "A JSON object containing requirements strings by key",
"type": "object",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task", "requirements"],
"type": "object",
}
def __init__(self, task: str, requirements: dict, **kwargs: Any) -> None:
super(SetRequirementsRequest, self).__init__(**kwargs)
self.task = task
self.requirements = requirements
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("requirements")
def requirements(self) -> dict:
return self._property_requirements
@requirements.setter
def requirements(self, value: dict) -> None:
if value is None:
self._property_requirements = None
return
self.assert_isinstance(value, "requirements", (dict,))
self._property_requirements = value
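    # Hypothetical usage sketch (illustration only, not part of the generated
    # schema code; the requirements payload shape is an assumption):
    #   req = SetRequirementsRequest(
    #       task="<task-id>", requirements={"pip": "numpy==1.24.0"}
    #   )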
| SetRequirementsRequest |
python | neetcode-gh__leetcode | python/1029-two-city-scheduling.py | {
"start": 0,
"end": 385
} | class ____:
def twoCitySchedCost(self, costs: List[List[int]]) -> int:
        # Sort pairs by the savings of sending a person to city B instead of
        # city A (c2 - c1): the smaller the value, the cheaper city B is.
        diffs = []
        for c1, c2 in costs:
            diffs.append([c2 - c1, c1, c2])
        diffs.sort()
        res = 0
        # First half (city B relatively cheapest) flies to B (cost c2);
        # the rest fly to A (cost c1), keeping the split exactly even.
        for i in range(len(diffs)):
            if i < len(diffs) / 2:
                res += diffs[i][2]
            else:
                res += diffs[i][1]
return res | Solution |
python | scrapy__scrapy | scrapy/contracts/default.py | {
"start": 991,
"end": 1353
} | class ____(Contract):
"""Contract to set metadata arguments for the request.
The value should be JSON-encoded dictionary, e.g.:
@meta {"arg1": "some value"}
"""
name = "meta"
def adjust_request_args(self, args: dict[str, Any]) -> dict[str, Any]:
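        # self.args holds the whitespace-split tokens that follow "@meta" in
        # the docstring; re-join them so multi-token JSON objects parse.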
args["meta"] = json.loads(" ".join(self.args))
return args
| MetadataContract |
python | walkccc__LeetCode | solutions/3248. Snake in Matrix/3248.py | {
"start": 0,
"end": 343
} | class ____:
def finalPositionOfSnake(self, n: int, commands: list[str]) -> int:
directions = {
"UP": (-1, 0),
"RIGHT": (0, 1),
"DOWN": (1, 0),
"LEFT": (0, -1),
}
i = 0
j = 0
for command in commands:
dx, dy = directions[command]
i += dx
j += dy
        # Flatten the final (row i, col j) position into the cell id i*n + j.
        return i * n + j
| Solution |
python | fastapi__sqlmodel | docs_src/tutorial/code_structure/tutorial001/models.py | {
"start": 306,
"end": 652
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
team_id: Optional[int] = Field(default=None, foreign_key="team.id")
team: Optional[Team] = Relationship(back_populates="heroes")
| Hero |
python | plotly__plotly.py | plotly/graph_objs/scattermap/_marker.py | {
"start": 233,
"end": 31049
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattermap"
_path_str = "scattermap.marker"
_valid_props = {
"allowoverlap",
"angle",
"anglesrc",
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"color",
"coloraxis",
"colorbar",
"colorscale",
"colorsrc",
"opacity",
"opacitysrc",
"reversescale",
"showscale",
"size",
"sizemin",
"sizemode",
"sizeref",
"sizesrc",
"symbol",
"symbolsrc",
}
@property
def allowoverlap(self):
"""
Flag to draw all symbols, even if they overlap.
The 'allowoverlap' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["allowoverlap"]
@allowoverlap.setter
def allowoverlap(self, val):
self["allowoverlap"] = val
@property
def angle(self):
"""
Sets the marker orientation from true North, in degrees
clockwise. When using the "auto" default, no rotation would be
applied in perspective views which is different from using a
zero angle.
The 'angle' property is a number and may be specified as:
- An int or float
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["angle"]
@angle.setter
def angle(self, val):
self["angle"] = val
@property
def anglesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `angle`.
The 'anglesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["anglesrc"]
@anglesrc.setter
def anglesrc(self, val):
self["anglesrc"] = val
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in `marker.color` is
set to a numerical array. In case `colorscale` is unspecified
or `autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.color`) or the
bounds set in `marker.cmin` and `marker.cmax` Has an effect
only if in `marker.color` is set to a numerical array. Defaults
to `false` when `marker.cmin` and `marker.cmax` are set by the
user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
in `marker.color` is set to a numerical array. Value should
have the same units as in `marker.color` and if set,
`marker.cmin` must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling `marker.cmin`
and/or `marker.cmax` to be equidistant to this point. Has an
effect only if in `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`. Has no
effect when `marker.cauto` is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
in `marker.color` is set to a numerical array. Value should
have the same units as in `marker.color` and if set,
`marker.cmax` must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
@property
def color(self):
"""
Sets the marker color. It accepts either a specific color or an
array of numbers that are mapped to the colorscale relative to
the max and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A number that will be interpreted as a color
according to scattermap.marker.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattermap.marker.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Returns
-------
plotly.graph_objs.scattermap.marker.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
@property
def colorscale(self):
"""
Sets the colorscale. Has an effect only if in `marker.color` is
set to a numerical array. The colorscale must be an array
containing arrays mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At minimum, a mapping for
the lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color space, use
`marker.cmin` and `marker.cmax`. Alternatively, `colorscale`
        may be a palette name string of the following list: Blackbody,
        Bluered, Blues, Cividis, Earth, Electric, Greens, Greys, Hot, Jet,
        Picnic, Portland, Rainbow, RdBu, Reds, Viridis, YlGnBu, YlOrRd.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def opacity(self):
"""
Sets the marker opacity.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def opacitysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `opacity`.
The 'opacitysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["opacitysrc"]
@opacitysrc.setter
def opacitysrc(self, val):
self["opacitysrc"] = val
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if in
`marker.color` is set to a numerical array. If true,
`marker.cmin` will correspond to the last color in the array
and `marker.cmax` will correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace. Has an effect only if in `marker.color` is set to a
numerical array.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
@property
def size(self):
"""
Sets the marker size (in px).
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizemin(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the minimum size (in px) of the rendered marker
points.
The 'sizemin' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["sizemin"]
@sizemin.setter
def sizemin(self, val):
self["sizemin"] = val
@property
def sizemode(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the rule by which the data in `size` is converted
to pixels.
The 'sizemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['diameter', 'area']
Returns
-------
Any
"""
return self["sizemode"]
@sizemode.setter
def sizemode(self, val):
self["sizemode"] = val
@property
def sizeref(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the scale factor used to determine the rendered
size of marker points. Use with `sizemin` and `sizemode`.
The 'sizeref' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["sizeref"]
@sizeref.setter
def sizeref(self, val):
self["sizeref"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def symbol(self):
"""
Sets the marker symbol. Full list: https://www.mapbox.com/maki-
icons/ Note that array values for `marker.color` and
`marker.size` are only available for "circle" symbols.
The 'symbol' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["symbol"]
@symbol.setter
def symbol(self, val):
self["symbol"] = val
@property
def symbolsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `symbol`.
The 'symbolsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["symbolsrc"]
@symbolsrc.setter
def symbolsrc(self, val):
self["symbolsrc"] = val
@property
def _prop_descriptions(self):
return """\
allowoverlap
Flag to draw all symbols, even if they overlap.
angle
Sets the marker orientation from true North, in degrees
clockwise. When using the "auto" default, no rotation
would be applied in perspective views, which is
different from using a zero angle.
anglesrc
Sets the source reference on Chart Studio Cloud for
`angle`.
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if
`marker.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`.
Has an effect only if `marker.color` is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if `marker.color` is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.scattermap.marker.ColorBar
` instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if
`marker.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values is required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,E
lectric,Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd
Bu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on Chart Studio Cloud for
`opacity`.
reversescale
Reverses the color mapping if true. Has an effect only
if `marker.color` is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if `marker.color` is
set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px) of the
rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule by which the data in
`size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points. Use with
`sizemin` and `sizemode`.
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
symbol
Sets the marker symbol. Full list:
https://www.mapbox.com/maki-icons/ Note that array
values for `marker.color` and `marker.size` are only
available for "circle" symbols.
symbolsrc
Sets the source reference on Chart Studio Cloud for
`symbol`.
"""
def __init__(
self,
arg=None,
allowoverlap=None,
angle=None,
anglesrc=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorbar=None,
colorscale=None,
colorsrc=None,
opacity=None,
opacitysrc=None,
reversescale=None,
showscale=None,
size=None,
sizemin=None,
sizemode=None,
sizeref=None,
sizesrc=None,
symbol=None,
symbolsrc=None,
**kwargs,
):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattermap.Marker`
allowoverlap
Flag to draw all symbols, even if they overlap.
angle
Sets the marker orientation from true North, in degrees
clockwise. When using the "auto" default, no rotation
would be applied in perspective views, which is
different from using a zero angle.
anglesrc
Sets the source reference on Chart Studio Cloud for
`angle`.
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if
`marker.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`.
Has an effect only if `marker.color` is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if `marker.color` is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.scattermap.marker.ColorBar
` instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if
`marker.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values is required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,E
lectric,Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd
Bu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on Chart Studio Cloud for
`opacity`.
reversescale
Reverses the color mapping if true. Has an effect only
if `marker.color` is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if `marker.color` is
set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px) of the
rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule by which the data in
`size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points. Use with
`sizemin` and `sizemode`.
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
symbol
Sets the marker symbol. Full list:
https://www.mapbox.com/maki-icons/ Note that array
values for `marker.color` and `marker.size` are only
available for "circle" symbols.
symbolsrc
Sets the source reference on Chart Studio Cloud for
`symbol`.
Returns
-------
Marker
"""
super().__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattermap.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattermap.Marker`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("allowoverlap", arg, allowoverlap)
self._set_property("angle", arg, angle)
self._set_property("anglesrc", arg, anglesrc)
self._set_property("autocolorscale", arg, autocolorscale)
self._set_property("cauto", arg, cauto)
self._set_property("cmax", arg, cmax)
self._set_property("cmid", arg, cmid)
self._set_property("cmin", arg, cmin)
self._set_property("color", arg, color)
self._set_property("coloraxis", arg, coloraxis)
self._set_property("colorbar", arg, colorbar)
self._set_property("colorscale", arg, colorscale)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("opacity", arg, opacity)
self._set_property("opacitysrc", arg, opacitysrc)
self._set_property("reversescale", arg, reversescale)
self._set_property("showscale", arg, showscale)
self._set_property("size", arg, size)
self._set_property("sizemin", arg, sizemin)
self._set_property("sizemode", arg, sizemode)
self._set_property("sizeref", arg, sizeref)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("symbol", arg, symbol)
self._set_property("symbolsrc", arg, symbolsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Marker |
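A minimal construction sketch for the Marker class above (assumes a plotly version that ships the scattermap trace family; all data values are illustrative):
import plotly.graph_objects as go
marker = go.scattermap.Marker(
    size=[5, 10, 15],        # per-point sizes in px
    color=[0.1, 0.5, 0.9],   # numerical array, mapped through the colorscale
    colorscale="viridis",    # named colorscale; append "_r" to reverse it
    cmin=0.0,
    cmax=1.0,                # explicit color-domain bounds
    showscale=True,          # draw a colorbar for this trace
)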
python | networkx__networkx | networkx/linalg/tests/test_laplacian.py | {
"start": 103,
"end": 13953
} | class ____:
@classmethod
def setup_class(cls):
deg = [3, 2, 2, 1, 0]
cls.G = nx.havel_hakimi_graph(deg)
cls.WG = nx.Graph(
(u, v, {"weight": 0.5, "other": 0.3}) for (u, v) in cls.G.edges()
)
cls.WG.add_node(4)
cls.MG = nx.MultiGraph(cls.G)
# Graph with self-loops
cls.Gsl = cls.G.copy()
for node in cls.Gsl.nodes():
cls.Gsl.add_edge(node, node)
# Graph used as an example in Sec. 4.1 of Langville and Meyer,
# "Google's PageRank and Beyond".
cls.DiG = nx.DiGraph()
cls.DiG.add_edges_from(
(
(1, 2),
(1, 3),
(3, 1),
(3, 2),
(3, 5),
(4, 5),
(4, 6),
(5, 4),
(5, 6),
(6, 4),
)
)
cls.DiMG = nx.MultiDiGraph(cls.DiG)
cls.DiWG = nx.DiGraph(
(u, v, {"weight": 0.5, "other": 0.3}) for (u, v) in cls.DiG.edges()
)
cls.DiGsl = cls.DiG.copy()
for node in cls.DiGsl.nodes():
cls.DiGsl.add_edge(node, node)
def test_laplacian(self):
"Graph Laplacian"
# fmt: off
NL = np.array([[ 3, -1, -1, -1, 0],
[-1, 2, -1, 0, 0],
[-1, -1, 2, 0, 0],
[-1, 0, 0, 1, 0],
[ 0, 0, 0, 0, 0]])
# fmt: on
WL = 0.5 * NL
OL = 0.3 * NL
# fmt: off
DiNL = np.array([[ 2, -1, -1, 0, 0, 0],
[ 0, 0, 0, 0, 0, 0],
[-1, -1, 3, -1, 0, 0],
[ 0, 0, 0, 2, -1, -1],
[ 0, 0, 0, -1, 2, -1],
[ 0, 0, 0, 0, -1, 1]])
# fmt: on
DiWL = 0.5 * DiNL
DiOL = 0.3 * DiNL
np.testing.assert_equal(nx.laplacian_matrix(self.G).todense(), NL)
np.testing.assert_equal(nx.laplacian_matrix(self.MG).todense(), NL)
np.testing.assert_equal(
nx.laplacian_matrix(self.G, nodelist=[0, 1]).todense(),
np.array([[1, -1], [-1, 1]]),
)
np.testing.assert_equal(nx.laplacian_matrix(self.WG).todense(), WL)
np.testing.assert_equal(nx.laplacian_matrix(self.WG, weight=None).todense(), NL)
np.testing.assert_equal(
nx.laplacian_matrix(self.WG, weight="other").todense(), OL
)
np.testing.assert_equal(nx.laplacian_matrix(self.DiG).todense(), DiNL)
np.testing.assert_equal(nx.laplacian_matrix(self.DiMG).todense(), DiNL)
np.testing.assert_equal(
nx.laplacian_matrix(self.DiG, nodelist=[1, 2]).todense(),
np.array([[1, -1], [0, 0]]),
)
np.testing.assert_equal(nx.laplacian_matrix(self.DiWG).todense(), DiWL)
np.testing.assert_equal(
nx.laplacian_matrix(self.DiWG, weight=None).todense(), DiNL
)
np.testing.assert_equal(
nx.laplacian_matrix(self.DiWG, weight="other").todense(), DiOL
)
def test_normalized_laplacian(self):
"Generalized Graph Laplacian"
# fmt: off
G = np.array([[ 1. , -0.408, -0.408, -0.577, 0.],
[-0.408, 1. , -0.5 , 0. , 0.],
[-0.408, -0.5 , 1. , 0. , 0.],
[-0.577, 0. , 0. , 1. , 0.],
[ 0. , 0. , 0. , 0. , 0.]])
GL = np.array([[ 1. , -0.408, -0.408, -0.577, 0. ],
[-0.408, 1. , -0.5 , 0. , 0. ],
[-0.408, -0.5 , 1. , 0. , 0. ],
[-0.577, 0. , 0. , 1. , 0. ],
[ 0. , 0. , 0. , 0. , 0. ]])
Lsl = np.array([[ 0.75 , -0.2887, -0.2887, -0.3536, 0. ],
[-0.2887, 0.6667, -0.3333, 0. , 0. ],
[-0.2887, -0.3333, 0.6667, 0. , 0. ],
[-0.3536, 0. , 0. , 0.5 , 0. ],
[ 0. , 0. , 0. , 0. , 0. ]])
DiG = np.array([[ 1. , 0. , -0.4082, 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. ],
[-0.4082, 0. , 1. , 0. , -0.4082, 0. ],
[ 0. , 0. , 0. , 1. , -0.5 , -0.7071],
[ 0. , 0. , 0. , -0.5 , 1. , -0.7071],
[ 0. , 0. , 0. , -0.7071, 0. , 1. ]])
DiGL = np.array([[ 1. , 0. , -0.4082, 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. ],
[-0.4082, 0. , 1. , -0.4082, 0. , 0. ],
[ 0. , 0. , 0. , 1. , -0.5 , -0.7071],
[ 0. , 0. , 0. , -0.5 , 1. , -0.7071],
[ 0. , 0. , 0. , 0. , -0.7071, 1. ]])
DiLsl = np.array([[ 0.6667, -0.5774, -0.2887, 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. ],
[-0.2887, -0.5 , 0.75 , -0.2887, 0. , 0. ],
[ 0. , 0. , 0. , 0.6667, -0.3333, -0.4082],
[ 0. , 0. , 0. , -0.3333, 0.6667, -0.4082],
[ 0. , 0. , 0. , 0. , -0.4082, 0.5 ]])
# fmt: on
np.testing.assert_almost_equal(
nx.normalized_laplacian_matrix(self.G, nodelist=range(5)).todense(),
G,
decimal=3,
)
np.testing.assert_almost_equal(
nx.normalized_laplacian_matrix(self.G).todense(), GL, decimal=3
)
np.testing.assert_almost_equal(
nx.normalized_laplacian_matrix(self.MG).todense(), GL, decimal=3
)
np.testing.assert_almost_equal(
nx.normalized_laplacian_matrix(self.WG).todense(), GL, decimal=3
)
np.testing.assert_almost_equal(
nx.normalized_laplacian_matrix(self.WG, weight="other").todense(),
GL,
decimal=3,
)
np.testing.assert_almost_equal(
nx.normalized_laplacian_matrix(self.Gsl).todense(), Lsl, decimal=3
)
np.testing.assert_almost_equal(
nx.normalized_laplacian_matrix(
self.DiG,
nodelist=range(1, 1 + 6),
).todense(),
DiG,
decimal=3,
)
np.testing.assert_almost_equal(
nx.normalized_laplacian_matrix(self.DiG).todense(), DiGL, decimal=3
)
np.testing.assert_almost_equal(
nx.normalized_laplacian_matrix(self.DiMG).todense(), DiGL, decimal=3
)
np.testing.assert_almost_equal(
nx.normalized_laplacian_matrix(self.DiWG).todense(), DiGL, decimal=3
)
np.testing.assert_almost_equal(
nx.normalized_laplacian_matrix(self.DiWG, weight="other").todense(),
DiGL,
decimal=3,
)
np.testing.assert_almost_equal(
nx.normalized_laplacian_matrix(self.DiGsl).todense(), DiLsl, decimal=3
)
def test_directed_laplacian():
"Directed Laplacian"
# Graph used as an example in Sec. 4.1 of Langville and Meyer,
# "Google's PageRank and Beyond". The graph contains dangling nodes, so
# the pagerank random walk is selected by directed_laplacian
G = nx.DiGraph()
G.add_edges_from(
(
(1, 2),
(1, 3),
(3, 1),
(3, 2),
(3, 5),
(4, 5),
(4, 6),
(5, 4),
(5, 6),
(6, 4),
)
)
# fmt: off
GL = np.array([[ 0.9833, -0.2941, -0.3882, -0.0291, -0.0231, -0.0261],
[-0.2941, 0.8333, -0.2339, -0.0536, -0.0589, -0.0554],
[-0.3882, -0.2339, 0.9833, -0.0278, -0.0896, -0.0251],
[-0.0291, -0.0536, -0.0278, 0.9833, -0.4878, -0.6675],
[-0.0231, -0.0589, -0.0896, -0.4878, 0.9833, -0.2078],
[-0.0261, -0.0554, -0.0251, -0.6675, -0.2078, 0.9833]])
# fmt: on
L = nx.directed_laplacian_matrix(G, alpha=0.9, nodelist=sorted(G))
np.testing.assert_almost_equal(L, GL, decimal=3)
# Make the graph strongly connected, so we can use a random and lazy walk
G.add_edges_from(((2, 5), (6, 1)))
# fmt: off
GL = np.array([[ 1. , -0.3062, -0.4714, 0. , 0. , -0.3227],
[-0.3062, 1. , -0.1443, 0. , -0.3162, 0. ],
[-0.4714, -0.1443, 1. , 0. , -0.0913, 0. ],
[ 0. , 0. , 0. , 1. , -0.5 , -0.5 ],
[ 0. , -0.3162, -0.0913, -0.5 , 1. , -0.25 ],
[-0.3227, 0. , 0. , -0.5 , -0.25 , 1. ]])
# fmt: on
L = nx.directed_laplacian_matrix(
G, alpha=0.9, nodelist=sorted(G), walk_type="random"
)
np.testing.assert_almost_equal(L, GL, decimal=3)
# fmt: off
GL = np.array([[ 0.5 , -0.1531, -0.2357, 0. , 0. , -0.1614],
[-0.1531, 0.5 , -0.0722, 0. , -0.1581, 0. ],
[-0.2357, -0.0722, 0.5 , 0. , -0.0456, 0. ],
[ 0. , 0. , 0. , 0.5 , -0.25 , -0.25 ],
[ 0. , -0.1581, -0.0456, -0.25 , 0.5 , -0.125 ],
[-0.1614, 0. , 0. , -0.25 , -0.125 , 0.5 ]])
# fmt: on
L = nx.directed_laplacian_matrix(G, alpha=0.9, nodelist=sorted(G), walk_type="lazy")
np.testing.assert_almost_equal(L, GL, decimal=3)
# Make a strongly connected periodic graph
G = nx.DiGraph()
G.add_edges_from(((1, 2), (2, 4), (4, 1), (1, 3), (3, 4)))
# fmt: off
GL = np.array([[ 0.5 , -0.176, -0.176, -0.25 ],
[-0.176, 0.5 , 0. , -0.176],
[-0.176, 0. , 0.5 , -0.176],
[-0.25 , -0.176, -0.176, 0.5 ]])
# fmt: on
L = nx.directed_laplacian_matrix(G, alpha=0.9, nodelist=sorted(G))
np.testing.assert_almost_equal(L, GL, decimal=3)
def test_directed_combinatorial_laplacian():
"Directed combinatorial Laplacian"
# Graph used as an example in Sec. 4.1 of Langville and Meyer,
# "Google's PageRank and Beyond". The graph contains dangling nodes, so
# the pagerank random walk is selected by directed_laplacian
G = nx.DiGraph()
G.add_edges_from(
(
(1, 2),
(1, 3),
(3, 1),
(3, 2),
(3, 5),
(4, 5),
(4, 6),
(5, 4),
(5, 6),
(6, 4),
)
)
# fmt: off
GL = np.array([[ 0.0366, -0.0132, -0.0153, -0.0034, -0.0020, -0.0027],
[-0.0132, 0.0450, -0.0111, -0.0076, -0.0062, -0.0069],
[-0.0153, -0.0111, 0.0408, -0.0035, -0.0083, -0.0027],
[-0.0034, -0.0076, -0.0035, 0.3688, -0.1356, -0.2187],
[-0.0020, -0.0062, -0.0083, -0.1356, 0.2026, -0.0505],
[-0.0027, -0.0069, -0.0027, -0.2187, -0.0505, 0.2815]])
# fmt: on
L = nx.directed_combinatorial_laplacian_matrix(G, alpha=0.9, nodelist=sorted(G))
np.testing.assert_almost_equal(L, GL, decimal=3)
# Make the graph strongly connected, so we can use a random and lazy walk
G.add_edges_from(((2, 5), (6, 1)))
# fmt: off
GL = np.array([[ 0.1395, -0.0349, -0.0465, 0. , 0. , -0.0581],
[-0.0349, 0.093 , -0.0116, 0. , -0.0465, 0. ],
[-0.0465, -0.0116, 0.0698, 0. , -0.0116, 0. ],
[ 0. , 0. , 0. , 0.2326, -0.1163, -0.1163],
[ 0. , -0.0465, -0.0116, -0.1163, 0.2326, -0.0581],
[-0.0581, 0. , 0. , -0.1163, -0.0581, 0.2326]])
# fmt: on
L = nx.directed_combinatorial_laplacian_matrix(
G, alpha=0.9, nodelist=sorted(G), walk_type="random"
)
np.testing.assert_almost_equal(L, GL, decimal=3)
# fmt: off
GL = np.array([[ 0.0698, -0.0174, -0.0233, 0. , 0. , -0.0291],
[-0.0174, 0.0465, -0.0058, 0. , -0.0233, 0. ],
[-0.0233, -0.0058, 0.0349, 0. , -0.0058, 0. ],
[ 0. , 0. , 0. , 0.1163, -0.0581, -0.0581],
[ 0. , -0.0233, -0.0058, -0.0581, 0.1163, -0.0291],
[-0.0291, 0. , 0. , -0.0581, -0.0291, 0.1163]])
# fmt: on
L = nx.directed_combinatorial_laplacian_matrix(
G, alpha=0.9, nodelist=sorted(G), walk_type="lazy"
)
np.testing.assert_almost_equal(L, GL, decimal=3)
E = nx.DiGraph(nx.margulis_gabber_galil_graph(2))
L = nx.directed_combinatorial_laplacian_matrix(E)
# fmt: off
expected = np.array(
[[ 0.16666667, -0.08333333, -0.08333333, 0. ],
[-0.08333333, 0.16666667, 0. , -0.08333333],
[-0.08333333, 0. , 0.16666667, -0.08333333],
[ 0. , -0.08333333, -0.08333333, 0.16666667]]
)
# fmt: on
np.testing.assert_almost_equal(L, expected, decimal=6)
with pytest.raises(nx.NetworkXError):
nx.directed_combinatorial_laplacian_matrix(G, walk_type="pagerank", alpha=100)
with pytest.raises(nx.NetworkXError):
nx.directed_combinatorial_laplacian_matrix(G, walk_type="silly")
| TestLaplacian |
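A short sketch of the identity these tests exercise: for an undirected graph the combinatorial Laplacian is L = D - A, where D is the diagonal degree matrix and A the adjacency matrix (assumes networkx and numpy; the degree sequence mirrors the fixture above):
import networkx as nx
import numpy as np
G = nx.havel_hakimi_graph([3, 2, 2, 1, 0])
A = nx.to_numpy_array(G)
D = np.diag(A.sum(axis=1))
np.testing.assert_array_equal(D - A, nx.laplacian_matrix(G).toarray())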
python | dask__dask | dask/_task_spec.py | {
"start": 27057,
"end": 27119
} | class ____(NestedContainer):
constructor = klass = list
| List |
python | apache__airflow | airflow-core/src/airflow/exceptions.py | {
"start": 4224,
"end": 4330
} | class ____(AirflowNotFoundException):
"""Raise when a DAG is not available in the system."""
| DagNotFound |
python | qdrant__qdrant-client | qdrant_client/qdrant_remote.py | {
"start": 1352,
"end": 109008
} | class ____(QdrantBase):
DEFAULT_GRPC_TIMEOUT = 5 # seconds
DEFAULT_GRPC_POOL_SIZE = 3
def __init__(
self,
url: Optional[str] = None,
port: Optional[int] = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False,
https: Optional[bool] = None,
api_key: Optional[str] = None,
prefix: Optional[str] = None,
timeout: Optional[int] = None,
host: Optional[str] = None,
grpc_options: Optional[dict[str, Any]] = None,
auth_token_provider: Optional[
Union[Callable[[], str], Callable[[], Awaitable[str]]]
] = None,
check_compatibility: bool = True,
pool_size: Optional[int] = None,
**kwargs: Any,
):
super().__init__(**kwargs)
self._prefer_grpc = prefer_grpc
self._grpc_port = grpc_port
self._grpc_options = grpc_options or {}
self._https = https if https is not None else api_key is not None
self._scheme = "https" if self._https else "http"
# Pool size to use. This value should not be accessed directly; use _get_grpc_pool_size() instead.
self._pool_size: Optional[int] = None
if pool_size is not None:
pool_size = max(1, pool_size) # Ensure pool_size is always > 0
self._pool_size = pool_size
self._prefix = prefix or ""
if len(self._prefix) > 0 and self._prefix[0] != "/":
self._prefix = f"/{self._prefix}"
if url is not None and host is not None:
raise ValueError(f"Only one of (url, host) can be set. url is {url}, host is {host}")
if host is not None and (host.startswith("http://") or host.startswith("https://")):
raise ValueError(
f"`host` param is not expected to contain protocol (http:// or https://). "
f"Try to use `url` parameter instead."
)
elif url:
if url.startswith("localhost"):
# Handle for a special case when url is localhost:port
# Which is not parsed correctly by urllib
url = f"//{url}"
parsed_url: Url = parse_url(url)
self._host, self._port = parsed_url.host, parsed_url.port
if parsed_url.scheme:
self._https = parsed_url.scheme == "https"
self._scheme = parsed_url.scheme
self._port = self._port if self._port else port
if self._prefix and parsed_url.path:
raise ValueError(
"Prefix can be set either in `url` or in `prefix`. "
f"url is {url}, prefix is {parsed_url.path}"
)
elif parsed_url.path:
self._prefix = parsed_url.path
if self._scheme not in ("http", "https"):
raise ValueError(f"Unknown scheme: {self._scheme}")
else:
self._host = host or "localhost"
self._port = port
_timeout = (
math.ceil(timeout) if timeout is not None else None
) # the timeout parameter changed from float to int;
# round up to the closest greater-or-equal int value (e.g. 0.5 -> 1)
self._api_key = api_key
self._auth_token_provider = auth_token_provider
limits = kwargs.pop("limits", None)
if limits is None:
if self._host in ["localhost", "127.0.0.1"]:
# Disable keep-alive for local connections
# Cause in some cases, it may cause extra delays
limits = httpx.Limits(max_connections=None, max_keepalive_connections=0)
elif self._pool_size is not None:
# Set http connection pooling to `self._pool_size`, if no limits are specified.
limits = httpx.Limits(max_connections=self._pool_size)
elif self._pool_size is not None:
raise ValueError(
"`pool_size` and `limits` are mutually exclusive. "
f"`pool_size`: {pool_size}, `limit`: {limits}"
)
http2 = kwargs.pop("http2", False)
self._grpc_headers = []
self._rest_headers = {k: v for k, v in kwargs.pop("metadata", {}).items()}
if api_key is not None:
if self._scheme == "http":
show_warning(
message="Api key is used with an insecure connection.",
category=UserWarning,
stacklevel=4,
)
# http2 = True
self._rest_headers["api-key"] = api_key
self._grpc_headers.append(("api-key", api_key))
client_version = importlib.metadata.version("qdrant-client")
python_version = platform.python_version()
user_agent = f"python-client/{client_version} python/{python_version}"
self._rest_headers["User-Agent"] = user_agent
self._grpc_options["grpc.primary_user_agent"] = user_agent
# GRPC Channel-Level Compression
grpc_compression: Optional[Compression] = kwargs.pop("grpc_compression", None)
if grpc_compression is not None and not isinstance(grpc_compression, Compression):
raise TypeError(
f"Expected 'grpc_compression' to be of type "
f"grpc.Compression or None, but got {type(grpc_compression)}"
)
if grpc_compression == Compression.Deflate:
raise ValueError(
"grpc.Compression.Deflate is not supported. Try grpc.Compression.Gzip or grpc.Compression.NoCompression"
)
self._grpc_compression = grpc_compression
address = f"{self._host}:{self._port}" if self._port is not None else self._host
base_url = f"{self._scheme}://{address}"
self.rest_uri = urljoin(base_url, self._prefix)
self._rest_args = {"headers": self._rest_headers, "http2": http2, **kwargs}
if limits is not None:
self._rest_args["limits"] = limits
if _timeout is not None:
self._rest_args["timeout"] = _timeout
self._timeout = _timeout
else:
self._timeout = self.DEFAULT_GRPC_TIMEOUT
if self._auth_token_provider is not None:
if self._scheme == "http":
show_warning(
message="Auth token provider is used with an insecure connection.",
category=UserWarning,
stacklevel=4,
)
bearer_auth = BearerAuth(self._auth_token_provider)
self._rest_args["auth"] = bearer_auth
self.openapi_client: SyncApis[ApiClient] = SyncApis(
host=self.rest_uri,
**self._rest_args,
)
self._grpc_channel_pool: list[grpc.Channel] = []
self._grpc_points_client_pool: Optional[list[grpc.PointsStub]] = None
self._grpc_collections_client_pool: Optional[list[grpc.CollectionsStub]] = None
self._grpc_snapshots_client_pool: Optional[list[grpc.SnapshotsStub]] = None
self._grpc_root_client_pool: Optional[list[grpc.QdrantStub]] = None
self._grpc_client_next_index: int = 0 # The next index to use
self._aio_grpc_points_client: Optional[grpc.PointsStub] = None
self._aio_grpc_collections_client: Optional[grpc.CollectionsStub] = None
self._aio_grpc_snapshots_client: Optional[grpc.SnapshotsStub] = None
self._aio_grpc_root_client: Optional[grpc.QdrantStub] = None
self._closed: bool = False
self.server_version = None
if check_compatibility:
try:
client_version = importlib.metadata.version("qdrant-client")
self.server_version = get_server_version(
self.rest_uri, self._rest_headers, self._rest_args.get("auth")
)
if not self.server_version:
show_warning(
message="Failed to obtain server version. Unable to check client-server compatibility."
" Set check_compatibility=False to skip version check.",
category=UserWarning,
stacklevel=4,
)
elif not is_compatible(client_version, self.server_version):
show_warning(
message=f"Qdrant client version {client_version} is incompatible with server "
f"version {self.server_version}. Major versions should match and minor version difference "
"must not exceed 1. Set check_compatibility=False to skip version check.",
category=UserWarning,
stacklevel=4,
)
except Exception as er:
logging.debug(
f"Unable to get server version: {er}, server version defaults to None"
)
@property
def closed(self) -> bool:
return self._closed
def close(self, grpc_grace: Optional[float] = None, **kwargs: Any) -> None:
if hasattr(self, "_grpc_channel_pool") and len(self._grpc_channel_pool) > 0:
for channel in self._grpc_channel_pool:
try:
channel.close()
except AttributeError:
show_warning(
message="Unable to close grpc_channel. Connection was interrupted on the server side",
category=RuntimeWarning,
stacklevel=4,
)
try:
self.openapi_client.close()
except Exception:
show_warning(
message="Unable to close http connection. Connection was interrupted on the server side",
category=RuntimeWarning,
stacklevel=4,
)
self._closed = True
@staticmethod
def _parse_url(url: str) -> tuple[Optional[str], str, Optional[int], Optional[str]]:
parse_result: Url = parse_url(url)
scheme, host, port, prefix = (
parse_result.scheme,
parse_result.host,
parse_result.port,
parse_result.path,
)
return scheme, host, port, prefix
def _get_grpc_pool_size(self) -> int:
"""
Returns the pool size to use for GRPC connection pool.
This method should be preferred over accessing `self._pool_size` directly as it applies the
default value if no pool_size was provided.
"""
if self._pool_size is not None:
return self._pool_size
else:
return self.DEFAULT_GRPC_POOL_SIZE
def _init_grpc_channel(self) -> None:
if self._closed:
raise RuntimeError("Client was closed. Please create a new QdrantClient instance.")
try:
channel_pool = []
if len(self._grpc_channel_pool) == 0:
for _ in range(self._get_grpc_pool_size()):
channel = get_channel(
host=self._host,
port=self._grpc_port,
ssl=self._https,
metadata=self._grpc_headers,
options=self._grpc_options,
compression=self._grpc_compression,
# sync get_channel does not accept coroutine functions,
# but we can't check type here, since it'll get into async client as well
auth_token_provider=self._auth_token_provider, # type: ignore
)
channel_pool.append(channel)
# Assign the pool only after every channel is created, to avoid a half-initialized pool if a channel creation fails.
self._grpc_channel_pool = channel_pool
except Exception as e:
raise RuntimeError(f"Error initializing the grpc connection(s): {e}")
def _init_grpc_points_client(self) -> None:
self._init_grpc_channel()
self._grpc_points_client_pool = [
grpc.PointsStub(channel) for channel in self._grpc_channel_pool
]
def _init_grpc_collections_client(self) -> None:
self._init_grpc_channel()
self._grpc_collections_client_pool = [
grpc.CollectionsStub(channel) for channel in self._grpc_channel_pool
]
def _init_grpc_snapshots_client(self) -> None:
self._init_grpc_channel()
self._grpc_snapshots_client_pool = [
grpc.SnapshotsStub(channel) for channel in self._grpc_channel_pool
]
def _init_grpc_root_client(self) -> None:
self._init_grpc_channel()
self._grpc_root_client_pool = [
grpc.QdrantStub(channel) for channel in self._grpc_channel_pool
]
def _next_grpc_client(self) -> int:
current_index = self._grpc_client_next_index
self._grpc_client_next_index = (
self._grpc_client_next_index + 1
) % self._get_grpc_pool_size()
return current_index
@property
def grpc_collections(self) -> grpc.CollectionsStub:
"""gRPC client for collections methods
Returns:
An instance of raw gRPC client, generated from Protobuf
"""
if self._grpc_collections_client_pool is None:
self._init_grpc_collections_client()
assert self._grpc_collections_client_pool is not None
return self._grpc_collections_client_pool[self._next_grpc_client()]
@property
def grpc_points(self) -> grpc.PointsStub:
"""gRPC client for points methods
Returns:
An instance of raw gRPC client, generated from Protobuf
"""
if self._grpc_points_client_pool is None:
self._init_grpc_points_client()
assert self._grpc_points_client_pool is not None
return self._grpc_points_client_pool[self._next_grpc_client()]
@property
def grpc_snapshots(self) -> grpc.SnapshotsStub:
"""gRPC client for snapshots methods
Returns:
An instance of raw gRPC client, generated from Protobuf
"""
if self._grpc_snapshots_client_pool is None:
self._init_grpc_snapshots_client()
assert self._grpc_snapshots_client_pool is not None
return self._grpc_snapshots_client_pool[self._next_grpc_client()]
@property
def grpc_root(self) -> grpc.QdrantStub:
"""gRPC client for info methods
Returns:
An instance of raw gRPC client, generated from Protobuf
"""
if self._grpc_root_client_pool is None:
self._init_grpc_root_client()
assert self._grpc_root_client_pool is not None
return self._grpc_root_client_pool[self._next_grpc_client()]
@property
def rest(self) -> SyncApis[ApiClient]:
"""REST Client
Returns:
An instance of raw REST API client, generated from OpenAPI schema
"""
return self.openapi_client
@property
def http(self) -> SyncApis[ApiClient]:
"""REST Client
Returns:
An instance of raw REST API client, generated from OpenAPI schema
"""
return self.openapi_client
def query_points(
self,
collection_name: str,
query: Union[
types.PointId,
list[float],
list[list[float]],
types.SparseVector,
types.Query,
types.NumpyArray,
types.Document,
types.Image,
types.InferenceObject,
None,
] = None,
using: Optional[str] = None,
prefetch: Union[types.Prefetch, list[types.Prefetch], None] = None,
query_filter: Optional[types.Filter] = None,
search_params: Optional[types.SearchParams] = None,
limit: int = 10,
offset: Optional[int] = None,
with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
with_vectors: Union[bool, Sequence[str]] = False,
score_threshold: Optional[float] = None,
lookup_from: Optional[types.LookupLocation] = None,
consistency: Optional[types.ReadConsistency] = None,
shard_key_selector: Optional[types.ShardKeySelector] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> types.QueryResponse:
if self._prefer_grpc:
if query is not None:
query = RestToGrpc.convert_query(query)
if isinstance(prefetch, models.Prefetch):
prefetch = [RestToGrpc.convert_prefetch_query(prefetch)]
if isinstance(prefetch, list):
prefetch = [
RestToGrpc.convert_prefetch_query(p) if isinstance(p, models.Prefetch) else p
for p in prefetch
]
if isinstance(query_filter, models.Filter):
query_filter = RestToGrpc.convert_filter(model=query_filter)
if isinstance(search_params, models.SearchParams):
search_params = RestToGrpc.convert_search_params(search_params)
if isinstance(with_payload, get_args_subscribed(models.WithPayloadInterface)):
with_payload = RestToGrpc.convert_with_payload_interface(with_payload)
if isinstance(with_vectors, get_args_subscribed(models.WithVector)):
with_vectors = RestToGrpc.convert_with_vectors(with_vectors)
if isinstance(lookup_from, models.LookupLocation):
lookup_from = RestToGrpc.convert_lookup_location(lookup_from)
if isinstance(consistency, get_args_subscribed(models.ReadConsistency)):
consistency = RestToGrpc.convert_read_consistency(consistency)
if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)):
shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector)
res: grpc.QueryResponse = self.grpc_points.Query(
grpc.QueryPoints(
collection_name=collection_name,
query=query,
prefetch=prefetch,
filter=query_filter,
limit=limit,
offset=offset,
with_vectors=with_vectors,
with_payload=with_payload,
params=search_params,
score_threshold=score_threshold,
using=using,
lookup_from=lookup_from,
timeout=timeout,
shard_key_selector=shard_key_selector,
read_consistency=consistency,
),
timeout=timeout if timeout is not None else self._timeout,
)
scored_points = [GrpcToRest.convert_scored_point(hit) for hit in res.result]
return models.QueryResponse(points=scored_points)
else:
if isinstance(query_filter, grpc.Filter):
query_filter = GrpcToRest.convert_filter(model=query_filter)
if isinstance(search_params, grpc.SearchParams):
search_params = GrpcToRest.convert_search_params(search_params)
if isinstance(with_payload, grpc.WithPayloadSelector):
with_payload = GrpcToRest.convert_with_payload_selector(with_payload)
if isinstance(lookup_from, grpc.LookupLocation):
lookup_from = GrpcToRest.convert_lookup_location(lookup_from)
query_request = models.QueryRequest(
shard_key=shard_key_selector,
prefetch=prefetch,
query=query,
using=using,
filter=query_filter,
params=search_params,
score_threshold=score_threshold,
limit=limit,
offset=offset,
with_vector=with_vectors,
with_payload=with_payload,
lookup_from=lookup_from,
)
query_result = self.http.search_api.query_points(
collection_name=collection_name,
consistency=consistency,
timeout=timeout,
query_request=query_request,
)
result: Optional[models.QueryResponse] = query_result.result
assert result is not None, "Search returned None"
return result
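# Usage sketch: `client` stands for an instance of this class pointed at a
# reachable Qdrant server; the collection name and vector are illustrative.
#     hits = client.query_points(
#         collection_name="my_collection",
#         query=[0.1, 0.2, 0.3, 0.4],
#         limit=5,
#     ).points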
def query_batch_points(
self,
collection_name: str,
requests: Sequence[types.QueryRequest],
consistency: Optional[types.ReadConsistency] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> list[types.QueryResponse]:
if self._prefer_grpc:
requests = [
(
RestToGrpc.convert_query_request(r, collection_name)
if isinstance(r, models.QueryRequest)
else r
)
for r in requests
]
if isinstance(consistency, get_args_subscribed(models.ReadConsistency)):
consistency = RestToGrpc.convert_read_consistency(consistency)
grpc_res: grpc.QueryBatchResponse = self.grpc_points.QueryBatch(
grpc.QueryBatchPoints(
collection_name=collection_name,
query_points=requests,
read_consistency=consistency,
timeout=timeout,
),
timeout=timeout if timeout is not None else self._timeout,
)
return [
models.QueryResponse(
points=[GrpcToRest.convert_scored_point(hit) for hit in r.result]
)
for r in grpc_res.result
]
else:
http_res: Optional[list[models.QueryResponse]] = (
self.http.search_api.query_batch_points(
collection_name=collection_name,
consistency=consistency,
timeout=timeout,
query_request_batch=models.QueryRequestBatch(searches=requests),
).result
)
assert http_res is not None, "Query batch returned None"
return http_res
def query_points_groups(
self,
collection_name: str,
group_by: str,
query: Union[
types.PointId,
list[float],
list[list[float]],
types.SparseVector,
types.Query,
types.NumpyArray,
types.Document,
types.Image,
types.InferenceObject,
None,
] = None,
using: Optional[str] = None,
prefetch: Union[types.Prefetch, list[types.Prefetch], None] = None,
query_filter: Optional[types.Filter] = None,
search_params: Optional[types.SearchParams] = None,
limit: int = 10,
group_size: int = 3,
with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
with_vectors: Union[bool, Sequence[str]] = False,
score_threshold: Optional[float] = None,
with_lookup: Optional[types.WithLookupInterface] = None,
lookup_from: Optional[types.LookupLocation] = None,
consistency: Optional[types.ReadConsistency] = None,
shard_key_selector: Optional[types.ShardKeySelector] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> types.GroupsResult:
if self._prefer_grpc:
if query is not None:
query = RestToGrpc.convert_query(query)
if isinstance(prefetch, models.Prefetch):
prefetch = [RestToGrpc.convert_prefetch_query(prefetch)]
if isinstance(prefetch, list):
prefetch = [
RestToGrpc.convert_prefetch_query(p) if isinstance(p, models.Prefetch) else p
for p in prefetch
]
if isinstance(query_filter, models.Filter):
query_filter = RestToGrpc.convert_filter(model=query_filter)
if isinstance(search_params, models.SearchParams):
search_params = RestToGrpc.convert_search_params(search_params)
if isinstance(with_payload, get_args_subscribed(models.WithPayloadInterface)):
with_payload = RestToGrpc.convert_with_payload_interface(with_payload)
if isinstance(with_vectors, get_args_subscribed(models.WithVector)):
with_vectors = RestToGrpc.convert_with_vectors(with_vectors)
if isinstance(with_lookup, models.WithLookup):
with_lookup = RestToGrpc.convert_with_lookup(with_lookup)
if isinstance(with_lookup, str):
with_lookup = grpc.WithLookup(collection=with_lookup)
if isinstance(lookup_from, models.LookupLocation):
lookup_from = RestToGrpc.convert_lookup_location(lookup_from)
if isinstance(consistency, get_args_subscribed(models.ReadConsistency)):
consistency = RestToGrpc.convert_read_consistency(consistency)
if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)):
shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector)
result: grpc.QueryGroupsResponse = self.grpc_points.QueryGroups(
grpc.QueryPointGroups(
collection_name=collection_name,
query=query,
prefetch=prefetch,
filter=query_filter,
limit=limit,
with_vectors=with_vectors,
with_payload=with_payload,
params=search_params,
score_threshold=score_threshold,
using=using,
group_by=group_by,
group_size=group_size,
with_lookup=with_lookup,
lookup_from=lookup_from,
timeout=timeout,
shard_key_selector=shard_key_selector,
read_consistency=consistency,
),
timeout=timeout if timeout is not None else self._timeout,
).result
return GrpcToRest.convert_groups_result(result)
else:
if isinstance(query_filter, grpc.Filter):
query_filter = GrpcToRest.convert_filter(model=query_filter)
if isinstance(search_params, grpc.SearchParams):
search_params = GrpcToRest.convert_search_params(search_params)
if isinstance(with_payload, grpc.WithPayloadSelector):
with_payload = GrpcToRest.convert_with_payload_selector(with_payload)
if isinstance(lookup_from, grpc.LookupLocation):
lookup_from = GrpcToRest.convert_lookup_location(lookup_from)
query_request = models.QueryGroupsRequest(
shard_key=shard_key_selector,
prefetch=prefetch,
query=query,
using=using,
filter=query_filter,
params=search_params,
score_threshold=score_threshold,
limit=limit,
group_by=group_by,
group_size=group_size,
with_vector=with_vectors,
with_payload=with_payload,
with_lookup=with_lookup,
lookup_from=lookup_from,
)
query_result = self.http.search_api.query_points_groups(
collection_name=collection_name,
consistency=consistency,
timeout=timeout,
query_groups_request=query_request,
)
assert query_result is not None, "Query points groups API returned None"
return query_result.result
def search_matrix_pairs(
self,
collection_name: str,
query_filter: Optional[types.Filter] = None,
limit: int = 3,
sample: int = 10,
using: Optional[str] = None,
consistency: Optional[types.ReadConsistency] = None,
shard_key_selector: Optional[types.ShardKeySelector] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> types.SearchMatrixPairsResponse:
if self._prefer_grpc:
if isinstance(query_filter, models.Filter):
query_filter = RestToGrpc.convert_filter(model=query_filter)
if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)):
shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector)
if isinstance(consistency, get_args_subscribed(models.ReadConsistency)):
consistency = RestToGrpc.convert_read_consistency(consistency)
response = self.grpc_points.SearchMatrixPairs(
grpc.SearchMatrixPoints(
collection_name=collection_name,
filter=query_filter,
sample=sample,
limit=limit,
using=using,
timeout=timeout,
read_consistency=consistency,
shard_key_selector=shard_key_selector,
),
timeout=timeout if timeout is not None else self._timeout,
)
return GrpcToRest.convert_search_matrix_pairs(response.result)
if isinstance(query_filter, grpc.Filter):
query_filter = GrpcToRest.convert_filter(model=query_filter)
search_matrix_result = self.openapi_client.search_api.search_matrix_pairs(
collection_name=collection_name,
consistency=consistency,
timeout=timeout,
search_matrix_request=models.SearchMatrixRequest(
shard_key=shard_key_selector,
limit=limit,
sample=sample,
using=using,
filter=query_filter,
),
).result
assert search_matrix_result is not None, "Search matrix pairs returned None result"
return search_matrix_result
def search_matrix_offsets(
self,
collection_name: str,
query_filter: Optional[types.Filter] = None,
limit: int = 3,
sample: int = 10,
using: Optional[str] = None,
consistency: Optional[types.ReadConsistency] = None,
shard_key_selector: Optional[types.ShardKeySelector] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> types.SearchMatrixOffsetsResponse:
if self._prefer_grpc:
if isinstance(query_filter, models.Filter):
query_filter = RestToGrpc.convert_filter(model=query_filter)
if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)):
shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector)
if isinstance(consistency, get_args_subscribed(models.ReadConsistency)):
consistency = RestToGrpc.convert_read_consistency(consistency)
response = self.grpc_points.SearchMatrixOffsets(
grpc.SearchMatrixPoints(
collection_name=collection_name,
filter=query_filter,
sample=sample,
limit=limit,
using=using,
timeout=timeout,
read_consistency=consistency,
shard_key_selector=shard_key_selector,
),
timeout=timeout if timeout is not None else self._timeout,
)
return GrpcToRest.convert_search_matrix_offsets(response.result)
if isinstance(query_filter, grpc.Filter):
query_filter = GrpcToRest.convert_filter(model=query_filter)
search_matrix_result = self.openapi_client.search_api.search_matrix_offsets(
collection_name=collection_name,
consistency=consistency,
timeout=timeout,
search_matrix_request=models.SearchMatrixRequest(
shard_key=shard_key_selector,
limit=limit,
sample=sample,
using=using,
filter=query_filter,
),
).result
assert search_matrix_result is not None, "Search matrix offsets returned None result"
return search_matrix_result
def scroll(
self,
collection_name: str,
scroll_filter: Optional[types.Filter] = None,
limit: int = 10,
order_by: Optional[types.OrderBy] = None,
offset: Optional[types.PointId] = None,
with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
with_vectors: Union[bool, Sequence[str]] = False,
consistency: Optional[types.ReadConsistency] = None,
shard_key_selector: Optional[types.ShardKeySelector] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> tuple[list[types.Record], Optional[types.PointId]]:
if self._prefer_grpc:
if isinstance(offset, get_args_subscribed(models.ExtendedPointId)):
offset = RestToGrpc.convert_extended_point_id(offset)
if isinstance(scroll_filter, models.Filter):
scroll_filter = RestToGrpc.convert_filter(model=scroll_filter)
if isinstance(with_payload, get_args_subscribed(models.WithPayloadInterface)):
with_payload = RestToGrpc.convert_with_payload_interface(with_payload)
if isinstance(with_vectors, get_args_subscribed(models.WithVector)):
with_vectors = RestToGrpc.convert_with_vectors(with_vectors)
if isinstance(consistency, get_args_subscribed(models.ReadConsistency)):
consistency = RestToGrpc.convert_read_consistency(consistency)
if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)):
shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector)
if isinstance(order_by, get_args_subscribed(models.OrderByInterface)):
order_by = RestToGrpc.convert_order_by_interface(order_by)
res: grpc.ScrollResponse = self.grpc_points.Scroll(
grpc.ScrollPoints(
collection_name=collection_name,
filter=scroll_filter,
order_by=order_by,
offset=offset,
with_vectors=with_vectors,
with_payload=with_payload,
limit=limit,
read_consistency=consistency,
shard_key_selector=shard_key_selector,
timeout=timeout,
),
timeout=timeout if timeout is not None else self._timeout,
)
return [GrpcToRest.convert_retrieved_point(point) for point in res.result], (
GrpcToRest.convert_point_id(res.next_page_offset)
if res.HasField("next_page_offset")
else None
)
else:
if isinstance(offset, grpc.PointId):
offset = GrpcToRest.convert_point_id(offset)
if isinstance(scroll_filter, grpc.Filter):
scroll_filter = GrpcToRest.convert_filter(model=scroll_filter)
if isinstance(order_by, grpc.OrderBy):
order_by = GrpcToRest.convert_order_by(order_by)
if isinstance(with_payload, grpc.WithPayloadSelector):
with_payload = GrpcToRest.convert_with_payload_selector(with_payload)
scroll_result: Optional[models.ScrollResult] = (
self.openapi_client.points_api.scroll_points(
collection_name=collection_name,
consistency=consistency,
scroll_request=models.ScrollRequest(
filter=scroll_filter,
limit=limit,
order_by=order_by,
offset=offset,
with_payload=with_payload,
with_vector=with_vectors,
shard_key=shard_key_selector,
),
timeout=timeout,
).result
)
assert scroll_result is not None, "Scroll points API returned None result"
return scroll_result.points, scroll_result.next_page_offset
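# Pagination sketch (names are illustrative): `scroll` returns
# (records, next_page_offset); pass the offset back in until it is None.
#     offset = None
#     while True:
#         records, offset = client.scroll(
#             collection_name="my_collection", limit=100, offset=offset
#         )
#         ...  # process records
#         if offset is None:
#             break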
def count(
self,
collection_name: str,
count_filter: Optional[types.Filter] = None,
exact: bool = True,
shard_key_selector: Optional[types.ShardKeySelector] = None,
timeout: Optional[int] = None,
consistency: Optional[types.ReadConsistency] = None,
**kwargs: Any,
) -> types.CountResult:
if self._prefer_grpc:
if isinstance(count_filter, models.Filter):
count_filter = RestToGrpc.convert_filter(model=count_filter)
if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)):
shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector)
if isinstance(consistency, get_args_subscribed(models.ReadConsistency)):
consistency = RestToGrpc.convert_read_consistency(consistency)
response = self.grpc_points.Count(
grpc.CountPoints(
collection_name=collection_name,
filter=count_filter,
exact=exact,
shard_key_selector=shard_key_selector,
timeout=timeout,
read_consistency=consistency,
),
timeout=timeout if timeout is not None else self._timeout,
).result
return GrpcToRest.convert_count_result(response)
if isinstance(count_filter, grpc.Filter):
count_filter = GrpcToRest.convert_filter(model=count_filter)
count_result = self.openapi_client.points_api.count_points(
collection_name=collection_name,
count_request=models.CountRequest(
filter=count_filter,
exact=exact,
shard_key=shard_key_selector,
),
consistency=consistency,
timeout=timeout,
).result
assert count_result is not None, "Count points returned None result"
return count_result
def facet(
self,
collection_name: str,
key: str,
facet_filter: Optional[types.Filter] = None,
limit: int = 10,
exact: bool = False,
timeout: Optional[int] = None,
consistency: Optional[types.ReadConsistency] = None,
shard_key_selector: Optional[types.ShardKeySelector] = None,
**kwargs: Any,
) -> types.FacetResponse:
if self._prefer_grpc:
if isinstance(facet_filter, models.Filter):
facet_filter = RestToGrpc.convert_filter(model=facet_filter)
if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)):
shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector)
if isinstance(consistency, get_args_subscribed(models.ReadConsistency)):
consistency = RestToGrpc.convert_read_consistency(consistency)
response = self.grpc_points.Facet(
grpc.FacetCounts(
collection_name=collection_name,
key=key,
filter=facet_filter,
limit=limit,
exact=exact,
timeout=timeout,
read_consistency=consistency,
shard_key_selector=shard_key_selector,
),
timeout=timeout if timeout is not None else self._timeout,
)
return types.FacetResponse(
hits=[GrpcToRest.convert_facet_value_hit(hit) for hit in response.hits]
)
if isinstance(facet_filter, grpc.Filter):
facet_filter = GrpcToRest.convert_filter(model=facet_filter)
facet_result = self.openapi_client.points_api.facet(
collection_name=collection_name,
consistency=consistency,
timeout=timeout,
facet_request=models.FacetRequest(
shard_key=shard_key_selector,
key=key,
limit=limit,
filter=facet_filter,
exact=exact,
),
).result
assert facet_result is not None, "Facet points returned None result"
return facet_result
def upsert(
self,
collection_name: str,
points: types.Points,
wait: bool = True,
ordering: Optional[types.WriteOrdering] = None,
shard_key_selector: Optional[types.ShardKeySelector] = None,
update_filter: Optional[types.Filter] = None,
**kwargs: Any,
) -> types.UpdateResult:
if self._prefer_grpc:
if isinstance(points, models.Batch):
vectors_batch: list[grpc.Vectors] = RestToGrpc.convert_batch_vector_struct(
points.vectors, len(points.ids)
)
points = [
grpc.PointStruct(
id=RestToGrpc.convert_extended_point_id(points.ids[idx]),
vectors=vectors_batch[idx],
payload=(
RestToGrpc.convert_payload(points.payloads[idx])
if points.payloads is not None
else None
),
)
for idx in range(len(points.ids))
]
if isinstance(points, list):
points = [
(
RestToGrpc.convert_point_struct(point)
if isinstance(point, models.PointStruct)
else point
)
for point in points
]
if isinstance(ordering, models.WriteOrdering):
ordering = RestToGrpc.convert_write_ordering(ordering)
if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)):
shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector)
if isinstance(update_filter, models.Filter):
update_filter = RestToGrpc.convert_filter(model=update_filter)
grpc_result = self.grpc_points.Upsert(
grpc.UpsertPoints(
collection_name=collection_name,
wait=wait,
points=points,
ordering=ordering,
shard_key_selector=shard_key_selector,
update_filter=update_filter,
),
timeout=self._timeout,
).result
assert grpc_result is not None, "Upsert returned None result"
return GrpcToRest.convert_update_result(grpc_result)
else:
if isinstance(update_filter, grpc.Filter):
update_filter = GrpcToRest.convert_filter(model=update_filter)
if isinstance(points, list):
points = [
(
GrpcToRest.convert_point_struct(point)
if isinstance(point, grpc.PointStruct)
else point
)
for point in points
]
points = models.PointsList(
points=points, shard_key=shard_key_selector, update_filter=update_filter
)
if isinstance(points, models.Batch):
points = models.PointsBatch(
batch=points, shard_key=shard_key_selector, update_filter=update_filter
)
http_result = self.openapi_client.points_api.upsert_points(
collection_name=collection_name,
wait=wait,
point_insert_operations=points,
ordering=ordering,
).result
assert http_result is not None, "Upsert returned None result"
return http_result
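# Upsert sketch: ids, vectors and payloads are illustrative; `models` is
# the qdrant_client models module referenced throughout this class.
#     client.upsert(
#         collection_name="my_collection",
#         points=[
#             models.PointStruct(
#                 id=1, vector=[0.1, 0.2, 0.3, 0.4], payload={"city": "Berlin"}
#             ),
#         ],
#     )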
def update_vectors(
self,
collection_name: str,
points: Sequence[types.PointVectors],
wait: bool = True,
ordering: Optional[types.WriteOrdering] = None,
shard_key_selector: Optional[types.ShardKeySelector] = None,
update_filter: Optional[types.Filter] = None,
**kwargs: Any,
) -> types.UpdateResult:
if self._prefer_grpc:
points = [RestToGrpc.convert_point_vectors(point) for point in points]
if isinstance(ordering, models.WriteOrdering):
ordering = RestToGrpc.convert_write_ordering(ordering)
if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)):
shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector)
if isinstance(update_filter, models.Filter):
update_filter = RestToGrpc.convert_filter(model=update_filter)
grpc_result = self.grpc_points.UpdateVectors(
grpc.UpdatePointVectors(
collection_name=collection_name,
wait=wait,
points=points,
ordering=ordering,
shard_key_selector=shard_key_selector,
update_filter=update_filter,
),
timeout=self._timeout,
).result
assert grpc_result is not None, "Update vectors returned None result"
return GrpcToRest.convert_update_result(grpc_result)
else:
if isinstance(update_filter, grpc.Filter):
update_filter = GrpcToRest.convert_filter(model=update_filter)
result: Optional[types.UpdateResult] = self.openapi_client.points_api.update_vectors(
collection_name=collection_name,
wait=wait,
update_vectors=models.UpdateVectors(
points=points,
shard_key=shard_key_selector,
update_filter=update_filter,
),
ordering=ordering,
).result
assert result is not None, "Update vectors returned None result"
return result
def delete_vectors(
self,
collection_name: str,
vectors: Sequence[str],
points: types.PointsSelector,
wait: bool = True,
ordering: Optional[types.WriteOrdering] = None,
shard_key_selector: Optional[types.ShardKeySelector] = None,
**kwargs: Any,
) -> types.UpdateResult:
if self._prefer_grpc:
points_selector, opt_shard_key_selector = self._try_argument_to_grpc_selector(points)
shard_key_selector = shard_key_selector or opt_shard_key_selector
if isinstance(ordering, models.WriteOrdering):
ordering = RestToGrpc.convert_write_ordering(ordering)
if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)):
shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector)
grpc_result = self.grpc_points.DeleteVectors(
grpc.DeletePointVectors(
collection_name=collection_name,
wait=wait,
vectors=grpc.VectorsSelector(
names=vectors,
),
points_selector=points_selector,
ordering=ordering,
shard_key_selector=shard_key_selector,
),
timeout=self._timeout,
).result
assert grpc_result is not None, "Delete vectors returned None result"
return GrpcToRest.convert_update_result(grpc_result)
else:
_points, _filter = self._try_argument_to_rest_points_and_filter(points)
result: Optional[types.UpdateResult] = self.openapi_client.points_api.delete_vectors(
collection_name=collection_name,
wait=wait,
ordering=ordering,
delete_vectors=construct(
models.DeleteVectors,
vector=vectors,
points=_points,
filter=_filter,
shard_key=shard_key_selector,
),
).result
assert result is not None, "Delete vectors returned None result"
return result
def retrieve(
self,
collection_name: str,
ids: Sequence[types.PointId],
with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
with_vectors: Union[bool, Sequence[str]] = False,
consistency: Optional[types.ReadConsistency] = None,
shard_key_selector: Optional[types.ShardKeySelector] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> list[types.Record]:
if self._prefer_grpc:
if isinstance(with_payload, get_args_subscribed(models.WithPayloadInterface)):
with_payload = RestToGrpc.convert_with_payload_interface(with_payload)
ids = [
(
RestToGrpc.convert_extended_point_id(idx)
if isinstance(idx, get_args_subscribed(models.ExtendedPointId))
else idx
)
for idx in ids
]
with_vectors = RestToGrpc.convert_with_vectors(with_vectors)
if isinstance(consistency, get_args_subscribed(models.ReadConsistency)):
consistency = RestToGrpc.convert_read_consistency(consistency)
if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)):
shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector)
result = self.grpc_points.Get(
grpc.GetPoints(
collection_name=collection_name,
ids=ids,
with_payload=with_payload,
with_vectors=with_vectors,
read_consistency=consistency,
shard_key_selector=shard_key_selector,
timeout=timeout,
),
timeout=timeout if timeout is not None else self._timeout,
).result
assert result is not None, "Retrieve returned None result"
return [GrpcToRest.convert_retrieved_point(record) for record in result]
else:
if isinstance(with_payload, grpc.WithPayloadSelector):
with_payload = GrpcToRest.convert_with_payload_selector(with_payload)
ids = [
(GrpcToRest.convert_point_id(idx) if isinstance(idx, grpc.PointId) else idx)
for idx in ids
]
http_result = self.openapi_client.points_api.get_points(
collection_name=collection_name,
consistency=consistency,
point_request=models.PointRequest(
ids=ids,
with_payload=with_payload,
with_vector=with_vectors,
shard_key=shard_key_selector,
),
timeout=timeout,
).result
assert http_result is not None, "Retrieve API returned None result"
return http_result
@classmethod
def _try_argument_to_grpc_selector(
cls, points: types.PointsSelector
) -> tuple[grpc.PointsSelector, Optional[grpc.ShardKeySelector]]:
shard_key_selector = None
if isinstance(points, list):
points_selector = grpc.PointsSelector(
points=grpc.PointsIdsList(
ids=[
(
RestToGrpc.convert_extended_point_id(idx)
if isinstance(idx, get_args_subscribed(models.ExtendedPointId))
else idx
)
for idx in points
]
)
)
elif isinstance(points, grpc.PointsSelector):
points_selector = points
elif isinstance(points, get_args(models.PointsSelector)):
if points.shard_key is not None:
shard_key_selector = RestToGrpc.convert_shard_key_selector(points.shard_key)
points_selector = RestToGrpc.convert_points_selector(points)
elif isinstance(points, models.Filter):
points_selector = RestToGrpc.convert_points_selector(
construct(models.FilterSelector, filter=points)
)
elif isinstance(points, grpc.Filter):
points_selector = grpc.PointsSelector(filter=points)
else:
raise ValueError(f"Unsupported points selector type: {type(points)}")
return points_selector, shard_key_selector
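# e.g. a plain id list such as [1, 2] becomes a grpc.PointsSelector wrapping a
# PointsIdsList, while a models.Filter is first wrapped in a FilterSelector
# before conversion.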
@classmethod
def _try_argument_to_rest_selector(
cls,
points: types.PointsSelector,
shard_key_selector: Optional[types.ShardKeySelector],
) -> models.PointsSelector:
if isinstance(points, list):
_points = [
(GrpcToRest.convert_point_id(idx) if isinstance(idx, grpc.PointId) else idx)
for idx in points
]
points_selector = construct(
models.PointIdsList,
points=_points,
shard_key=shard_key_selector,
)
elif isinstance(points, grpc.PointsSelector):
points_selector = GrpcToRest.convert_points_selector(points)
points_selector.shard_key = shard_key_selector
elif isinstance(points, get_args(models.PointsSelector)):
points_selector = points
points_selector.shard_key = shard_key_selector
elif isinstance(points, models.Filter):
points_selector = construct(
models.FilterSelector, filter=points, shard_key=shard_key_selector
)
elif isinstance(points, grpc.Filter):
points_selector = construct(
models.FilterSelector,
filter=GrpcToRest.convert_filter(points),
shard_key=shard_key_selector,
)
else:
raise ValueError(f"Unsupported points selector type: {type(points)}")
return points_selector
@classmethod
def _points_selector_to_points_list(
cls, points_selector: grpc.PointsSelector
) -> list[grpc.PointId]:
name = points_selector.WhichOneof("points_selector_one_of")
if name is None:
return []
val = getattr(points_selector, name)
if name == "points":
return list(val.ids)
return []
@classmethod
def _try_argument_to_rest_points_and_filter(
cls, points: types.PointsSelector
) -> tuple[Optional[list[models.ExtendedPointId]], Optional[models.Filter]]:
_points = None
_filter = None
if isinstance(points, list):
_points = [
(GrpcToRest.convert_point_id(idx) if isinstance(idx, grpc.PointId) else idx)
for idx in points
]
elif isinstance(points, grpc.PointsSelector):
selector = GrpcToRest.convert_points_selector(points)
if isinstance(selector, models.PointIdsList):
_points = selector.points
elif isinstance(selector, models.FilterSelector):
_filter = selector.filter
elif isinstance(points, models.PointIdsList):
_points = points.points
elif isinstance(points, models.FilterSelector):
_filter = points.filter
elif isinstance(points, models.Filter):
_filter = points
elif isinstance(points, grpc.Filter):
_filter = GrpcToRest.convert_filter(points)
else:
raise ValueError(f"Unsupported points selector type: {type(points)}")
return _points, _filter
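# e.g. [1, 2] yields (_points=[1, 2], _filter=None) and a models.Filter f yields
# (_points=None, _filter=f); REST endpoints take ids and filters as separate fields.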
def delete(
self,
collection_name: str,
points_selector: types.PointsSelector,
wait: bool = True,
ordering: Optional[types.WriteOrdering] = None,
shard_key_selector: Optional[types.ShardKeySelector] = None,
**kwargs: Any,
) -> types.UpdateResult:
if self._prefer_grpc:
points_selector, opt_shard_key_selector = self._try_argument_to_grpc_selector(
points_selector
)
shard_key_selector = shard_key_selector or opt_shard_key_selector
if isinstance(ordering, models.WriteOrdering):
ordering = RestToGrpc.convert_write_ordering(ordering)
if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)):
shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector)
return GrpcToRest.convert_update_result(
self.grpc_points.Delete(
grpc.DeletePoints(
collection_name=collection_name,
wait=wait,
points=points_selector,
ordering=ordering,
shard_key_selector=shard_key_selector,
),
timeout=self._timeout,
).result
)
else:
points_selector = self._try_argument_to_rest_selector(
points_selector, shard_key_selector
)
result: Optional[types.UpdateResult] = self.openapi_client.points_api.delete_points(
collection_name=collection_name,
wait=wait,
points_selector=points_selector,
ordering=ordering,
).result
assert result is not None, "Delete points returned None"
return result
def set_payload(
self,
collection_name: str,
payload: types.Payload,
points: types.PointsSelector,
key: Optional[str] = None,
wait: bool = True,
ordering: Optional[types.WriteOrdering] = None,
shard_key_selector: Optional[types.ShardKeySelector] = None,
**kwargs: Any,
) -> types.UpdateResult:
if self._prefer_grpc:
points_selector, opt_shard_key_selector = self._try_argument_to_grpc_selector(points)
shard_key_selector = shard_key_selector or opt_shard_key_selector
if isinstance(ordering, models.WriteOrdering):
ordering = RestToGrpc.convert_write_ordering(ordering)
if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)):
shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector)
return GrpcToRest.convert_update_result(
self.grpc_points.SetPayload(
grpc.SetPayloadPoints(
collection_name=collection_name,
wait=wait,
payload=RestToGrpc.convert_payload(payload),
points_selector=points_selector,
ordering=ordering,
shard_key_selector=shard_key_selector,
key=key,
),
timeout=self._timeout,
).result
)
else:
_points, _filter = self._try_argument_to_rest_points_and_filter(points)
result: Optional[types.UpdateResult] = self.openapi_client.points_api.set_payload(
collection_name=collection_name,
wait=wait,
ordering=ordering,
set_payload=models.SetPayload(
payload=payload,
points=_points,
filter=_filter,
shard_key=shard_key_selector,
key=key,
),
).result
assert result is not None, "Set payload returned None"
return result
def overwrite_payload(
self,
collection_name: str,
payload: types.Payload,
points: types.PointsSelector,
wait: bool = True,
ordering: Optional[types.WriteOrdering] = None,
shard_key_selector: Optional[types.ShardKeySelector] = None,
**kwargs: Any,
) -> types.UpdateResult:
if self._prefer_grpc:
points_selector, opt_shard_key_selector = self._try_argument_to_grpc_selector(points)
shard_key_selector = shard_key_selector or opt_shard_key_selector
if isinstance(ordering, models.WriteOrdering):
ordering = RestToGrpc.convert_write_ordering(ordering)
if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)):
shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector)
return GrpcToRest.convert_update_result(
self.grpc_points.OverwritePayload(
grpc.SetPayloadPoints(
collection_name=collection_name,
wait=wait,
payload=RestToGrpc.convert_payload(payload),
points_selector=points_selector,
ordering=ordering,
shard_key_selector=shard_key_selector,
),
timeout=self._timeout,
).result
)
else:
_points, _filter = self._try_argument_to_rest_points_and_filter(points)
result: Optional[types.UpdateResult] = (
self.openapi_client.points_api.overwrite_payload(
collection_name=collection_name,
wait=wait,
ordering=ordering,
set_payload=models.SetPayload(
payload=payload,
points=_points,
filter=_filter,
shard_key=shard_key_selector,
),
).result
)
assert result is not None, "Overwrite payload returned None"
return result
def delete_payload(
self,
collection_name: str,
keys: Sequence[str],
points: types.PointsSelector,
wait: bool = True,
ordering: Optional[types.WriteOrdering] = None,
shard_key_selector: Optional[types.ShardKeySelector] = None,
**kwargs: Any,
) -> types.UpdateResult:
if self._prefer_grpc:
points_selector, opt_shard_key_selector = self._try_argument_to_grpc_selector(points)
shard_key_selector = shard_key_selector or opt_shard_key_selector
if isinstance(ordering, models.WriteOrdering):
ordering = RestToGrpc.convert_write_ordering(ordering)
if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)):
shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector)
return GrpcToRest.convert_update_result(
self.grpc_points.DeletePayload(
grpc.DeletePayloadPoints(
collection_name=collection_name,
wait=wait,
keys=keys,
points_selector=points_selector,
ordering=ordering,
shard_key_selector=shard_key_selector,
),
timeout=self._timeout,
).result
)
else:
_points, _filter = self._try_argument_to_rest_points_and_filter(points)
result: Optional[types.UpdateResult] = self.openapi_client.points_api.delete_payload(
collection_name=collection_name,
wait=wait,
ordering=ordering,
delete_payload=models.DeletePayload(
keys=keys,
points=_points,
filter=_filter,
shard_key=shard_key_selector,
),
).result
assert result is not None, "Delete payload returned None"
return result
def clear_payload(
self,
collection_name: str,
points_selector: types.PointsSelector,
wait: bool = True,
ordering: Optional[types.WriteOrdering] = None,
shard_key_selector: Optional[types.ShardKeySelector] = None,
**kwargs: Any,
) -> types.UpdateResult:
if self._prefer_grpc:
points_selector, opt_shard_key_selector = self._try_argument_to_grpc_selector(
points_selector
)
shard_key_selector = shard_key_selector or opt_shard_key_selector
if isinstance(ordering, models.WriteOrdering):
ordering = RestToGrpc.convert_write_ordering(ordering)
if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)):
shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector)
return GrpcToRest.convert_update_result(
self.grpc_points.ClearPayload(
grpc.ClearPayloadPoints(
collection_name=collection_name,
wait=wait,
points=points_selector,
ordering=ordering,
shard_key_selector=shard_key_selector,
),
timeout=self._timeout,
).result
)
else:
points_selector = self._try_argument_to_rest_selector(
points_selector, shard_key_selector
)
result: Optional[types.UpdateResult] = self.openapi_client.points_api.clear_payload(
collection_name=collection_name,
wait=wait,
ordering=ordering,
points_selector=points_selector,
).result
assert result is not None, "Clear payload returned None"
return result
def batch_update_points(
self,
collection_name: str,
update_operations: Sequence[types.UpdateOperation],
wait: bool = True,
ordering: Optional[types.WriteOrdering] = None,
**kwargs: Any,
) -> list[types.UpdateResult]:
if self._prefer_grpc:
update_operations = [
RestToGrpc.convert_update_operation(operation) for operation in update_operations
]
if isinstance(ordering, models.WriteOrdering):
ordering = RestToGrpc.convert_write_ordering(ordering)
return [
GrpcToRest.convert_update_result(result)
for result in self.grpc_points.UpdateBatch(
grpc.UpdateBatchPoints(
collection_name=collection_name,
wait=wait,
operations=update_operations,
ordering=ordering,
),
timeout=self._timeout,
).result
]
else:
result: Optional[list[types.UpdateResult]] = (
self.openapi_client.points_api.batch_update(
collection_name=collection_name,
wait=wait,
ordering=ordering,
update_operations=models.UpdateOperations(operations=update_operations),
).result
)
assert result is not None, "Batch update points returned None"
return result
def update_collection_aliases(
self,
change_aliases_operations: Sequence[types.AliasOperations],
timeout: Optional[int] = None,
**kwargs: Any,
) -> bool:
if self._prefer_grpc:
change_aliases_operation = [
(
RestToGrpc.convert_alias_operations(operation)
if not isinstance(operation, grpc.AliasOperations)
else operation
)
for operation in change_aliases_operations
]
return self.grpc_collections.UpdateAliases(
grpc.ChangeAliases(
timeout=timeout,
actions=change_aliases_operation,
),
timeout=timeout if timeout is not None else self._timeout,
).result
change_aliases_operation = [
(
GrpcToRest.convert_alias_operations(operation)
if isinstance(operation, grpc.AliasOperations)
else operation
)
for operation in change_aliases_operations
]
result: Optional[bool] = self.http.aliases_api.update_aliases(
timeout=timeout,
change_aliases_operation=models.ChangeAliasesOperation(
actions=change_aliases_operation
),
).result
assert result is not None, "Update aliases returned None"
return result
def get_collection_aliases(
self, collection_name: str, **kwargs: Any
) -> types.CollectionsAliasesResponse:
if self._prefer_grpc:
response = self.grpc_collections.ListCollectionAliases(
grpc.ListCollectionAliasesRequest(collection_name=collection_name),
timeout=self._timeout,
).aliases
return types.CollectionsAliasesResponse(
aliases=[
GrpcToRest.convert_alias_description(description) for description in response
]
)
result: Optional[types.CollectionsAliasesResponse] = (
self.http.aliases_api.get_collection_aliases(collection_name=collection_name).result
)
assert result is not None, "Get collection aliases returned None"
return result
def get_aliases(self, **kwargs: Any) -> types.CollectionsAliasesResponse:
if self._prefer_grpc:
response = self.grpc_collections.ListAliases(
grpc.ListAliasesRequest(), timeout=self._timeout
).aliases
return types.CollectionsAliasesResponse(
aliases=[
GrpcToRest.convert_alias_description(description) for description in response
]
)
result: Optional[types.CollectionsAliasesResponse] = (
self.http.aliases_api.get_collections_aliases().result
)
assert result is not None, "Get aliases returned None"
return result
def get_collections(self, **kwargs: Any) -> types.CollectionsResponse:
if self._prefer_grpc:
response = self.grpc_collections.List(
grpc.ListCollectionsRequest(), timeout=self._timeout
).collections
return types.CollectionsResponse(
collections=[
GrpcToRest.convert_collection_description(description)
for description in response
]
)
result: Optional[types.CollectionsResponse] = (
self.http.collections_api.get_collections().result
)
assert result is not None, "Get collections returned None"
return result
def get_collection(self, collection_name: str, **kwargs: Any) -> types.CollectionInfo:
if self._prefer_grpc:
return GrpcToRest.convert_collection_info(
self.grpc_collections.Get(
grpc.GetCollectionInfoRequest(collection_name=collection_name),
timeout=self._timeout,
).result
)
result: Optional[types.CollectionInfo] = self.http.collections_api.get_collection(
collection_name=collection_name
).result
assert result is not None, "Get collection returned None"
return result
def collection_exists(self, collection_name: str, **kwargs: Any) -> bool:
if self._prefer_grpc:
return self.grpc_collections.CollectionExists(
grpc.CollectionExistsRequest(collection_name=collection_name),
timeout=self._timeout,
).result.exists
result: Optional[models.CollectionExistence] = self.http.collections_api.collection_exists(
collection_name=collection_name
).result
assert result is not None, "Collection exists returned None"
return result.exists
def update_collection(
self,
collection_name: str,
optimizers_config: Optional[types.OptimizersConfigDiff] = None,
collection_params: Optional[types.CollectionParamsDiff] = None,
vectors_config: Optional[types.VectorsConfigDiff] = None,
hnsw_config: Optional[types.HnswConfigDiff] = None,
quantization_config: Optional[types.QuantizationConfigDiff] = None,
timeout: Optional[int] = None,
sparse_vectors_config: Optional[Mapping[str, types.SparseVectorParams]] = None,
strict_mode_config: Optional[types.StrictModeConfig] = None,
metadata: Optional[types.Payload] = None,
**kwargs: Any,
) -> bool:
if self._prefer_grpc:
if isinstance(optimizers_config, models.OptimizersConfigDiff):
optimizers_config = RestToGrpc.convert_optimizers_config_diff(optimizers_config)
if isinstance(collection_params, models.CollectionParamsDiff):
collection_params = RestToGrpc.convert_collection_params_diff(collection_params)
if isinstance(vectors_config, dict):
vectors_config = RestToGrpc.convert_vectors_config_diff(vectors_config)
if isinstance(hnsw_config, models.HnswConfigDiff):
hnsw_config = RestToGrpc.convert_hnsw_config_diff(hnsw_config)
if isinstance(quantization_config, get_args(models.QuantizationConfigDiff)):
quantization_config = RestToGrpc.convert_quantization_config_diff(
quantization_config
)
if isinstance(sparse_vectors_config, dict):
sparse_vectors_config = RestToGrpc.convert_sparse_vector_config(
sparse_vectors_config
)
if isinstance(strict_mode_config, models.StrictModeConfig):
strict_mode_config = RestToGrpc.convert_strict_mode_config(strict_mode_config)
if isinstance(metadata, dict):
metadata = RestToGrpc.convert_payload(metadata)
return self.grpc_collections.Update(
grpc.UpdateCollection(
collection_name=collection_name,
optimizers_config=optimizers_config,
params=collection_params,
vectors_config=vectors_config,
hnsw_config=hnsw_config,
quantization_config=quantization_config,
sparse_vectors_config=sparse_vectors_config,
strict_mode_config=strict_mode_config,
timeout=timeout,
metadata=metadata,
),
timeout=timeout if timeout is not None else self._timeout,
).result
if isinstance(optimizers_config, grpc.OptimizersConfigDiff):
optimizers_config = GrpcToRest.convert_optimizers_config_diff(optimizers_config)
if isinstance(collection_params, grpc.CollectionParamsDiff):
collection_params = GrpcToRest.convert_collection_params_diff(collection_params)
if isinstance(vectors_config, grpc.VectorsConfigDiff):
vectors_config = GrpcToRest.convert_vectors_config_diff(vectors_config)
if isinstance(hnsw_config, grpc.HnswConfigDiff):
hnsw_config = GrpcToRest.convert_hnsw_config_diff(hnsw_config)
if isinstance(quantization_config, grpc.QuantizationConfigDiff):
quantization_config = GrpcToRest.convert_quantization_config_diff(quantization_config)
result: Optional[bool] = self.http.collections_api.update_collection(
collection_name,
update_collection=models.UpdateCollection(
optimizers_config=optimizers_config,
params=collection_params,
vectors=vectors_config,
hnsw_config=hnsw_config,
quantization_config=quantization_config,
sparse_vectors=sparse_vectors_config,
strict_mode_config=strict_mode_config,
metadata=metadata,
),
timeout=timeout,
).result
assert result is not None, "Update collection returned None"
return result
def delete_collection(
self, collection_name: str, timeout: Optional[int] = None, **kwargs: Any
) -> bool:
if self._prefer_grpc:
return self.grpc_collections.Delete(
grpc.DeleteCollection(collection_name=collection_name, timeout=timeout),
timeout=timeout if timeout is not None else self._timeout,
).result
result: Optional[bool] = self.http.collections_api.delete_collection(
collection_name, timeout=timeout
).result
assert result is not None, "Delete collection returned None"
return result
def create_collection(
self,
collection_name: str,
vectors_config: Optional[
Union[types.VectorParams, Mapping[str, types.VectorParams]]
] = None,
shard_number: Optional[int] = None,
replication_factor: Optional[int] = None,
write_consistency_factor: Optional[int] = None,
on_disk_payload: Optional[bool] = None,
hnsw_config: Optional[types.HnswConfigDiff] = None,
optimizers_config: Optional[types.OptimizersConfigDiff] = None,
wal_config: Optional[types.WalConfigDiff] = None,
quantization_config: Optional[types.QuantizationConfig] = None,
timeout: Optional[int] = None,
sparse_vectors_config: Optional[Mapping[str, types.SparseVectorParams]] = None,
sharding_method: Optional[types.ShardingMethod] = None,
strict_mode_config: Optional[types.StrictModeConfig] = None,
metadata: Optional[types.Payload] = None,
**kwargs: Any,
) -> bool:
if self._prefer_grpc:
if isinstance(vectors_config, (models.VectorParams, dict)):
vectors_config = RestToGrpc.convert_vectors_config(vectors_config)
if isinstance(hnsw_config, models.HnswConfigDiff):
hnsw_config = RestToGrpc.convert_hnsw_config_diff(hnsw_config)
if isinstance(optimizers_config, models.OptimizersConfigDiff):
optimizers_config = RestToGrpc.convert_optimizers_config_diff(optimizers_config)
if isinstance(wal_config, models.WalConfigDiff):
wal_config = RestToGrpc.convert_wal_config_diff(wal_config)
if isinstance(
quantization_config,
get_args(models.QuantizationConfig),
):
quantization_config = RestToGrpc.convert_quantization_config(quantization_config)
if isinstance(sparse_vectors_config, dict):
sparse_vectors_config = RestToGrpc.convert_sparse_vector_config(
sparse_vectors_config
)
if isinstance(sharding_method, models.ShardingMethod):
sharding_method = RestToGrpc.convert_sharding_method(sharding_method)
if isinstance(strict_mode_config, models.StrictModeConfig):
strict_mode_config = RestToGrpc.convert_strict_mode_config(strict_mode_config)
if isinstance(metadata, dict):
metadata = RestToGrpc.convert_payload(metadata)
create_collection = grpc.CreateCollection(
collection_name=collection_name,
hnsw_config=hnsw_config,
wal_config=wal_config,
optimizers_config=optimizers_config,
shard_number=shard_number,
on_disk_payload=on_disk_payload,
timeout=timeout,
vectors_config=vectors_config,
replication_factor=replication_factor,
write_consistency_factor=write_consistency_factor,
quantization_config=quantization_config,
sparse_vectors_config=sparse_vectors_config,
sharding_method=sharding_method,
strict_mode_config=strict_mode_config,
metadata=metadata,
)
return self.grpc_collections.Create(create_collection, timeout=self._timeout).result
if isinstance(hnsw_config, grpc.HnswConfigDiff):
hnsw_config = GrpcToRest.convert_hnsw_config_diff(hnsw_config)
if isinstance(optimizers_config, grpc.OptimizersConfigDiff):
optimizers_config = GrpcToRest.convert_optimizers_config_diff(optimizers_config)
if isinstance(wal_config, grpc.WalConfigDiff):
wal_config = GrpcToRest.convert_wal_config_diff(wal_config)
if isinstance(quantization_config, grpc.QuantizationConfig):
quantization_config = GrpcToRest.convert_quantization_config(quantization_config)
create_collection_request = models.CreateCollection(
vectors=vectors_config,
shard_number=shard_number,
replication_factor=replication_factor,
write_consistency_factor=write_consistency_factor,
on_disk_payload=on_disk_payload,
hnsw_config=hnsw_config,
optimizers_config=optimizers_config,
wal_config=wal_config,
quantization_config=quantization_config,
sparse_vectors=sparse_vectors_config,
sharding_method=sharding_method,
strict_mode_config=strict_mode_config,
metadata=metadata,
)
result: Optional[bool] = self.http.collections_api.create_collection(
collection_name=collection_name,
create_collection=create_collection_request,
timeout=timeout,
).result
assert result is not None, "Create collection returned None"
return result
def recreate_collection(
self,
collection_name: str,
vectors_config: Union[types.VectorParams, Mapping[str, types.VectorParams]],
shard_number: Optional[int] = None,
replication_factor: Optional[int] = None,
write_consistency_factor: Optional[int] = None,
on_disk_payload: Optional[bool] = None,
hnsw_config: Optional[types.HnswConfigDiff] = None,
optimizers_config: Optional[types.OptimizersConfigDiff] = None,
wal_config: Optional[types.WalConfigDiff] = None,
quantization_config: Optional[types.QuantizationConfig] = None,
timeout: Optional[int] = None,
sparse_vectors_config: Optional[Mapping[str, types.SparseVectorParams]] = None,
sharding_method: Optional[types.ShardingMethod] = None,
strict_mode_config: Optional[types.StrictModeConfig] = None,
metadata: Optional[types.Payload] = None,
**kwargs: Any,
) -> bool:
self.delete_collection(collection_name, timeout=timeout)
return self.create_collection(
collection_name=collection_name,
vectors_config=vectors_config,
shard_number=shard_number,
replication_factor=replication_factor,
write_consistency_factor=write_consistency_factor,
on_disk_payload=on_disk_payload,
hnsw_config=hnsw_config,
optimizers_config=optimizers_config,
wal_config=wal_config,
quantization_config=quantization_config,
timeout=timeout,
sparse_vectors_config=sparse_vectors_config,
sharding_method=sharding_method,
strict_mode_config=strict_mode_config,
metadata=metadata,
)
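# Minimal usage sketch (hypothetical names/sizes) -- note this drops any
# existing collection data before recreating it:
#   client.recreate_collection(
#       collection_name="demo",
#       vectors_config=models.VectorParams(size=4, distance=models.Distance.COSINE),
#   )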
@property
def _updater_class(self) -> Type[BaseUploader]:
if self._prefer_grpc:
return GrpcBatchUploader
else:
return RestBatchUploader
def _upload_collection(
self,
batches_iterator: Iterable,
collection_name: str,
max_retries: int,
parallel: int = 1,
method: Optional[str] = None,
wait: bool = False,
shard_key_selector: Optional[types.ShardKeySelector] = None,
update_filter: Optional[types.Filter] = None,
) -> None:
if method is not None:
if method in get_all_start_methods():
start_method = method
else:
raise ValueError(
f"Start methods {method} is not available, available methods: {get_all_start_methods()}"
)
else:
start_method = "forkserver" if "forkserver" in get_all_start_methods() else "spawn"
if self._prefer_grpc:
updater_kwargs = {
"collection_name": collection_name,
"host": self._host,
"port": self._grpc_port,
"max_retries": max_retries,
"ssl": self._https,
"metadata": self._grpc_headers,
"wait": wait,
"shard_key_selector": shard_key_selector,
"options": self._grpc_options,
"timeout": self._timeout,
"update_filter": update_filter,
}
else:
updater_kwargs = {
"collection_name": collection_name,
"uri": self.rest_uri,
"max_retries": max_retries,
"wait": wait,
"shard_key_selector": shard_key_selector,
"update_filter": update_filter,
**self._rest_args,
}
if parallel == 1:
updater = self._updater_class.start(**updater_kwargs)
for _ in updater.process(batches_iterator):
pass
else:
pool = ParallelWorkerPool(parallel, self._updater_class, start_method=start_method)
for _ in pool.unordered_map(batches_iterator, **updater_kwargs):
pass
def upload_points(
self,
collection_name: str,
points: Iterable[types.PointStruct],
batch_size: int = 64,
parallel: int = 1,
method: Optional[str] = None,
max_retries: int = 3,
wait: bool = False,
shard_key_selector: Optional[types.ShardKeySelector] = None,
update_filter: Optional[types.Filter] = None,
**kwargs: Any,
) -> None:
batches_iterator = self._updater_class.iterate_records_batches(
records=points, batch_size=batch_size
)
self._upload_collection(
batches_iterator=batches_iterator,
collection_name=collection_name,
max_retries=max_retries,
parallel=parallel,
method=method,
wait=wait,
shard_key_selector=shard_key_selector,
update_filter=update_filter,
)
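# Minimal usage sketch (hypothetical data):
#   client.upload_points(
#       collection_name="demo",
#       points=[models.PointStruct(id=1, vector=[0.1, 0.2], payload={"k": "v"})],
#       batch_size=64,
#   )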
def upload_collection(
self,
collection_name: str,
vectors: Union[
dict[str, types.NumpyArray], types.NumpyArray, Iterable[types.VectorStruct]
],
payload: Optional[Iterable[dict[Any, Any]]] = None,
ids: Optional[Iterable[types.PointId]] = None,
batch_size: int = 64,
parallel: int = 1,
method: Optional[str] = None,
max_retries: int = 3,
wait: bool = False,
shard_key_selector: Optional[types.ShardKeySelector] = None,
update_filter: Optional[types.Filter] = None,
**kwargs: Any,
) -> None:
batches_iterator = self._updater_class.iterate_batches(
vectors=vectors,
payload=payload,
ids=ids,
batch_size=batch_size,
)
self._upload_collection(
batches_iterator=batches_iterator,
collection_name=collection_name,
max_retries=max_retries,
parallel=parallel,
method=method,
wait=wait,
shard_key_selector=shard_key_selector,
update_filter=update_filter,
)
def create_payload_index(
self,
collection_name: str,
field_name: str,
field_schema: Optional[types.PayloadSchemaType] = None,
field_type: Optional[types.PayloadSchemaType] = None,
wait: bool = True,
ordering: Optional[types.WriteOrdering] = None,
**kwargs: Any,
) -> types.UpdateResult:
if field_type is not None:
show_warning_once(
message="field_type is deprecated, use field_schema instead",
category=DeprecationWarning,
stacklevel=5,
idx="payload-index-field-type",
)
field_schema = field_type
if self._prefer_grpc:
field_index_params = None
if isinstance(field_schema, models.PayloadSchemaType):
field_schema = RestToGrpc.convert_payload_schema_type(field_schema)
if isinstance(field_schema, str):
field_schema = RestToGrpc.convert_payload_schema_type(
models.PayloadSchemaType(field_schema)
)
if isinstance(field_schema, int):
# There is no way to distinguish grpc.PayloadSchemaType from grpc.FieldType,
# since both are plain ints. The method signature assumes that a
# grpc.PayloadSchemaType is passed; otherwise the value will be corrupted.
field_schema = grpc_payload_schema_to_field_type(field_schema)
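# e.g. the int encoding grpc.PayloadSchemaType.Keyword is remapped here to the
# int encoding grpc.FieldType.FieldTypeKeyword (the two enums share a raw int
# representation, hence the caveat above).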
if isinstance(field_schema, get_args(models.PayloadSchemaParams)):
field_schema = RestToGrpc.convert_payload_schema_params(field_schema)
if isinstance(field_schema, grpc.PayloadIndexParams):
field_index_params = field_schema
name = field_index_params.WhichOneof("index_params")
index_params = getattr(field_index_params, name)
if isinstance(index_params, grpc.TextIndexParams):
field_schema = grpc.FieldType.FieldTypeText
if isinstance(index_params, grpc.IntegerIndexParams):
field_schema = grpc.FieldType.FieldTypeInteger
if isinstance(index_params, grpc.KeywordIndexParams):
field_schema = grpc.FieldType.FieldTypeKeyword
if isinstance(index_params, grpc.FloatIndexParams):
field_schema = grpc.FieldType.FieldTypeFloat
if isinstance(index_params, grpc.GeoIndexParams):
field_schema = grpc.FieldType.FieldTypeGeo
if isinstance(index_params, grpc.BoolIndexParams):
field_schema = grpc.FieldType.FieldTypeBool
if isinstance(index_params, grpc.DatetimeIndexParams):
field_schema = grpc.FieldType.FieldTypeDatetime
if isinstance(index_params, grpc.UuidIndexParams):
field_schema = grpc.FieldType.FieldTypeUuid
request = grpc.CreateFieldIndexCollection(
collection_name=collection_name,
field_name=field_name,
field_type=field_schema,
field_index_params=field_index_params,
wait=wait,
ordering=ordering,
)
return GrpcToRest.convert_update_result(
self.grpc_points.CreateFieldIndex(request, timeout=self._timeout).result
)
if isinstance(field_schema, int): # type(grpc.PayloadSchemaType) == int
field_schema = GrpcToRest.convert_payload_schema_type(field_schema)
if isinstance(field_schema, grpc.PayloadIndexParams):
field_schema = GrpcToRest.convert_payload_schema_params(field_schema)
result: Optional[types.UpdateResult] = self.openapi_client.indexes_api.create_field_index(
collection_name=collection_name,
create_field_index=models.CreateFieldIndex(
field_name=field_name, field_schema=field_schema
),
wait=wait,
ordering=ordering,
).result
assert result is not None, "Create field index returned None"
return result
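# Minimal usage sketch (hypothetical field name): index a keyword payload field with
#   client.create_payload_index("demo", field_name="color",
#                               field_schema=models.PayloadSchemaType.KEYWORD)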
def delete_payload_index(
self,
collection_name: str,
field_name: str,
wait: bool = True,
ordering: Optional[types.WriteOrdering] = None,
**kwargs: Any,
) -> types.UpdateResult:
if self._prefer_grpc:
request = grpc.DeleteFieldIndexCollection(
collection_name=collection_name,
field_name=field_name,
wait=wait,
ordering=ordering,
)
return GrpcToRest.convert_update_result(
self.grpc_points.DeleteFieldIndex(request, timeout=self._timeout).result
)
result: Optional[types.UpdateResult] = self.openapi_client.indexes_api.delete_field_index(
collection_name=collection_name,
field_name=field_name,
wait=wait,
ordering=ordering,
).result
assert result is not None, "Delete field index returned None"
return result
def list_snapshots(
self, collection_name: str, **kwargs: Any
) -> list[types.SnapshotDescription]:
if self._prefer_grpc:
snapshots = self.grpc_snapshots.List(
grpc.ListSnapshotsRequest(collection_name=collection_name), timeout=self._timeout
).snapshot_descriptions
return [GrpcToRest.convert_snapshot_description(snapshot) for snapshot in snapshots]
snapshots = self.openapi_client.snapshots_api.list_snapshots(
collection_name=collection_name
).result
assert snapshots is not None, "List snapshots API returned None result"
return snapshots
def create_snapshot(
self, collection_name: str, wait: bool = True, **kwargs: Any
) -> Optional[types.SnapshotDescription]:
if self._prefer_grpc:
snapshot = self.grpc_snapshots.Create(
grpc.CreateSnapshotRequest(collection_name=collection_name), timeout=self._timeout
).snapshot_description
return GrpcToRest.convert_snapshot_description(snapshot)
return self.openapi_client.snapshots_api.create_snapshot(
collection_name=collection_name, wait=wait
).result
def delete_snapshot(
self, collection_name: str, snapshot_name: str, wait: bool = True, **kwargs: Any
) -> Optional[bool]:
if self._prefer_grpc:
self.grpc_snapshots.Delete(
grpc.DeleteSnapshotRequest(
collection_name=collection_name, snapshot_name=snapshot_name
),
timeout=self._timeout,
)
return True
return self.openapi_client.snapshots_api.delete_snapshot(
collection_name=collection_name,
snapshot_name=snapshot_name,
wait=wait,
).result
def list_full_snapshots(self, **kwargs: Any) -> list[types.SnapshotDescription]:
if self._prefer_grpc:
snapshots = self.grpc_snapshots.ListFull(
grpc.ListFullSnapshotsRequest(),
timeout=self._timeout,
).snapshot_descriptions
return [GrpcToRest.convert_snapshot_description(snapshot) for snapshot in snapshots]
snapshots = self.openapi_client.snapshots_api.list_full_snapshots().result
assert snapshots is not None, "List full snapshots API returned None result"
return snapshots
def create_full_snapshot(self, wait: bool = True, **kwargs: Any) -> types.SnapshotDescription:
if self._prefer_grpc:
snapshot_description = self.grpc_snapshots.CreateFull(
grpc.CreateFullSnapshotRequest(), timeout=self._timeout
).snapshot_description
return GrpcToRest.convert_snapshot_description(snapshot_description)
return self.openapi_client.snapshots_api.create_full_snapshot(wait=wait).result
def delete_full_snapshot(
self, snapshot_name: str, wait: bool = True, **kwargs: Any
) -> Optional[bool]:
if self._prefer_grpc:
self.grpc_snapshots.DeleteFull(
grpc.DeleteFullSnapshotRequest(snapshot_name=snapshot_name),
timeout=self._timeout,
)
return True
return self.openapi_client.snapshots_api.delete_full_snapshot(
snapshot_name=snapshot_name, wait=wait
).result
def recover_snapshot(
self,
collection_name: str,
location: str,
api_key: Optional[str] = None,
checksum: Optional[str] = None,
priority: Optional[types.SnapshotPriority] = None,
wait: bool = True,
**kwargs: Any,
) -> Optional[bool]:
return self.openapi_client.snapshots_api.recover_from_snapshot(
collection_name=collection_name,
wait=wait,
snapshot_recover=models.SnapshotRecover(
location=location,
priority=priority,
checksum=checksum,
api_key=api_key,
),
).result
def list_shard_snapshots(
self, collection_name: str, shard_id: int, **kwargs: Any
) -> list[types.SnapshotDescription]:
snapshots = self.openapi_client.snapshots_api.list_shard_snapshots(
collection_name=collection_name,
shard_id=shard_id,
).result
assert snapshots is not None, "List snapshots API returned None result"
return snapshots
def create_shard_snapshot(
self, collection_name: str, shard_id: int, wait: bool = True, **kwargs: Any
) -> Optional[types.SnapshotDescription]:
return self.openapi_client.snapshots_api.create_shard_snapshot(
collection_name=collection_name,
shard_id=shard_id,
wait=wait,
).result
def delete_shard_snapshot(
self,
collection_name: str,
shard_id: int,
snapshot_name: str,
wait: bool = True,
**kwargs: Any,
) -> Optional[bool]:
return self.openapi_client.snapshots_api.delete_shard_snapshot(
collection_name=collection_name,
shard_id=shard_id,
snapshot_name=snapshot_name,
wait=wait,
).result
def recover_shard_snapshot(
self,
collection_name: str,
shard_id: int,
location: str,
api_key: Optional[str] = None,
checksum: Optional[str] = None,
priority: Optional[types.SnapshotPriority] = None,
wait: bool = True,
**kwargs: Any,
) -> Optional[bool]:
return self.openapi_client.snapshots_api.recover_shard_from_snapshot(
collection_name=collection_name,
shard_id=shard_id,
wait=wait,
shard_snapshot_recover=models.ShardSnapshotRecover(
location=location,
priority=priority,
checksum=checksum,
api_key=api_key,
),
).result
def create_shard_key(
self,
collection_name: str,
shard_key: types.ShardKey,
shards_number: Optional[int] = None,
replication_factor: Optional[int] = None,
placement: Optional[list[int]] = None,
initial_state: Optional[types.ReplicaState] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> bool:
if self._prefer_grpc:
if isinstance(shard_key, get_args_subscribed(models.ShardKey)):
shard_key = RestToGrpc.convert_shard_key(shard_key)
if isinstance(initial_state, models.ReplicaState):
initial_state = RestToGrpc.convert_replica_state(initial_state)
return self.grpc_collections.CreateShardKey(
grpc.CreateShardKeyRequest(
collection_name=collection_name,
timeout=timeout,
request=grpc.CreateShardKey(
shard_key=shard_key,
shards_number=shards_number,
replication_factor=replication_factor,
placement=placement or [],
initial_state=initial_state,
),
),
timeout=timeout if timeout is not None else self._timeout,
).result
else:
result = self.openapi_client.distributed_api.create_shard_key(
collection_name=collection_name,
timeout=timeout,
create_sharding_key=models.CreateShardingKey(
shard_key=shard_key,
shards_number=shards_number,
replication_factor=replication_factor,
placement=placement,
initial_state=initial_state,
),
).result
assert result is not None, "Create shard key returned None"
return result
def delete_shard_key(
self,
collection_name: str,
shard_key: types.ShardKey,
timeout: Optional[int] = None,
**kwargs: Any,
) -> bool:
if self._prefer_grpc:
if isinstance(shard_key, get_args_subscribed(models.ShardKey)):
shard_key = RestToGrpc.convert_shard_key(shard_key)
return self.grpc_collections.DeleteShardKey(
grpc.DeleteShardKeyRequest(
collection_name=collection_name,
timeout=timeout,
request=grpc.DeleteShardKey(
shard_key=shard_key,
),
),
timeout=timeout if timeout is not None else self._timeout,
).result
else:
result = self.openapi_client.distributed_api.delete_shard_key(
collection_name=collection_name,
timeout=timeout,
drop_sharding_key=models.DropShardingKey(
shard_key=shard_key,
),
).result
assert result is not None, "Delete shard key returned None"
return result
def info(self) -> types.VersionInfo:
if self._prefer_grpc:
version_info = self.grpc_root.HealthCheck(
grpc.HealthCheckRequest(), timeout=self._timeout
)
return GrpcToRest.convert_health_check_reply(version_info)
version_info = self.rest.service_api.root()
assert version_info is not None, "Healthcheck returned None"
return version_info
def cluster_collection_update(
self,
collection_name: str,
cluster_operation: types.ClusterOperations,
timeout: Optional[int] = None,
**kwargs: Any,
) -> bool:
if self._prefer_grpc:
cluster_operation = RestToGrpc.convert_cluster_operations(cluster_operation)
grpc_operation = {}
if isinstance(cluster_operation, grpc.MoveShard):
grpc_operation["move_shard"] = cluster_operation
elif isinstance(cluster_operation, grpc.ReplicateShard):
grpc_operation["replicate_shard"] = cluster_operation
elif isinstance(cluster_operation, grpc.AbortShardTransfer):
grpc_operation["abort_transfer"] = cluster_operation
elif isinstance(cluster_operation, grpc.Replica):
grpc_operation["drop_replica"] = cluster_operation
elif isinstance(cluster_operation, grpc.CreateShardKey):
grpc_operation["create_shard_key"] = cluster_operation
elif isinstance(cluster_operation, grpc.DeleteShardKey):
grpc_operation["delete_shard_key"] = cluster_operation
elif isinstance(cluster_operation, grpc.RestartTransfer):
grpc_operation["restart_transfer"] = cluster_operation
elif isinstance(cluster_operation, grpc.ReplicatePoints):
grpc_operation["replicate_points"] = cluster_operation
else:
raise TypeError(f"Unknown cluster operation: {cluster_operation}")
return self.grpc_collections.UpdateCollectionClusterSetup(
grpc.UpdateCollectionClusterSetupRequest(
collection_name=collection_name, timeout=timeout, **grpc_operation
),
timeout=timeout if timeout is not None else self._timeout,
).result
update_result = self.rest.distributed_api.update_collection_cluster(
collection_name=collection_name, cluster_operations=cluster_operation, timeout=timeout
).result
assert update_result is not None, "Cluster collection update returned None"
return update_result
def cluster_status(self) -> types.ClusterStatus:
# grpc does not have cluster status api
status_result = self.rest.distributed_api.cluster_status().result
assert status_result is not None, "Cluster status returned None"
return status_result
def recover_current_peer(self) -> bool:
# grpc does not have recover peer api
recover_result = self.rest.distributed_api.recover_current_peer().result
assert recover_result is not None, "Recover current peer returned None"
return recover_result
def remove_peer(
self,
peer_id: int,
force: Optional[bool] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> bool:
# grpc does not have remove peer api
update_result = self.rest.distributed_api.remove_peer(
peer_id=peer_id,
force=force,
timeout=timeout,
).result
assert update_result is not None, "Remove peer returned None"
return update_result
def collection_cluster_info(self, collection_name: str) -> types.CollectionClusterInfo:
if self._prefer_grpc:
collection_info = self.grpc_collections.CollectionClusterInfo(
grpc.CollectionClusterInfoRequest(collection_name=collection_name),
timeout=self._timeout,
)
return GrpcToRest.convert_collection_cluster_info(collection_info)
collection_info = self.rest.distributed_api.collection_cluster_info(
collection_name=collection_name
).result
assert collection_info is not None, "Collection cluster info returned None"
return collection_info
| QdrantRemote |
python | wandb__wandb | wandb/sdk/internal/_generated/server_features_query.py | {
"start": 210,
"end": 335
} | class ____(GQLResult):
server_info: Optional[ServerFeaturesQueryServerInfo] = Field(alias="serverInfo")
| ServerFeaturesQuery |
python | huggingface__transformers | src/transformers/models/llava_onevision/modeling_llava_onevision.py | {
"start": 32119,
"end": 44951
} | class ____(LlavaOnevisionPreTrainedModel, GenerationMixin):
_checkpoint_conversion_mapping = {
r"^language_model.model": "model.language_model",
r"^vision_tower": "model.vision_tower",
r"^multi_modal_projector": "model.multi_modal_projector",
r"^image_newline": "model.image_newline",
r"^language_model.lm_head": "lm_head",
}
_tied_weights_keys = {"lm_head.weight": "model.language_model.embed_tokens.weight"}
def __init__(self, config: LlavaOnevisionConfig):
super().__init__(config)
self.model = LlavaOnevisionModel(config)
self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
def get_output_embeddings(self) -> nn.Module:
return self.lm_head
def pack_image_features(self, image_features, image_sizes, vision_feature_select_strategy, image_newline=None):
return self.model.pack_image_features(
image_features=image_features,
image_sizes=image_sizes,
vision_feature_select_strategy=vision_feature_select_strategy,
image_newline=image_newline,
)
def get_image_features(
self,
pixel_values: torch.FloatTensor,
image_sizes: torch.Tensor,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
):
return self.model.get_image_features(
pixel_values=pixel_values,
image_sizes=image_sizes,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
image_sizes: Optional[torch.LongTensor] = None,
pixel_values_videos: Optional[torch.FloatTensor] = None,
image_sizes_videos: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
vision_aspect_ratio: Optional[str] = None,
batch_num_images: Optional[torch.LongTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, LlavaOnevisionCausalLMOutputWithPast]:
r"""
image_sizes_videos (`torch.LongTensor` of shape `(batch_size, frames, 2)`, *optional*):
The sizes of the videos in the batch, being (height, width) for each frame in the video.
vision_aspect_ratio (`str`, *optional*, defaults to `"anyres_max_9"`):
Aspect ratio used when processing image features.
batch_num_images (`torch.LongTensor`, *optional*):
Number of images in each sample.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> import torch
>>> from transformers import LlavaOnevisionProcessor, LlavaOnevisionForConditionalGeneration
>>> model = LlavaOnevisionForConditionalGeneration.from_pretrained("llava-hf/llava-onevision-qwen2-7b-ov-hf", dtype="float16", device_map="cuda:0")
>>> processor = LlavaOnevisionProcessor.from_pretrained("llava-hf/llava-onevision-qwen2-7b-ov-hf")
>>> conversation = [
... {
... "role": "user",
... "content": [
... {"type": "text", "text": "What is shown in this image?"},
... {"type": "image"},
... ],
... },
... ]
>>> prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
>>> image_file = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> raw_image = Image.open(requests.get(image_file, stream=True).raw)
>>> inputs = processor(text=prompt, images=raw_image, return_tensors='pt').to(0, torch.float16)
>>> output = model.generate(**inputs, max_new_tokens=20, do_sample=False)
>>> processor.batch_decode(output, skip_special_tokens=True)[0]
"user\n\nWhat is shown in this image?\nassistant\ncat"
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
vision_feature_select_strategy = (
vision_feature_select_strategy
if vision_feature_select_strategy is not None
else self.config.vision_feature_select_strategy
)
vision_aspect_ratio = (
vision_aspect_ratio if vision_aspect_ratio is not None else self.config.vision_aspect_ratio
)
outputs = self.model(
input_ids=input_ids,
pixel_values=pixel_values,
pixel_values_videos=pixel_values_videos,
image_sizes=image_sizes,
image_sizes_videos=image_sizes_videos,
vision_aspect_ratio=vision_aspect_ratio,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
batch_num_images=batch_num_images,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
**kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(
logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
)
return LlavaOnevisionCausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=outputs.image_hidden_states,
video_hidden_states=outputs.video_hidden_states,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
inputs_embeds=None,
pixel_values=None,
image_sizes=None,
pixel_values_videos=None,
image_sizes_videos=None,
attention_mask=None,
cache_position=None,
logits_to_keep=None,
**kwargs,
):
# Overwritten -- in specific circumstances we don't want to forward image inputs to the model
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
**kwargs,
)
if cache_position[0] == 0:
# In the cached decoding stage, pixel values should be None because the input ids
# no longer contain special image tokens; otherwise pixel values must be passed to the model.
model_inputs["pixel_values"] = pixel_values
model_inputs["image_sizes"] = image_sizes
model_inputs["pixel_values_videos"] = pixel_values_videos
model_inputs["image_sizes_videos"] = image_sizes_videos
return model_inputs
@staticmethod
def _prepare_4d_causal_attention_mask_with_cache_position(
attention_mask: torch.Tensor,
sequence_length: int,
target_length: int,
dtype: torch.dtype,
cache_position: torch.Tensor,
batch_size: int,
**kwargs,
):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`; if the input `attention_mask` is already 4D, it is returned unchanged.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
`(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
The target length: when generating with static cache, the mask should be as long as the static cache,
to account for the 0 padding, the part of the cache that is not filled yet.
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
batch_size (`int`):
Batch size.
"""
if attention_mask is not None and attention_mask.dim() == 4:
# In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full(
(sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
)
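# triu masks local future positions; the cache_position comparison then unmasks
# key columns at or before each query's absolute position, so the cached prefix
# stays visible while unfilled static-cache slots remain masked.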
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
causal_mask.device
)
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
padding_mask, min_dtype
)
return causal_mask
def get_video_features(
self,
pixel_values: torch.FloatTensor,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
):
return self.model.get_video_features(
pixel_values=pixel_values,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
)
__all__ = ["LlavaOnevisionModel", "LlavaOnevisionForConditionalGeneration", "LlavaOnevisionPreTrainedModel"]
| LlavaOnevisionForConditionalGeneration |
python | catalyst-team__catalyst | catalyst/contrib/datasets/misc_cv.py | {
"start": 184,
"end": 2476
} | class ____(ImageFolderDataset):
"""
Base class for datasets with the following structure:
.. code-block:: text
path/to/dataset/
|-- train/
| |-- class1/ # folder of N images
| | |-- train_image11
| | |-- train_image12
| | ...
| | `-- train_image1N
| ...
| `-- classM/ # folder of K images
| |-- train_imageM1
| |-- train_imageM2
| ...
| `-- train_imageMK
`-- val/
|-- class1/ # folder of P images
| |-- val_image11
| |-- val_image12
| ...
| `-- val_image1P
...
`-- classM/ # folder of T images
|-- val_imageT1
|-- val_imageT2
...
`-- val_imageMT
"""
# name of dataset folder
name: str
# list of (url, md5 hash) tuples representing files to download
resources: Iterable[Tuple[str, str]] = None
def __init__(self, root: str, train: bool = True, download: bool = False, **kwargs):
"""Constructor method for the ``ImageClassificationDataset`` class.
Args:
root: root directory of dataset
train: if ``True``, creates dataset from ``train/``
subfolder, otherwise from ``val/``
download: if ``True``, downloads the dataset from
the internet and puts it in root directory. If dataset
is already downloaded, it is not downloaded again
**kwargs: Keyword-arguments passed to ``super().__init__`` method.
"""
        # download dataset if needed
if download and not os.path.exists(os.path.join(root, self.name)):
os.makedirs(root, exist_ok=True)
# download files
for url, md5 in self.resources:
filename = url.rpartition("/")[2]
download_and_extract_archive(
url, download_root=root, filename=filename, md5=md5
)
rootpath = os.path.join(root, self.name, "train" if train else "val")
super().__init__(rootpath=rootpath, **kwargs)
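# A minimal sketch of a concrete subclass (the dataset name, URL, and md5 hash
# are hypothetical placeholders, not a real published archive):
#
#   class MyImageDataset(ImageClassificationDataset):
#       name = "my-image-dataset"
#       resources = [
#           ("https://example.com/my-image-dataset.tar.gz", "<md5 hash>"),
#       ]
#
#   train_data = MyImageDataset(root="./data", train=True, download=True)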
__all__ = ["ImageClassificationDataset"]
| ImageClassificationDataset |
python | fastapi__sqlmodel | docs_src/tutorial/connect/select/tutorial004.py | {
"start": 254,
"end": 2190
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
team_id: Optional[int] = Field(default=None, foreign_key="team.id")
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar")
session.add(team_preventers)
session.add(team_z_force)
session.commit()
hero_deadpond = Hero(
name="Deadpond", secret_name="Dive Wilson", team_id=team_z_force.id
)
hero_rusty_man = Hero(
name="Rusty-Man",
secret_name="Tommy Sharp",
age=48,
team_id=team_preventers.id,
)
hero_spider_boy = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
session.add(hero_deadpond)
session.add(hero_rusty_man)
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_deadpond)
session.refresh(hero_rusty_man)
session.refresh(hero_spider_boy)
print("Created hero:", hero_deadpond)
print("Created hero:", hero_rusty_man)
print("Created hero:", hero_spider_boy)
def select_heroes():
with Session(engine) as session:
statement = select(Hero).join(Team).where(Team.name == "Preventers")
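        # The join condition is inferred from the `team_id` foreign key, so the
        # statement roughly renders as (parameter binding elided):
        #   SELECT hero.* FROM hero JOIN team ON team.id = hero.team_id
        #   WHERE team.name = 'Preventers'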
results = session.exec(statement)
for hero in results:
print("Preventer Hero:", hero)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
| Hero |
python | django__django | django/contrib/postgres/constraints.py | {
"start": 557,
"end": 664
} | class ____(IndexExpression):
template = "%(expressions)s WITH %(operator)s"
| ExclusionConstraintExpression |
python | yaml__pyyaml | lib/yaml/tokens.py | {
"start": 1915,
"end": 2112
} | class ____(Token):
id = '<anchor>'
def __init__(self, value, start_mark, end_mark):
self.value = value
self.start_mark = start_mark
self.end_mark = end_mark
| AnchorToken |
python | google__jax | tests/pallas/mosaic_gpu_test.py | {
"start": 5874,
"end": 89740
} | class ____(PallasTest):
def test_jitted_function_containing_multiple_pallas_calls(self):
# This test aims to ensure that execution works correctly inside CUDA
# graphs. This is complementary to the test in
# jaxlib/mosaic/gpu/custom_call_test.cc that checks that such jitted
# functions do invoke CUDA graphs.
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([256], jnp.float32),
)
def kernel(x_ref, o_ref):
o_ref[...] = x_ref[...] + 1
@jax.jit
def f(x):
# Run the kernel 10 times because CUDA graphs only trigger for >= 5 ops.
for _ in range(10):
x = kernel(x)
return x
x = jnp.arange(256).astype(jnp.float32)
np.testing.assert_array_equal(f(x), x + 10)
@parameterized.product(
op=[
lax.neg,
lax.bitwise_not,
lax.logistic,
lax.exp,
lambda x: x**2,
lambda x: x**5,
lax.rsqrt,
lax.tanh,
lax.log,
jax.nn.gelu,
],
approx_math=[True, False],
)
def test_unary_op(self, op, approx_math):
dtype = jnp.int32 if op is lax.bitwise_not else jnp.float32
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([256], dtype),
compiler_params=plgpu.CompilerParams(approx_math=approx_math),
)
def kernel(x_ref, o_ref):
o_ref[...] = op(x_ref[...])
x = jnp.arange(256).astype(dtype)
np.testing.assert_allclose(
kernel(x), op(x), rtol=1e-5 if approx_math else 3e-7
)
@parameterized.product(
op=[
operator.add,
lambda x, _: x + 1, # for int->vector conversion
operator.sub,
operator.mul,
lax.div,
jnp.minimum,
jnp.maximum,
],
dtype=[jnp.float32, jnp.int32, jnp.uint32],
)
def test_binary_op(self, op, dtype):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct([256], dtype)
)
def kernel(x_ref, y_ref, o_ref):
o_ref[...] = op(x_ref[...], y_ref[...])
key0, key1 = jax.random.split(jax.random.key(0), 2)
x = (jax.random.uniform(key0, [256]) * 42).astype(dtype)
y = (jax.random.uniform(key1, [256]) * 42).astype(dtype)
np.testing.assert_array_equal(kernel(x, y), op(x, y))
@parameterized.product(
op=[
lax.eq,
operator.ne,
operator.lt,
operator.le,
operator.gt,
operator.ge,
],
# TODO(slebedev): Support integral types.
dtype=[jnp.float32, jnp.int32, jnp.uint32],
)
def test_comparison_op(self, op, dtype):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct([256], dtype)
)
def kernel(o_ref):
o_ref[...] = jnp.broadcast_to(
op(dtype(42), dtype(24)).astype(dtype), o_ref.shape
)
np.testing.assert_array_equal(kernel(), jnp.full([256], op(42, 24), dtype))
def test_add_first(self):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([256], jnp.float32),
)
def kernel(x_ref, y_ref, o_ref):
o_ref[...] = x_ref[...] + y_ref[0]
x = jnp.arange(256).astype(jnp.float32)
y = jnp.flip(x).reshape(1, 256)
np.testing.assert_array_equal(kernel(x, y), x + y[0])
@parameterized.product(shape=[(128,), (128, 64)])
def test_reduce_sum(self, shape):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct(shape, jnp.float32)
)
def kernel(x_ref, o_ref):
o_ref[...] = jnp.broadcast_to(_sum_same_dtype(x_ref[...]), o_ref.shape)
x = jnp.arange(math.prod(shape)).reshape(shape).astype(jnp.float32)
np.testing.assert_array_equal(kernel(x), jnp.sum(x))
def test_reshape(self):
self.skip_if_wg_semantics() # Can't infer transforms for `memref.expand_shape`.
shape1, shape2 = (128,), (2, 16, 4)
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct(shape2, jnp.float32)
)
def kernel(x_ref, out_ref):
x_ref_reshaped = x_ref.reshape(shape2)
self.assertEqual(x_ref.shape, shape1)
self.assertEqual(x_ref_reshaped.shape, shape2)
out_ref[...] = x_ref_reshaped[...]
x = jnp.arange(math.prod(shape1)).astype(jnp.float32)
np.testing.assert_array_equal(kernel(x), x.reshape(shape2))
def test_add_xy_indexed(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct([128], jnp.float32)
)
def kernel(x_ref, y_ref, o_ref):
idx = _sum_same_dtype(y_ref[...])
o_ref[...] = x_ref[idx]
x = jnp.arange(4 * 128).reshape(4, 128).astype(jnp.float32)
y = jnp.zeros(128, dtype=jnp.int32)
np.testing.assert_array_equal(kernel(x, y), x[jnp.sum(y)])
def test_add_one_grid(self):
@functools.partial(
self.pallas_call,
in_specs=[pl.BlockSpec((128,), lambda *i: i)],
out_specs=pl.BlockSpec((128,), lambda *i: i),
out_shape=jax.ShapeDtypeStruct([128 * 2], jnp.float32),
grid=2,
)
def kernel(x_ref, o_ref):
o_ref[...] = x_ref[...] + 1.0
x = jnp.arange(128 * 2).astype(jnp.float32)
np.testing.assert_array_equal(kernel(x), x + 1.0)
def test_add_one_grid_with_scratch(self):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([128 * 2], jnp.float32),
in_specs=[pl.BlockSpec((128,), lambda *i: i)],
out_specs=pl.BlockSpec((128,), lambda *i: i),
scratch_shapes=[plgpu.SMEM((128,), jnp.float32)],
grid=2,
)
def kernel(x_ref, o_ref, scratch_ref):
scratch_ref[...] = x_ref[...] + 1
o_ref[...] = scratch_ref[...]
x = jnp.arange(256).astype(jnp.float32)
np.testing.assert_array_equal(kernel(x), x + 1.0)
@parameterized.product(max_concurrent_steps=[1, 2, 3, 4, 16])
def test_add_one_grid_pipelined(self, max_concurrent_steps):
@functools.partial(
self.pallas_call,
in_specs=[pl.BlockSpec((128, 16), lambda i, j: (i, j))],
out_specs=pl.BlockSpec((128, 16), lambda i, j: (i, j)),
out_shape=jax.ShapeDtypeStruct([128 * 2, 64], jnp.float32),
compiler_params=plgpu.CompilerParams(
dimension_semantics=["parallel", "sequential"],
max_concurrent_steps=max_concurrent_steps,
),
grid=(2, 4),
)
def kernel(x_ref, o_ref):
o_ref[...] = x_ref[...] + 1.0
x = jnp.arange(128 * 2 * 64).reshape((128 * 2, 64)).astype(jnp.float32)
np.testing.assert_array_equal(kernel(x), x + 1.0)
def test_add_one_grid_pipelined_with_leading_sequential_dimension(self):
@functools.partial(
self.pallas_call,
in_specs=[pl.BlockSpec((128, 16), lambda i, j: (i, j))],
out_specs=pl.BlockSpec((128, 16), lambda i, j: (i, j)),
out_shape=jax.ShapeDtypeStruct([128 * 2, 64], jnp.float32),
compiler_params=plgpu.CompilerParams(
dimension_semantics=["sequential", "parallel"],
),
grid=(2, 4),
)
def kernel(x_ref, o_ref):
o_ref[...] = x_ref[...] + 1.0
x = jnp.arange(128 * 2 * 64).reshape((128 * 2, 64)).astype(jnp.float32)
np.testing.assert_array_equal(kernel(x), x + 1.0)
def test_add_one_grid_pipelined_program_id(self):
@functools.partial(
self.pallas_call,
out_specs=pl.BlockSpec((16, 16), lambda i, j: (i, j)),
out_shape=jax.ShapeDtypeStruct([16, 64], jnp.int32),
compiler_params=plgpu.CompilerParams(
dimension_semantics=["parallel", "sequential"],
max_concurrent_steps=2,
),
grid=(4, 4),
)
def kernel(o_ref):
o_ref[...] = jnp.broadcast_to(pl.program_id(1), o_ref.shape)
np.testing.assert_array_equal(
kernel(),
jnp.repeat(jnp.repeat(jnp.arange(4), 16)[None], 16, axis=0),
)
def test_add_one_grid_pipelined_sequential_invariant_output(self):
@functools.partial(
self.pallas_call,
in_specs=[pl.BlockSpec((32, 16), lambda i, j: (i, j))],
out_specs=pl.BlockSpec((32, 16), lambda i, j: (i, 0)),
out_shape=jax.ShapeDtypeStruct([32 * 2, 64], jnp.float32),
compiler_params=plgpu.CompilerParams(
dimension_semantics=["parallel", "sequential"],
max_concurrent_steps=2,
),
grid=(2, 4),
)
def kernel(x_ref, o_ref):
o_ref[...] = x_ref[...] + 1.0
x = jnp.arange(32 * 2 * 64).reshape((32 * 2, 64)).astype(jnp.float32)
y = jnp.empty_like(x)
for i in range(2):
i_slice = slice(32 * i, 32 * (i + 1))
for j in range(4):
j_slice = slice(16 * j, 16 * (j + 1))
y = y.at[i_slice, :16].set(x[i_slice, j_slice] + 1)
# We only compare the elements in the first 16 columns, because the rest
# are never written to.
np.testing.assert_array_equal(kernel(x)[:, :16], y[:, :16])
@parameterized.parameters(jnp.float32, jnp.int32, jnp.uint32)
def test_iota(self, dtype):
dimension = 1
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct((128, 128), dtype)
)
def kernel(o_ref):
o_ref[...] = plgpu.broadcasted_iota(
dtype, o_ref.shape, dimension, layout=plgpu.Layout.WGMMA
)
np.testing.assert_array_equal(
kernel(), jax.lax.broadcasted_iota(dtype, (128, 128), dimension)
)
@parameterized.parameters(jnp.bfloat16, jnp.int16, jnp.uint16)
def test_inline_mgpu(self, jnp_type):
dtype = jnp.dtype(jnp_type)
is_signed = mgpu.utils.is_signed(dtype)
shape = (128, 128)
tile = (64, 128 // dtype.itemsize)
tiled_shape = list(mgpu.tile_shape(shape, tile))
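    # For the 2-byte dtypes parameterized above, tile is (64, 64), so the
    # (128, 128) array is split into a [2, 2, 64, 64] grid of tiles.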
key = jax.random.key(0)
x = jax.random.uniform(key, (2, *shape), minval=-10, maxval=10).astype(
dtype
)
transforms = (
plgpu.TilingTransform(tile),
plgpu.SwizzleTransform(128),
)
if self.LOWERING_SEMANTICS == plgpu.LoweringSemantics.Warpgroup:
pallas_call_transforms = ()
else:
pallas_call_transforms = transforms
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct(shape, dtype),
in_specs=(pl.BlockSpec(memory_space=plgpu.GMEM),),
scratch_shapes=[
plgpu.SMEM(
x.shape,
dtype,
transforms=pallas_call_transforms,
),
plgpu.Barrier(),
],
out_specs=pl.BlockSpec(memory_space=plgpu.GMEM),
)
def kernel(x_ref, o_ref, smem_ref, barrier):
plgpu.copy_gmem_to_smem(x_ref, smem_ref, barrier)
plgpu.barrier_wait(barrier)
# Add an indexer at the end.
sliced_smem_ref = smem_ref.at[0]
@plgpu.inline_mgpu(
arg_types=(plgpu.RefType((
plgpu.TilingTransform(tile),
plgpu.SwizzleTransform(128),
)),),
return_type=plgpu.ShapeDtypeStruct(
shape, dtype, layout=plgpu.Layout.WGMMA
),
)
def foo(ctx, smem_ref):
del ctx
assert smem_ref.type.shape == tiled_shape, (smem_ref.type, tiled_shape)
x = mgpu.FragmentedArray.load_tiled(
smem_ref, swizzle=128, is_signed=is_signed
)
y = mgpu.FragmentedArray.splat(
mgpu.c(1, x.mlir_dtype),
shape=x.shape,
layout=x.layout,
is_signed=is_signed,
)
return (x + x + y)
arr = foo(sliced_smem_ref)
@plgpu.inline_mgpu(arg_types=(plgpu.Layout.WGMMA, plgpu.RefType(transforms), plgpu.RefType()))
def store(ctx, arr, smem_ref, o_ref):
sliced_smem_ref = mgpu.memref_slice(smem_ref, (0,))
arr.store_tiled(sliced_smem_ref, swizzle=128)
mgpu.commit_shared()
ctx.async_copy(
src_ref=sliced_smem_ref,
dst_ref=o_ref,
swizzle=128,
gmem_transform=(mgpu.TileTransform(tile)),
)
ctx.await_async_copy(0)
# A dummy if statement to make sure we inline nested blocks correctly.
is_leader_thread = mgpu.utils.single_thread_predicate()
with mgpu.utils.when(is_leader_thread):
pass
# This time we slice inside the inline_mgpu body.
store(arr, smem_ref, o_ref)
np.testing.assert_array_equal(kernel(x), x[0] + x[0] + 1)
@parameterized.parameters(
plgpu.Layout.WGMMA,
plgpu.Layout.WGMMA_UPCAST_2X,
plgpu.Layout.WGMMA_UPCAST_4X,
plgpu.Layout.TCGEN05,
)
def test_inline_mgpu_layout_args(self, layout: gpu_core.SomeLayout):
quant_dtype = jnp.int8
dtype = jnp.bfloat16
mgpu_layout = layout.to_mgpu()
shape = (128, 128)
rngs = list(jax.random.split(jax.random.key(0)))
x = jax.random.randint(rngs.pop(), shape, minval=-10, maxval=10).astype(
quant_dtype
)
x_s = jax.random.uniform(
rngs.pop(), shape[0], minval=-100, maxval=100
).astype(dtype)
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct(shape, dtype),
in_specs=(pl.BlockSpec(memory_space=plgpu.GMEM),
pl.BlockSpec(memory_space=plgpu.GMEM)),
scratch_shapes=[
plgpu.SMEM(
x.shape,
dtype,
),
],
out_specs=pl.BlockSpec(memory_space=plgpu.GMEM),
)
def kernel(
x_ref, x_scale_ref, o_ref, o_smem_ref,
):
x = plgpu.load(x_ref, (), layout=layout, optimized=False).astype(x_scale_ref.dtype)
x_s = plgpu.load(x_scale_ref, (), layout=layout.reduce(1), optimized=False)
@plgpu.inline_mgpu(
arg_types=(layout,layout.reduce(1)),
return_type=plgpu.ShapeDtypeStruct(
shape, dtype, layout=layout
),
)
def custom_broadcast(ctx, x_fa, xs_fa):
del ctx
return xs_fa.broadcast_in_dim(shape, [0], layout=mgpu_layout) * x_fa
arr = custom_broadcast(x, x_s)
o_smem_ref[...] = arr
plgpu.commit_smem()
plgpu.copy_smem_to_gmem(o_smem_ref, o_ref)
plgpu.wait_smem_to_gmem(0)
np.testing.assert_array_equal(
kernel(x, x_s),
x.astype(dtype) * jnp.broadcast_to(x_s[:, None], x.shape),
)
def test_sync_copy(self):
shape = (128, 128)
transforms = self.default_transforms(dtype=jnp.float32)
@functools.partial(
self.pallas_call,
in_specs=[pl.BlockSpec(memory_space=plgpu.GMEM)],
out_shape=jax.ShapeDtypeStruct(shape, jnp.float32),
out_specs=pl.BlockSpec(memory_space=plgpu.GMEM),
scratch_shapes=[plgpu.SMEM(shape, jnp.float32, transforms=transforms)],
)
def kernel(x_ref, y_ref, scratch_ref):
layout = plgpu.Layout.SMEM_GMEM_COPY(shape, jnp.float32, swizzle=128)
# GMEM loads require optimized=False, because we can't prove coalescing.
# But with this layout they should be fast.
scratch_ref[...] = plgpu.load(x_ref, (), layout=layout, optimized=False)
y_ref[...] = plgpu.layout_cast(scratch_ref[...], layout)
x = jnp.arange(math.prod(shape), dtype=jnp.float32).reshape(shape)
np.testing.assert_array_equal(kernel(x), x)
@parameterized.product(indexer=[..., slice(128), slice(None, 128)])
def test_copy_smem_to_gmem(self, indexer):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([256], jnp.float32),
out_specs=pl.BlockSpec(memory_space=plgpu.GMEM),
scratch_shapes=[plgpu.SMEM((256,), jnp.float32)],
)
def kernel(x_ref, o_ref_gmem, scratch_ref):
scratch_ref[...] = x_ref[...] + 1
plgpu.commit_smem()
plgpu.copy_smem_to_gmem(scratch_ref.at[indexer], o_ref_gmem.at[indexer])
plgpu.wait_smem_to_gmem(0)
x = jnp.arange(256).astype(jnp.float32)
np.testing.assert_array_equal(kernel(x)[indexer], x[indexer] + 1.0)
@parameterized.parameters(jnp.bfloat16, jnp.float16, jnp.float32)
def test_copy_smem_to_gmem_reduction(self, dtype):
self.skip_if_wg_semantics() # Reduction not implemented.
@functools.partial(
self.pallas_call,
grid=(200,),
in_specs=[pl.BlockSpec((128,), lambda *i: i), pl.BlockSpec(memory_space=plgpu.GMEM)],
out_specs=pl.BlockSpec(memory_space=plgpu.GMEM),
out_shape=jax.ShapeDtypeStruct([128], dtype),
scratch_shapes=[plgpu.SMEM((128,), dtype)],
        input_output_aliases={1: 0}
)
def kernel(x_ref, o_ref_gmem, o_ref_gmem_alias, scratch_ref):
del o_ref_gmem_alias
scratch_ref[...] = x_ref[...]
plgpu.commit_smem()
plgpu.copy_smem_to_gmem(scratch_ref.at[...], o_ref_gmem.at[...], reduction_op="add")
plgpu.wait_smem_to_gmem(0)
x = jnp.ones(200 * 128).astype(dtype) # 200 blocks
output = jnp.zeros(128).astype(dtype)
output = kernel(x, output)
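    # Each of the 200 grid steps adds its ones-tile into the same 128-element
    # output via reduction_op="add", so every element should accumulate to 200.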
output_val = x.reshape(-1, 128).sum(axis=0)
np.testing.assert_array_equal(output, output_val)
@parameterized.named_parameters(
{"testcase_name": "1d_none",
"shape": (256,), "indexers": (slice(0, 128), slice(None, 32))},
{"testcase_name": "1d_offset",
"shape": (256,), "indexers": (slice(32, 96), slice(0, 32))},
{"testcase_name": "2d_extract",
"shape": (64, 64), "indexers": (4, slice(0, 64))},
)
def test_copy_smem_to_gmem_with_multiple_gmem_indexers(self, shape, indexers):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct(shape, jnp.float32),
out_specs=pl.BlockSpec(memory_space=plgpu.GMEM),
scratch_shapes=[plgpu.SMEM(shape, jnp.float32)],
)
def kernel(x_ref, o_ref_gmem, scratch_ref):
scratch_ref[...] = x_ref[...] + 1
plgpu.commit_smem()
for indexer in indexers:
scratch_ref = scratch_ref.at[indexer]
o_ref_gmem = o_ref_gmem.at[indexer]
plgpu.copy_smem_to_gmem(scratch_ref, o_ref_gmem)
plgpu.wait_smem_to_gmem(0)
x = jnp.arange(np.prod(shape)).astype(jnp.float32).reshape(*shape)
result = kernel(x)
ref = x + 1.0
for indexer in indexers:
result = result[indexer]
ref = ref[indexer]
np.testing.assert_array_equal(result, ref)
@parameterized.product(indexer=[..., slice(128), slice(None, 128)])
def test_copy_gmem_to_smem(self, indexer):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([256], jnp.float32),
in_specs=(pl.BlockSpec(memory_space=plgpu.GMEM),),
scratch_shapes=[
plgpu.SMEM((256,), jnp.float32),
plgpu.Barrier(),
],
)
def kernel(x_ref_gmem, o_ref, scratch_ref, barrier_ref):
plgpu.copy_gmem_to_smem(
x_ref_gmem.at[indexer], scratch_ref.at[indexer], barrier_ref
)
plgpu.barrier_wait(barrier_ref)
o_ref[...] = scratch_ref[...] + 1
x = jnp.arange(256).astype(jnp.float32)
np.testing.assert_array_equal(kernel(x)[indexer], x[indexer] + 1.0)
def test_collective_copy_gmem_to_smem(self):
@functools.partial(
self.kernel,
out_shape=jax.ShapeDtypeStruct((2, 128), jnp.float32),
scratch_shapes=dict(
smem_ref=plgpu.SMEM((128,), jnp.float32),
barrier_ref=plgpu.Barrier(),
),
cluster=(2,),
cluster_names=("cluster",),
)
def kernel(x_ref, y_ref, smem_ref, barrier_ref):
# Specifying collective_axes will enable TMA multicast automatically.
plgpu.copy_gmem_to_smem(
x_ref, smem_ref, barrier_ref, collective_axes="cluster"
)
plgpu.barrier_wait(barrier_ref)
plgpu.copy_smem_to_gmem(smem_ref, y_ref.at[jax.lax.axis_index("cluster")])
plgpu.wait_smem_to_gmem(0)
x = jnp.arange(128, dtype=jnp.float32)
y = kernel(x)
# Each block gets the same data and writes it out.
np.testing.assert_array_equal(y, jnp.stack([x, x], axis=0))
@parameterized.product(indexer=[..., slice(128), slice(None, 128)])
def test_async_prefetch(self, indexer):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([256], jnp.float32),
in_specs=(pl.BlockSpec(memory_space=plgpu.GMEM),),
scratch_shapes=[
plgpu.SMEM((256,), jnp.float32),
plgpu.Barrier(),
],
)
def kernel(x_ref_gmem, o_ref, scratch_ref, barrier_ref):
plgpu.async_prefetch(x_ref_gmem.at[indexer])
plgpu.copy_gmem_to_smem(
x_ref_gmem.at[indexer], scratch_ref.at[indexer], barrier_ref
)
plgpu.barrier_wait(barrier_ref)
o_ref[...] = scratch_ref[...] + 1
x = jnp.arange(256).astype(jnp.float32)
np.testing.assert_array_equal(kernel(x)[indexer], x[indexer] + 1.0)
@parameterized.named_parameters(
{
"testcase_name": "1d_none",
"shape": (256,),
"indexers": (slice(0, 128), slice(None, 32)),
},
{
"testcase_name": "1d_offset",
"shape": (256,),
"indexers": (slice(32, 96), slice(0, 32)),
},
{
"testcase_name": "2d_extract_static",
"shape": (64, 64),
"indexers": (4, slice(0, 64)),
},
{
"testcase_name": "2d_extract_dyn",
"shape": (64, 64),
"indexers": lambda in_dev: (
pl.program_id(0) + 4 if in_dev else jnp.array(4),
slice(0, 64),
),
},
)
def test_copy_gmem_to_smem_with_multiple_gmem_indexers(self, shape, indexers):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct(shape, jnp.float32),
in_specs=(pl.BlockSpec(memory_space=plgpu.GMEM),),
scratch_shapes=[
plgpu.SMEM(shape, jnp.float32),
plgpu.Barrier(),
],
grid=(1,),
)
def kernel(x_ref_gmem, o_ref, scratch_ref, barrier_ref):
scratch_ref_sliced = scratch_ref
for indexer in indexers(True) if callable(indexers) else indexers:
scratch_ref_sliced = scratch_ref_sliced.at[indexer]
x_ref_gmem = x_ref_gmem.at[indexer]
plgpu.copy_gmem_to_smem(
x_ref_gmem, scratch_ref_sliced, barrier_ref
)
plgpu.barrier_wait(barrier_ref)
o_ref[...] = scratch_ref[...] + 1
x = jnp.arange(np.prod(shape)).astype(jnp.float32).reshape(*shape)
result = kernel(x)
ref = x + 1.0
for indexer in indexers(False) if callable(indexers) else indexers:
result = result[indexer]
ref = ref[indexer]
np.testing.assert_array_equal(result, ref)
def test_gmem_to_smem_with_multiple_smem_indexers(self):
x = jax.random.uniform(jax.random.key(0), (2, 64, 64), dtype=jnp.float32)
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([64, 64], jnp.float32),
in_specs=(pl.BlockSpec(memory_space=plgpu.GMEM),),
scratch_shapes=[
plgpu.SMEM(x.shape, jnp.float32),
plgpu.Barrier(),
],
)
def extract_x0(x_ref_gmem, o_ref, scratch_ref, barrier_ref):
plgpu.copy_gmem_to_smem(x_ref_gmem, scratch_ref, barrier_ref)
plgpu.barrier_wait(barrier_ref)
x_sliced = scratch_ref.at[0, :, :] # shape=(64, 64)
o_ref[pl.ds(0, 32), :] = x_sliced[pl.ds(0, 32), :]
o_ref[pl.ds(32, 32), :] = x_sliced[pl.ds(32, 32), :]
np.testing.assert_array_equal(extract_x0(x), x[0])
def test_gmem_to_smem_with_multiple_smem_indexers_and_transforms(self):
transforms = self.default_transforms(dtype=jnp.int32)
x = jnp.arange(512 * 512, dtype=jnp.int32).reshape(512, 512)
@functools.partial(
self.pallas_call,
grid=(4, 4),
out_shape=jax.ShapeDtypeStruct((256, 128), jnp.int32),
in_specs=(
plgpu.BlockSpec(
block_shape=(128, 128),
index_map=lambda i, j: (i, j),
memory_space=plgpu.SMEM,
transforms=transforms,
),
),
out_specs=(
plgpu.BlockSpec(
block_shape=(64, 32),
index_map=lambda i, j: (i, j),
memory_space=plgpu.SMEM,
)
),
)
def kernel(x_ref, o_ref):
x_sliced = x_ref.at[0:64, 32:96].at[:, 0:32] # get x_ref[0:64, 32:64]
o_ref[...] = x_sliced[...]
ref = jnp.concatenate([x[blk:blk+64, :] for blk in range(0, 512, 128)])
ref = jnp.concatenate(
[ref[:, blk+32:blk+64] for blk in range(0, 512, 128)], axis=1)
np.testing.assert_array_equal(kernel(x), ref)
@parameterized.product(indexer=[0, 1, 2, 3])
def test_copy_gmem_to_smem_with_indexed_barrier(self, indexer):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([128], jnp.float32),
in_specs=(pl.BlockSpec(memory_space=plgpu.GMEM),),
scratch_shapes=[
plgpu.SMEM((128,), jnp.float32),
plgpu.Barrier(num_barriers=4),
],
)
def kernel(x_ref_gmem, o_ref, scratch_ref, barrier_ref):
plgpu.copy_gmem_to_smem(
x_ref_gmem, scratch_ref, barrier_ref.at[indexer]
)
plgpu.barrier_wait(barrier_ref.at[indexer])
o_ref[...] = scratch_ref[...] + 1
x = jnp.arange(128).astype(jnp.float32)
np.testing.assert_array_equal(kernel(x), x + 1.0)
@parameterized.named_parameters(("_g2s", False), ("_s2g", True))
def test_copy_with_transforms(self, to_smem):
transforms = self.default_transforms(dtype=jnp.float32)
def kernel(x_ref, o_ref, barrier_ref):
if to_smem:
plgpu.copy_gmem_to_smem(x_ref, o_ref, barrier_ref)
plgpu.barrier_wait(barrier_ref)
else:
plgpu.commit_smem()
plgpu.copy_smem_to_gmem(x_ref, o_ref)
plgpu.wait_smem_to_gmem(0)
in_spec = pl.BlockSpec(memory_space=plgpu.GMEM)
out_spec = plgpu.BlockSpec(
transforms=transforms,
memory_space=plgpu.SMEM,
)
if not to_smem:
in_spec, out_spec = out_spec, in_spec
f = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct([128, 128], jnp.float32),
in_specs=(in_spec,),
out_specs=out_spec,
scratch_shapes=[plgpu.Barrier()],
)
x = jnp.arange(128 * 128, dtype=jnp.float32).reshape(128, 128)
np.testing.assert_array_equal(f(x), x)
def test_scoped_copy_with_transforms(self):
ts = self.default_transforms(dtype=jnp.float32)
def kernel(x_ref, o_ref, barrier_ref):
def body(tmp_ref):
plgpu.copy_gmem_to_smem(x_ref, tmp_ref, barrier_ref)
plgpu.barrier_wait(barrier_ref)
o_ref[...] = tmp_ref[...] * 2
pl.run_scoped(body, plgpu.SMEM((128, 64), jnp.float32, transforms=ts))
in_spec = pl.BlockSpec(memory_space=plgpu.GMEM)
out_spec = plgpu.BlockSpec(transforms=ts, memory_space=plgpu.SMEM)
f = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct([128, 64], jnp.float32),
in_specs=(in_spec,),
out_specs=out_spec,
scratch_shapes=[plgpu.Barrier()],
)
x = jnp.arange(128 * 64, dtype=jnp.float32).reshape(128, 64)
np.testing.assert_array_equal(f(x), x * 2)
@jtu.skip_if_mosaic_gpu_exceeds_shared_memory(device_patterns="RTX PRO 6000 Blackwell")
def test_scoped_copy_with_user_transforms(self):
self.skip_if_wg_semantics()
def kernel(x_ref, o_ref, barrier_ref):
def body(tmp_ref):
tmp_ref = plgpu.unswizzle_ref(tmp_ref, 128)
tmp_ref = plgpu.untile_ref(tmp_ref, (8, 32))
plgpu.copy_gmem_to_smem(x_ref, tmp_ref, barrier_ref)
plgpu.barrier_wait(barrier_ref)
o_ref[...] = tmp_ref[...] * 2
pl.run_scoped(body, plgpu.SMEM((8, 4, 8, 32), jnp.float32))
in_spec = pl.BlockSpec(memory_space=plgpu.GMEM)
f = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct([64, 128], jnp.float32),
in_specs=(in_spec,),
scratch_shapes=[plgpu.Barrier()],
)
x = jnp.arange(64 * 128, dtype=jnp.float32).reshape(64, 128)
np.testing.assert_array_equal(f(x), x * 2)
def test_copy_with_transforms_and_indexing(self):
self.skip_if_wg_semantics()
def kernel(x_ref, o_ref, barrier_ref):
for i in range(2):
plgpu.copy_gmem_to_smem(x_ref, o_ref.at[i], barrier_ref)
plgpu.barrier_wait(barrier_ref)
in_spec = pl.BlockSpec(memory_space=plgpu.GMEM)
out_spec = plgpu.BlockSpec(
transforms=(
plgpu.TilingTransform((8, 32)),
plgpu.TransposeTransform((0, 2, 1, 3, 4)),
plgpu.SwizzleTransform(128),
),
memory_space=plgpu.SMEM,
)
f = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct([2, 64, 128], jnp.float32),
in_specs=(in_spec,),
out_specs=out_spec,
scratch_shapes=[plgpu.Barrier()],
)
x = jnp.arange(64 * 128, dtype=jnp.float32).reshape(64, 128)
np.testing.assert_array_equal(f(x), np.stack([x, x], axis=0))
@parameterized.parameters(
((),),
((plgpu.TilingTransform((8, 32)), plgpu.SwizzleTransform(128)),),
(
(
plgpu.TilingTransform((8, 32)),
plgpu.TransposeTransform((1, 0, 2, 3)),
plgpu.SwizzleTransform(128),
),
),
)
def test_copy_gmem_to_smem_gather(self, transforms):
if not jtu.is_cuda_compute_capability_at_least("10.0"):
self.skipTest("Only works on a GPU with capability >= sm100")
self.skip_if_wg_semantics()
dtype = jnp.int32
out_shape = (64, 128)
shape = (128, 64 + out_shape[-1])
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct(out_shape, dtype),
out_specs=plgpu.BlockSpec(memory_space=plgpu.SMEM, transforms=transforms),
in_specs=(
pl.BlockSpec(memory_space=plgpu.GMEM),
pl.BlockSpec(memory_space=plgpu.SMEM),
),
scratch_shapes=[plgpu.Barrier()],
)
def kernel(x_ref_gmem, idx_ref, o_ref, barrier_ref):
idxs = plgpu.load(idx_ref, (), layout=plgpu.Layout.TMA_GATHER_INDICES)
plgpu.copy_gmem_to_smem(x_ref_gmem.at[idxs, 64:], o_ref, barrier_ref)
plgpu.barrier_wait(barrier_ref)
x = jnp.arange(math.prod(shape)).reshape(shape).astype(dtype)
idx = jax.random.permutation(jax.random.key(1234), out_shape[0]).astype(jnp.uint32)
np.testing.assert_array_equal(kernel(x, idx), x[idx, 64:])
@parameterized.parameters(
(plgpu.Layout.WGMMA, plgpu.Layout.WGMMA_TRANSPOSED),
(plgpu.Layout.WGMMA_TRANSPOSED, plgpu.Layout.WGMMA),
)
def test_transposed_load_store(self, src_layout, dst_layout):
def is_transposed(layout):
return layout == plgpu.Layout.WGMMA_TRANSPOSED
if (
self.LOWERING_SEMANTICS == mgpu.LoweringSemantics.Lane
and is_transposed(dst_layout)
):
self.skipTest("Not implemented: transposed, not tiled")
shape, dtype = (128, 128), jnp.float32
@functools.partial(
self.kernel,
out_shape=jax.ShapeDtypeStruct(shape, dtype),
)
def kernel(src_ref, dst_ref):
if is_transposed(src_layout):
src_ref = src_ref.T
if is_transposed(dst_layout):
dst_ref = dst_ref.T
src = plgpu.load(src_ref, (), layout=src_layout, optimized=False)
dst = plgpu.layout_cast(src, dst_layout)
dst_ref[...] = dst
x = jnp.arange(math.prod(shape), dtype=dtype).reshape(shape)
np.testing.assert_array_equal(kernel(x), x.T)
@parameterized.product(
src_memory_space=[plgpu.SMEM, plgpu.GMEM],
      layout=[plgpu.Layout.WG_STRIDED((128,), vec_size=1), None],
)
def test_load_to_strided_layout_with_indexing(self, src_memory_space, layout):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([2, 128], jnp.float32),
in_specs=[pl.BlockSpec(memory_space=src_memory_space)],
out_specs=plgpu.BlockSpec(memory_space=plgpu.SMEM),
)
def kernel(x_ref, o_ref):
for i in range(2):
x = plgpu.load(x_ref, (i,), layout=layout)
o_ref[i, ...] = x
x = jnp.arange(2 * 128, dtype=jnp.float32).reshape(2, 128)
np.testing.assert_array_equal(kernel(x), x)
def test_indexing_before_transpose(self):
self.skip_if_wg_semantics()
def kernel(x_ref, o_ref, barrier_ref):
for i in range(2):
plgpu.copy_gmem_to_smem(
x_ref, plgpu.transpose_ref(o_ref.at[i], (1, 0, 2)), barrier_ref
)
plgpu.barrier_wait(barrier_ref)
in_spec = pl.BlockSpec(memory_space=plgpu.GMEM)
out_spec = plgpu.BlockSpec(memory_space=plgpu.SMEM)
f = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct([2, 32, 2, 128], jnp.float32),
in_specs=(in_spec,),
out_specs=out_spec,
scratch_shapes=[plgpu.Barrier()],
)
x = jnp.arange(2 * 32 * 128, dtype=jnp.float32).reshape(2, 32, 128)
xt = x.transpose((1, 0, 2))
np.testing.assert_array_equal(f(x), np.stack([xt, xt], axis=0))
def test_copy_gmem_to_smem_in_run_scoped(self):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([256], jnp.float32),
in_specs=(pl.BlockSpec(memory_space=plgpu.GMEM),),
)
def kernel(x_ref_gmem, o_ref):
def body(barrier_ref):
def inner_body(scratch_ref):
plgpu.copy_gmem_to_smem(x_ref_gmem, scratch_ref, barrier_ref)
plgpu.barrier_wait(barrier_ref)
o_ref[...] = scratch_ref[...] + 1
pl.run_scoped(inner_body, plgpu.SMEM((256,), jnp.float32))
pl.run_scoped(body, plgpu.Barrier())
x = jnp.arange(256).astype(jnp.float32)
np.testing.assert_array_equal(kernel(x), x + 1.0)
def test_add_doubled_sum(self):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([128], jnp.float32),
)
def kernel(x_ref, o_ref):
o_ref[...] = x_ref[...] + jnp.sum(x_ref[...]) + jnp.sum(x_ref[...])
x = jnp.arange(128).astype(jnp.float32)
np.testing.assert_array_equal(kernel(x), x + x.sum()*2)
  @parameterized.product(input_factor=[0.001, 1, 10, 100, 1000])
def test_layer_norm(self, input_factor):
eps = 1e-5
gamma = 1.0
beta = 1.0
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([256], jnp.float32),
)
def layer_norm(x_ref, o_ref):
x_mean = jnp.mean(x_ref[...])
x_centered = x_ref[...] - x_mean
o_ref[...] = (
x_centered * jax.lax.rsqrt(jnp.mean(x_centered**2) + eps) * gamma
+ beta
)
def layer_norm_np(x):
x_mean = np.mean(x)
x_centered = x - x_mean
return (x_centered / np.sqrt(np.mean(x_centered**2) + eps) * gamma) + beta
    # An all-ones input is exactly representable, so expect full precision.
x = jnp.ones((256,)).astype(jnp.float32) * input_factor
np.testing.assert_allclose(layer_norm(x), layer_norm_np(x))
    # Random inputs (and most others) are not, so allow a small tolerance.
x = (
jax.random.uniform(jax.random.key(42), shape=(256,), dtype=jnp.float32)
* input_factor
)
np.testing.assert_allclose(layer_norm(x), layer_norm_np(x), rtol=5e-5)
def test_print(self):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([256], jnp.float32),
)
def kernel(x_ref, o_ref):
del x_ref, o_ref
pl.debug_print("It works!")
x = jnp.arange(256).astype(jnp.float32)
with self.capture_stdout() as output:
jax.block_until_ready(kernel(x))
self.assertEqual(output(), "It works!\n")
def test_print_wgmma_tiled_layout(self):
shape = (128, 64)
size = math.prod(shape)
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct(shape, jnp.float32),
in_specs=[
plgpu.BlockSpec(
                transforms=self.default_transforms(dtype=jnp.float32),
)
],
)
def kernel(x_ref, o_ref):
del o_ref # Unused.
pl.debug_print("prefix {}", x_ref[...])
x = jnp.arange(size, dtype=jnp.float32).reshape(shape)
with self.capture_stdout() as get_output:
jax.block_until_ready(kernel(x))
output = get_output()
results = re.findall(r"prefix \[(\d+), (\d+)\]: (\d+).?\d*", output)
self.assertLen(results, size, output)
for i, j, v in results:
i, j, v = map(int, (i, j, v))
self.assertEqual(v, i * shape[1] + j)
def test_print_scalar(self):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([256], jnp.int32),
)
def kernel(x_ref, o_ref):
del o_ref
pl.debug_print("x.sum() = {}", _sum_same_dtype(x_ref[...]))
x = jnp.arange(256, dtype=jnp.int32)
with self.capture_stdout() as output:
jax.block_until_ready(kernel(x))
self.assertIn(f"x.sum() = {x.sum()}", output())
def test_print_scalar_array(self):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([256], jnp.int32),
)
def kernel(x_ref, o_ref):
del o_ref
pl.debug_print("x.sum() = {}", _sum_same_dtype(x_ref[...]) + 1)
x = jnp.arange(256, dtype=jnp.int32)
with self.capture_stdout() as output:
jax.block_until_ready(kernel(x))
self.assertIn(f"x.sum() = {x.sum() + 1}", output())
def test_print_array(self):
in_shape = [2, 1, 64, 64]
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct(in_shape, jnp.int32),
)
def kernel(x_ref, o_ref):
del o_ref
pl.debug_print("x: {}", x_ref[...])
x = jnp.arange(math.prod(in_shape), dtype=jnp.int32).reshape(in_shape)
with self.capture_stdout() as output:
jax.block_until_ready(kernel(x))
self.assertIn("x: [1, 0, 43, 23]: 6871\n", output())
def test_print_layout(self):
shape = (128,)
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct(shape, jnp.bfloat16),
)
def kernel(x_ref, o_ref):
del o_ref
x = plgpu.layout_cast(x_ref[...], plgpu.Layout.WGMMA_ROW)
plgpu.print_layout("x: {}", x)
x = jnp.arange(math.prod(shape), dtype=jnp.bfloat16).reshape(shape)
with self.capture_stdout() as output:
jax.block_until_ready(kernel(x))
self.assertIn("x: WGMMA_ROW\n", output())
@parameterized.parameters(
(plgpu.TilingTransform((1, 32)), plgpu.SwizzleTransform(128)),
(plgpu.TilingTransform((8, 32)), plgpu.SwizzleTransform(128)),
(),
)
def test_get_swap_with_transforms(self, *transforms):
self.skip_if_wg_semantics()
shape = (128, 128)
@functools.partial(
self.pallas_call,
in_specs=[plgpu.BlockSpec(memory_space=plgpu.GMEM)],
out_specs=plgpu.BlockSpec(memory_space=plgpu.GMEM),
out_shape=jax.ShapeDtypeStruct(shape, jnp.int32),
scratch_shapes=[
plgpu.SMEM(shape, jnp.int32, transforms=tuple(transforms)),
plgpu.Barrier(),
]
)
def kernel(x_ref, o_ref, scratch_ref, barrier_ref):
plgpu.copy_gmem_to_smem(x_ref, scratch_ref, barrier_ref)
plgpu.barrier_wait(barrier_ref)
scratch_ref[...] = scratch_ref[...] * 2
plgpu.copy_smem_to_gmem(scratch_ref, o_ref)
plgpu.wait_smem_to_gmem(0)
x = jnp.arange(math.prod(shape), dtype=jnp.int32).reshape(shape)
np.testing.assert_array_equal(kernel(x), x * 2)
def test_check(self):
self.skip_if_wg_semantics()
self.enter_context(pl.enable_debug_checks(True))
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([256], jnp.int32),
)
def kernel(x_ref, o_ref):
pl.debug_check(_sum_same_dtype(x_ref[...]) > 0, "x.sum() is negative")
o_ref[...] = x_ref[...]
x = jnp.arange(256, dtype=jnp.int32)
np.testing.assert_array_equal(kernel(x), x)
def test_load_scalar(self):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct((128,), jnp.int32),
in_specs=[plgpu.BlockSpec(memory_space=plgpu.GMEM)],
)
def kernel(x_ref, o_ref):
o_ref[...] = jnp.broadcast_to(x_ref[10], (128,))
np.testing.assert_array_equal(kernel(jnp.arange(11, dtype=jnp.int32)),
jnp.full((128,), 10, dtype=jnp.int32))
def test_run_scoped(self):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32),
)
def kernel(x_ref, o_ref):
def body(tmp_ref):
self.assertEqual(tmp_ref.shape, (8, 128))
tmp_ref[...] = x_ref[...] + 1.0
return tmp_ref[...]
tmp = pl.run_scoped(body, plgpu.SMEM((8, 128), jnp.float32))
self.assertEqual(tmp.shape, (8, 128))
o_ref[...] = tmp
x = np.ones((8, 128), jnp.float32)
np.testing.assert_array_equal(kernel(x), x + 1.0)
def test_run_scoped_in_cond(self):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([256], jnp.int32),
in_specs=[pl.BlockSpec(memory_space=plgpu.GMEM)],
out_specs=pl.BlockSpec(memory_space=plgpu.SMEM),
)
def kernel(x_ref_gmem, o_ref):
def scoped_kernel(barrier_ref):
plgpu.copy_gmem_to_smem(x_ref_gmem, o_ref, barrier_ref)
plgpu.barrier_wait(barrier_ref)
def branch():
pl.run_scoped(scoped_kernel, plgpu.Barrier())
jax.lax.cond(x_ref_gmem[0] % 2 == 0, branch, branch)
x = jnp.full((256,), 1234, dtype=jnp.int32)
np.testing.assert_array_equal(kernel(x), x)
def test_program_id(self):
@functools.partial(
self.pallas_call,
in_specs=(),
out_specs=pl.BlockSpec((128,), lambda *i: i),
out_shape=jax.ShapeDtypeStruct([128 * 2], jnp.int32),
grid=2,
)
def kernel(o_ref):
o_ref[...] = jnp.full(o_ref.shape, pl.program_id(0))
np.testing.assert_array_equal(
kernel(),
jnp.array([0] * 128 + [1] * 128, dtype=jnp.int32),
)
def test_program_id_in_squashed_grid(self):
# Tests whether a grid with >3 logical dimensions is correctly squashed to
# 3 CUDA grid dimensions.
grid = (2, 3, 4, 5)
@functools.partial(
self.pallas_call,
in_specs=(),
out_specs=pl.BlockSpec((1,) * len(grid) + (128,), lambda *i: (*i, 0)),
out_shape=jax.ShapeDtypeStruct([*grid, 128], jnp.int32),
grid=grid,
)
def kernel(o_ref):
mult = 1
idx = 0
for axis in range(len(grid)-1, -1, -1):
idx += pl.program_id(axis) * mult
mult *= pl.num_programs(axis)
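      # Row-major linearization: for grid=(2, 3, 4, 5) this computes
      # ((i*3 + j)*4 + k)*5 + l, i.e. the index into arange(120).reshape(grid).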
o_ref[...] = jnp.full(o_ref.shape, idx)
np.testing.assert_array_equal(
kernel()[:, :, :, :, 0],
jnp.arange(math.prod(grid), dtype=jnp.int32).reshape(*grid)
)
@parameterized.parameters(
((2, 3), ("a", "b"), (), ()),
((2, 3), ("a", "b"), (2,), ("x",)),
((2, 3, 4), ("a", "b", "c"), (), ()),
((2, 3, 4), ("a", "b", "c"), (2,), ("x",)),
((2, 3, 4), ("a", "b", "c"), (2, 3), ("x", "y")),
((2, 3, 4, 5), ("a", "b", "c", "d"), (), ()),
((2, 3, 4, 5), ("a", "b", "c", "d"), (2,), ("x",)),
)
def test_axis_indices_in_grid(self, grid, grid_names, cluster, cluster_names):
@functools.partial(
self.kernel,
out_shape=[
jax.ShapeDtypeStruct([*cluster, *grid, 128], jnp.int32),
jax.ShapeDtypeStruct([*cluster, *grid, 128], jnp.int32)
],
grid=grid,
grid_names=grid_names,
cluster=cluster,
cluster_names=cluster_names,
)
def kernel(out1_ref, out2_ref):
pallas_grid_idx = lax.axis_index(grid_names)
cuda_grid_idx = _get_linearized_cuda_grid_index()
out_indices = [lax.axis_index(ax) for ax in (*cluster_names, *grid_names)]
out1_ref[*out_indices] = jnp.full((128,), pallas_grid_idx)
out2_ref[*out_indices] = jnp.full((128,), cuda_grid_idx)
out1, out2 = kernel()
out_per_cta = jnp.arange(math.prod(grid), dtype=jnp.int32).reshape(grid)
out1_ref = jnp.broadcast_to(out_per_cta[..., None], (*cluster, *grid, 128))
np.testing.assert_array_equal(out1, out1_ref)
padded_cluster = (1,) * (len(grid) - len(cluster)) + cluster
scaled_grid = tuple(g * c for g, c in zip(grid, padded_cluster))
original = jnp.arange(math.prod(scaled_grid), dtype=jnp.int32).reshape(
scaled_grid
)
# Untile the scaled grid to get the per-cluster grid.
interleaved_shape = tuple(val for pair in zip(grid, padded_cluster) for val in pair)
perm = tuple(range(1, 2 * len(grid), 2)) + tuple(range(0, 2 * len(grid), 2))
out2_ref = original.reshape(interleaved_shape).transpose(perm).squeeze()
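    # Worked example: with grid=(2, 3) and cluster=(2,), padded_cluster is
    # (1, 2) and scaled_grid is (2, 6), so logical block (i, j) in cluster
    # lane c maps to the linearized CUDA grid index i*6 + j*2 + c.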
out2_ref = jnp.broadcast_to(out2_ref[..., None], out2_ref.shape + (128,))
np.testing.assert_array_equal(out2, out2_ref)
def test_program_id_in_block_spec(self):
@functools.partial(
self.pallas_call,
in_specs=(pl.BlockSpec((2, 128), lambda i: (pl.program_id(0), i)),),
out_specs=pl.BlockSpec((2, 128), lambda i: (pl.program_id(0), i)),
out_shape=jax.ShapeDtypeStruct([2, 128], jnp.int32),
grid=2,
)
def kernel(x_ref, o_ref):
o_ref[...] = x_ref[...]
x = jnp.arange(2 * 128, dtype=jnp.int32).reshape([2, 128])
np.testing.assert_array_equal(kernel(x), x)
def test_num_programs(self):
@functools.partial(
self.pallas_call,
in_specs=(),
out_specs=pl.BlockSpec((128,), lambda *i: i),
out_shape=jax.ShapeDtypeStruct([128 * 2], jnp.int32),
grid=2,
)
def kernel(o_ref):
o_ref[...] = jnp.full(o_ref.shape, pl.num_programs(0), o_ref.dtype)
np.testing.assert_array_equal(
kernel(),
jnp.full([256], 2, dtype=jnp.int32),
)
def test_swizzled_blockspec_shapes(self):
spec = plgpu.BlockSpec(
(128, 64),
lambda *i: i,
transforms=self.default_transforms(dtype=jnp.float16),
)
@functools.partial(
self.pallas_call,
in_specs=[spec],
out_specs=spec,
out_shape=jax.ShapeDtypeStruct((128, 128), jnp.float16),
grid=(2, 2),
)
def kernel(x_ref, o_ref):
assert x_ref.shape == (128, 64), x_ref.shape
o_ref[...] = x_ref[...]
x = jnp.arange(128 * 128).astype(jnp.float16).reshape(128, 128)
np.testing.assert_array_equal(kernel(x), x)
@parameterized.product(force_while=[False, True])
def test_fori_loop_array(self, force_while):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct([256], jnp.int32)
)
def kernel(x_ref, o_ref):
# Equivalent to x_ref[...] + 2 + 3.
o_ref[...] = _fori_loop(
force_while, 2, 4, lambda i, x: x + i, x_ref[...]
)
x = jnp.arange(256, dtype=jnp.int32)
np.testing.assert_array_equal(kernel(x), x + 2 + 3)
@parameterized.product(unroll=[1, 2, 4])
def test_fori_loop_array_unrolled(self, unroll):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct([256], jnp.int32)
)
def kernel(x_ref, o_ref):
# Equivalent to x_ref[...] + 2 + 3 + 4 + 5.
o_ref[...] = lax.fori_loop(
2, 6, lambda i, x: x + i, x_ref[...], unroll=unroll
)
x = jnp.arange(256, dtype=jnp.int32)
np.testing.assert_array_equal(kernel(x), x + 2 + 3 + 4 + 5)
@parameterized.product(force_while=[False, True])
def test_fori_loop_scalar(self, force_while):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct([256], jnp.int32)
)
def kernel(o_ref):
# Equivalent to 2 + 3.
o_ref[...] = jax.lax.broadcast(
_fori_loop(force_while, 2, 4, lambda i, x: x + i, jnp.int32(0)),
o_ref.shape,
)
np.testing.assert_array_equal(kernel(), jnp.full([256], 5, jnp.int32))
def test_fori_loop_dynamic_bounds(self):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([256], jnp.int32),
grid=(1,)
)
def kernel(o_ref):
zero = pl.program_id(0)
# Equivalent to 2 + 3.
o_ref[...] = jax.lax.broadcast(
jax.lax.fori_loop(2 + zero, 4 + zero, lambda i, x: x + i, 0), o_ref.shape
)
np.testing.assert_array_equal(kernel(), jnp.full([256], 5, dtype=jnp.int32))
@parameterized.product(force_while=[False, True])
def test_fori_loop_tuple(self, force_while):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct([256], jnp.int32)
)
def kernel(o_ref):
def body(step, xs):
return tuple(
jax.lax.cond(step % 2 == 0, lambda x: x + 1, lambda x: x, x)
for x in xs
)
# Equivalent to 3 * (0 + 1).
o_ref[...] = jax.lax.broadcast(
sum(_fori_loop(force_while, 2, 4, body, (jnp.int32(0),) * 3)),
o_ref.shape,
)
np.testing.assert_array_equal(
kernel(), jnp.full([256], 3 * (0 + 1), jnp.int32)
)
@parameterized.product(force_while=[False, True])
def test_fori_loop_indexed_store(self, force_while):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([4, 128], jnp.float32),
)
def kernel(x_ref, y_ref, o_ref):
def body(idx, _):
o_ref[idx] = x_ref[idx] + y_ref[idx]
return ()
_fori_loop(force_while, 0, 4, body, ())
x = jnp.arange(4 * 128).reshape(4, 128).astype(jnp.float32)
y = x + 1
np.testing.assert_array_equal(kernel(x, y), x + y)
def test_while_loop(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct([128], jnp.int32)
)
def kernel(x_ref, o_ref):
o_ref[...] = jnp.zeros(o_ref.shape, dtype=jnp.int32)
def cond(acc):
_, last_o = acc
return _sum_same_dtype(last_o) < 128*10
def body(acc):
i, _ = acc
o_ref[...] += x_ref[i]
return i+1, o_ref[...]
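      # Each iteration adds a row of ones, so the loop runs exactly 10 times
      # before the sum reaches 128 * 10.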
_ = jax.lax.while_loop(cond, body, (0, o_ref[...]))
np.testing.assert_array_equal(
kernel(jnp.ones([128, 128], jnp.int32)), jnp.full([128], 10, jnp.int32)
)
def test_while_loop_layout_mismatch(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct([128], jnp.int32)
)
def kernel(o_ref):
def cond(acc):
return _sum_same_dtype(acc) < 128
def body(acc):
del acc # Unused.
o_ref[...] = o_ref[...] # side-effect to prevent DCE
# We deliberately do a cast here to trigger a layout mismatch.
return plgpu.layout_cast(
jnp.zeros(o_ref.shape, o_ref.dtype), plgpu.Layout.WGMMA_ROW
)
# Cast explicitly to cause the mismatch, otherwise layout inference will
# succeed at constructing a working program.
strided_input = plgpu.layout_cast(
o_ref[...], plgpu.Layout.WG_STRIDED(shape=(128,), vec_size=1)
)
_ = jax.lax.while_loop(cond, body, strided_input)
if self.LOWERING_SEMANTICS == plgpu.LoweringSemantics.Warpgroup:
with self.assertRaisesRegex(
ValueError, "Failed to infer a possible set of layouts",
):
kernel()
else:
with self.assertRaisesRegex(
ValueError, "has layout .*, when it should be"
):
kernel()
def test_cond(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct([256], jnp.int32)
)
def kernel(x_ref, o_ref):
jax.lax.cond(
x_ref[0] % 2 == 0,
lambda: pl.debug_print("acc % 2"),
lambda: pl.debug_print("acc"),
)
o_ref[...] = jnp.broadcast_to(jnp.asarray(0, dtype=o_ref.dtype), o_ref.shape)
x = jnp.full((256,), 1234, dtype=jnp.int32)
with self.capture_stdout() as output:
jax.block_until_ready(kernel(x))
self.assertIn("acc % 2", output())
def test_cond_returning_array(self):
@functools.partial(
self.pallas_call, out_shape=jax.ShapeDtypeStruct([256], jnp.int32)
)
def kernel(x_ref, o_ref):
acc_sum = _sum_same_dtype(x_ref[...])
acc2, acc = jax.lax.cond(
acc_sum % 2 == 0,
lambda: (acc_sum * 2, x_ref[...]),
lambda: (acc_sum, x_ref[...]),
)
o_ref[...] = jnp.broadcast_to(_sum_same_dtype(acc) + acc2, o_ref.shape)
x = jnp.arange(256, dtype=jnp.int32)
np.testing.assert_array_equal(kernel(x), jnp.broadcast_to(jnp.sum(x) * 3, [256]))
def test_tile_slicing(self):
# Not testing with warpgroup semantics, because we want to enforce a layout.
self.skip_if_wg_semantics()
shape = (256, 128)
block_spec = plgpu.BlockSpec(
transforms=self.default_transforms(dtype=jnp.uint16)
)
@functools.partial(
self.pallas_call,
in_specs=[block_spec],
out_specs=block_spec,
out_shape=jax.ShapeDtypeStruct((64, 64), jnp.uint16),
)
def kernel(x_ref, o_ref):
def sum_tiles(row, acc):
row_slice = pl.ds(row * 64, 64)
for col in range(128 // 64):
acc += x_ref[row_slice, pl.ds(col * 64, 64)]
return acc
acc = plgpu.layout_cast(jnp.zeros((64, 64), jnp.uint16), plgpu.Layout.WGMMA)
o_ref[...] = _fori_loop(False, 0, 256 // 64, sum_tiles, acc)
x = jnp.arange(math.prod(shape), dtype=jnp.uint16).reshape(shape)
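    # Reference: split x into 64x64 tiles (4 row-tiles by 2 col-tiles) and sum
    # them, mirroring the accumulation in sum_tiles above.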
y = x.reshape(256 // 64, 64, 128 // 64, 64).sum(axis=(0, 2), dtype=jnp.uint16)
np.testing.assert_array_equal(kernel(x), y)
def test_input_output_aliases(self):
    # Note that we're writing to the input ref, which aliases the output.
def kernel(a_ref, b_ref):
del b_ref
a_ref[...] = jnp.ones_like(a_ref)
a = np.zeros((64, 64), dtype=jnp.float32)
b = self.pallas_call(
kernel,
in_specs=[plgpu.BlockSpec(memory_space=plgpu.GMEM)],
out_specs=plgpu.BlockSpec(memory_space=plgpu.GMEM),
input_output_aliases={0: 0},
out_shape=a,
)(a)
np.testing.assert_array_equal(b, np.ones_like(a))
def test_slicing(self):
left = upper = slice(None, 64)
right = lower = slice(64, None)
# We rotate the four quadrants of the input clockwise.
def rotate(src, dst):
dst[upper, left] = src[lower, left]
dst[upper, right] = src[upper, left]
dst[lower, right] = src[upper, right]
dst[lower, left] = src[lower, right]
x = jnp.arange(128 * 128).astype(jnp.float16).reshape(128, 128)
spec = plgpu.BlockSpec(
transforms=self.default_transforms(dtype=jnp.float16)
)
f = self.pallas_call(rotate, out_shape=x, in_specs=[spec], out_specs=spec)
expected = np.empty_like(x)
rotate(x, expected)
np.testing.assert_array_equal(f(x), expected)
def test_layout_cast(self, shape=(256, 64)):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct(shape, jnp.float32),
)
def kernel(o_ref):
o_ref[...] = plgpu.layout_cast(jnp.full(shape, 42.0, jnp.float32), plgpu.Layout.WGMMA)
x = jnp.full(shape, 42.0, jnp.float32)
np.testing.assert_array_equal(kernel(), x)
@parameterized.product(
layouts=[
(plgpu.Layout.WGMMA, plgpu.Layout.WGMMA_TRANSPOSED),
(plgpu.Layout.TCGEN05, plgpu.Layout.TCGEN05_TRANSPOSED),
],
)
def test_transposed_layout(self, layouts):
self.skip_if_wg_semantics() # TiledLayout replication not supported.
layout, transposed_layout = layouts
dtype = jnp.dtype(jnp.float16)
shape = (256, 192)
transforms = self.default_transforms(dtype=dtype)
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct(shape[::-1], dtype),
out_specs=plgpu.BlockSpec(transforms=transforms),
)
def kernel(o_ref):
iota = plgpu.broadcasted_iota(dtype, shape, 0, layout=layout)
iota *= shape[1]
iota += plgpu.broadcasted_iota(dtype, shape, 1, layout=layout)
o_ref_t = plgpu.transpose_ref(o_ref, (1, 0))
o_ref_t[...] = plgpu.layout_cast(iota, transposed_layout)
x = jnp.arange(math.prod(shape), dtype=dtype).reshape(shape).T
np.testing.assert_array_equal(kernel(), x)
def test_profiler(self):
def kernel(x_ref, o_ref):
with jax.named_scope("add"):
with jax.named_scope("load"):
x = x_ref[...]
o = x + x
with jax.named_scope("store"):
o_ref[...] = o
with tempfile.TemporaryDirectory() as tmpdir:
x = jnp.arange(256).astype(jnp.float32)
y = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct([256], jnp.float32),
compiler_params=plgpu.CompilerParams(
profile_space=16, profile_dir=tmpdir
),
)(x)
jax.block_until_ready(y)
jax.effects_barrier()
[name] = os.listdir(tmpdir)
with open(os.path.join(tmpdir, name)) as f:
data = f.read()
self.assertEqual(data.count('"name": "add"'), 2)
self.assertEqual(data.count('"name": "load"'), 2)
self.assertEqual(data.count('"name": "store"'), 2)
np.testing.assert_array_equal(y, x + x)
@parameterized.product(
dtypes=[
(jnp.float16, jnp.float16), # Noop
(jnp.int16, jnp.bfloat16),
(jnp.int16, jnp.float16),
(jnp.uint16, jnp.float16),
(jnp.float32, jnp.int32),
(jnp.float32, jnp.uint32),
(jnp.uint32, jnp.int32),
(jnp.int32, jnp.uint32),
],
)
def test_bitcast_convert_type(self, dtypes):
in_dtype, out_dtype = dtypes
m, n = 16, 8
out_shape = jax.ShapeDtypeStruct((m, n), out_dtype)
@functools.partial(self.pallas_call, out_shape=out_shape)
def convert(x_ref, y_ref):
y_ref[...] = jax.lax.bitcast_convert_type(x_ref[...], out_shape)
x = jnp.arange(m * n, dtype=in_dtype).reshape((m, n))
np.testing.assert_array_equal(
convert(x), jax.lax.bitcast_convert_type(x, out_dtype)
)
def test_optimization_barrier(self):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct((128,), jnp.float32),
)
def kernel(x_ref, o_ref):
o_ref[...] = lax.optimization_barrier(x_ref[...])
x = jax.lax.iota(jnp.float32, 128)
np.testing.assert_array_equal(kernel(x), x)
def test_optimization_barrier_multiple_inputs(self):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct((128,), jnp.float32),
)
def kernel(x_ref, y_ref, o_ref):
x, y = lax.optimization_barrier([x_ref[...], y_ref[...]])
o_ref[...] = x + y
x = jax.lax.iota(jnp.float32, 128)
y = jax.lax.iota(jnp.float32, 128) * 3
np.testing.assert_array_equal(kernel(x, y), x + y)
def test_smem_aliasing_works(self):
self.skip_if_wg_semantics()
in_shape = (2, 256)
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([128], jnp.float32),
in_specs=[pl.BlockSpec(in_shape)],
out_specs=pl.BlockSpec((128,), memory_space=plgpu.GMEM),
scratch_shapes=[
plgpu.RefUnion(
# Note: this test exposes internals that we don't particularly
# want to phold for the sake of testing the functionality of the
# API. It's expected that this test might end up breaking in the
# future, e.g. if we decide to change our alignment requirements
# on SMEM refs---and that's OK. Users should explicitly NOT rely
# on this exact behaviour.
#
# Use a value larger than the number of bytes used for SMEM
# alignment (1024) in order to make sure that the second ref
# in the second group aliases the single ref in the first group.
plgpu.SMEM(in_shape, jnp.float32),
[
plgpu.SMEM((256,), jnp.bfloat16),
# Add an arbitrary level of nesting to make sure that we
# support PyTrees.
[
plgpu.SMEM(
(128,),
jnp.float32,
transforms=(plgpu.TilingTransform((64,)),),
),
]
],
)
],
)
def kernel(x_ref, o_ref128, aliased_ref):
smem_ref256, _, smem_ref128 = aliased_ref
# Ensure that extraction via index works the same as unfolding.
smem_ref128_2 = aliased_ref[2]
self.assertIsInstance(smem_ref128, state_types.TransformedRef)
self.assertIsInstance(smem_ref128_2, state_types.TransformedRef)
self.assertIs(smem_ref128.ref, smem_ref128_2.ref)
self.assertEqual(smem_ref128.transforms, smem_ref128_2.transforms)
extract_alias_transform, tile_transform = smem_ref128.transforms
# Ensure that the transforms provided in the scratch shapes have been
# passed correctly.
self.assertIsInstance(extract_alias_transform, gpu_core.ExtractAliasedRef)
self.assertIsInstance(tile_transform, gpu_core.UntileRef)
smem_ref256[...] = x_ref[...] + 1
plgpu.commit_smem()
plgpu.copy_smem_to_gmem(smem_ref128, o_ref128)
x = jnp.arange(512).astype(jnp.float32)
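    # The 512-byte bf16 ref is padded to the 1024-byte alignment, so the f32
    # (128,) ref aliases the second row of smem_ref256, hence x[256:384] + 1.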
np.testing.assert_array_equal(
kernel(x.reshape(in_shape)).reshape((128,)), x[256 : 256 + 128] + 1
)
def test_smem_aliasing_works_with_subbyte_dtypes(self):
self.skip_if_wg_semantics()
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([256], jnp.uint4),
in_specs=[pl.BlockSpec((128,))],
out_specs=pl.BlockSpec((256,), memory_space=plgpu.GMEM),
scratch_shapes=[
plgpu.RefUnion(
# Note: this test exposes internals that we don't particularly
                # want to uphold for the sake of testing the functionality of the
# API. It's expected that this test might end up breaking in the
# future, e.g. if we decide to change our alignment requirements
# on SMEM refs---and that's OK. Users should explicitly NOT rely
# on this exact behaviour.
#
# This allocation scheme is a bit complicated, but serves to
# test that
# 1. Refs are aligned correctly (currently to 1024 bytes);
# 2. (u)int4 references are not allocated more than 1 byte per
# 2 elements.
# The first group of refs serves to create two allocations, each
# aligned to 1024 bytes. The second group serves to create two
# allocations where the first one is exactly 1024 bytes,
# assuming 1 byte per 2 uint4 elements. As a result, if our
# implementation is correct, the second allocation of the second
# group should exactly alias the second allocation of the first
# group.
[
plgpu.SMEM((128,), jnp.int8),
plgpu.SMEM((128,), jnp.int8),
],
[plgpu.SMEM((2048,), jnp.uint4), plgpu.SMEM((256,), jnp.uint4)],
)
],
)
def kernel(x_ref, o_refi4, aliased_ref):
_, smem_refi8, _, smem_refi4 = aliased_ref
smem_refi8[...] = x_ref[...]
plgpu.commit_smem()
plgpu.copy_smem_to_gmem(smem_refi4, o_refi4)
def unpack_i4_as_i8(x):
x = x.reshape((128, 1))
x_high = x >> 4
x_low = x & 0xF
return jnp.concatenate([x_low, x_high], axis=-1).reshape((256,))
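    # Each int8 byte is reinterpreted as two uint4 lanes, low nibble first:
    # e.g. the byte 0x21 unpacks to [1, 2].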
x = jnp.arange(128).astype(jnp.int8)
test_as_i8 = jax.lax.convert_element_type(kernel(x), new_dtype=jnp.int8)
np.testing.assert_array_equal(test_as_i8[:256], unpack_i4_as_i8(x))
def test_smem_aliasing_works_for_quantization(self):
self.skip_if_wg_semantics()
shape = (64, 256)
large_ty, small_ty = jnp.bfloat16, jnp.uint4
large_swizzle = plgpu.SwizzleTransform(64 * jnp.finfo(large_ty).bits // 8)
small_swizzle = plgpu.SwizzleTransform(64 * jnp.iinfo(small_ty).bits // 8)
tiling = plgpu.TilingTransform((8, 64))
def kernel(x_gmem, o_gmem):
return pl.run_scoped(
functools.partial(scoped_kernel, x_gmem, o_gmem),
plgpu.RefUnion(
plgpu.SMEM(shape, large_ty, transforms=(tiling, large_swizzle)),
plgpu.SMEM(shape, small_ty, transforms=(tiling, small_swizzle))
),
plgpu.Barrier(num_barriers=1),
)
def scoped_kernel(x_gmem, o_gmem, aliased_ref, barrier):
ref_large_ty, ref_small_ty = aliased_ref
plgpu.copy_gmem_to_smem(x_gmem, ref_small_ty, barrier=barrier)
plgpu.barrier_wait(barrier)
ref_large_ty[...] = ref_small_ty[...].astype(ref_large_ty.dtype) * 3
plgpu.commit_smem()
plgpu.copy_smem_to_gmem(ref_large_ty, o_gmem)
plgpu.wait_smem_to_gmem(0, wait_read_only=True)
kernel_fn = self.pallas_call(
kernel,
in_specs=[pl.BlockSpec(memory_space=plgpu.GMEM)],
out_specs=pl.BlockSpec(memory_space=plgpu.GMEM),
out_shape=jax.ShapeDtypeStruct(shape, large_ty),
grid=(1, 1),
)
key = jax.random.key(42)
x = jax.random.randint(key, shape, 0, 4).astype(small_ty)
expected = x * 3
np.testing.assert_array_equal(kernel_fn(x), expected)
def test_assigning_to_ref_union_raises(self):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([128], jnp.float32),
in_specs=[pl.BlockSpec((128,))],
out_specs=pl.BlockSpec((128,), memory_space=plgpu.GMEM),
scratch_shapes=[plgpu.RefUnion(plgpu.SMEM((128,), jnp.float32))],
)
def kernel(x_ref, o_ref128, aliased_ref):
aliased_ref[...] = x_ref[...] + 1
plgpu.commit_smem()
plgpu.copy_smem_to_gmem(aliased_ref, o_ref128)
with self.assertRaisesRegex(ValueError, "can't be assigned to"):
kernel(jnp.arange(128).astype(jnp.float32))
def test_loading_from_ref_union_works(self):
self.skip_if_wg_semantics() # Transform inference not implemented.
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([128], jnp.float32),
in_specs=[pl.BlockSpec((128,))] * 2,
out_specs=pl.BlockSpec((128,), memory_space=plgpu.GMEM),
scratch_shapes=[plgpu.RefUnion(plgpu.SMEM((128,), jnp.float32)),
plgpu.SMEM((128,), jnp.float32)],
)
def kernel(x_ref, y_ref, o_ref128, ref_union, o_smem):
[aliased_ref] = ref_union
aliased_ref[...] = x_ref[...]
plgpu.commit_smem()
load_ref = lambda r: plgpu.load(r, (), layout=plgpu.Layout.TCGEN05_ROW)
# This is a regression test for b/423697560, where we used to fail to
# transform the dtype correctly when processing an aliased ref.
o_smem[...] = load_ref(aliased_ref) + load_ref(y_ref)
plgpu.commit_smem()
plgpu.copy_smem_to_gmem(o_smem, o_ref128)
x, y = (jnp.arange(128).astype(jnp.float32) for _ in range(2))
np.testing.assert_array_equal(kernel(x, y), x + y)
@parameterized.parameters(1, 2, 3)
def test_nd_loop_with_carry(self, sm_steps):
@functools.partial(
self.kernel,
out_shape=(
jax.ShapeDtypeStruct((sm_steps, 132, 128), jnp.int32),
jax.ShapeDtypeStruct((132,), jnp.int32)
),
grid=(132,),
grid_names=("sm",),
)
def kernel(o_ref, steps_ref):
def body(loop_info, carry):
idx = loop_info.index
assert len(idx) == 3
# We need to use `mode="clip"`, because the indices are not static.
flat_idx = jnp.ravel_multi_index(idx, (sm_steps, 4, 33), mode="clip")
sm_step = lax.div(
flat_idx, lax.convert_element_type(lax.axis_size("sm"), jnp.int32)
)
o_ref[sm_step, lax.axis_index("sm")] = lax.broadcast(
flat_idx, o_ref.shape[-1:]
)
return carry + 1
steps_ref[lax.axis_index("sm")] = plgpu.nd_loop(
(sm_steps, 4, 33), collective_axes="sm", init_carry=0
)(body)
result, steps = kernel() # pylint: disable=unpacking-non-sequence
for sm_step in range(sm_steps):
np.testing.assert_array_equal(steps, jnp.full((132,), sm_steps))
np.testing.assert_array_equal(
result[sm_step],
jnp.tile(
(132 * sm_step + jnp.arange(132))[:, None],
128,
),
)
@parameterized.product(
sm_steps=(1, 2, 3),
tiling=(None, 1, 2, 4),
)
def test_nd_loop(self, sm_steps: int, tiling: int | None):
if tiling is not None:
tiling = (sm_steps, tiling, 33)
@functools.partial(
self.kernel,
out_shape=jax.ShapeDtypeStruct((sm_steps, 132, 128), jnp.int32),
grid=(132,),
grid_names=("sm",),
)
def kernel(o_ref):
@plgpu.nd_loop((sm_steps, 4, 33), tiling=tiling, collective_axes="sm")
def _(loop_info):
idx = loop_info.index
assert len(idx) == 3
# We need to use `mode="clip"`, because the indices are not static.
grid = (sm_steps, 4, 33)
if tiling:
# Reconstruct the tiled grid and index.
tiled_grid = tuple(g // t for g, t in zip(grid, tiling))
grid = tiled_grid + tiling
tile_idx = tuple(
lax.div(idx, jnp.int32(t)) for idx, t in zip(idx, tiling))
subtile_idx = tuple(
lax.rem(idx, jnp.int32(t)) for idx, t in zip(idx, tiling))
idx = tile_idx + subtile_idx
flat_idx = jnp.ravel_multi_index(idx, grid, mode="clip")
sm_step = lax.div(
flat_idx, lax.convert_element_type(lax.axis_size("sm"), jnp.int32)
)
o_ref[sm_step, lax.axis_index("sm")] = lax.broadcast(
flat_idx, o_ref.shape[-1:]
)
result = kernel()
for sm_step in range(sm_steps):
np.testing.assert_array_equal(
result[sm_step],
jnp.tile((132 * sm_step + jnp.arange(132))[:, None], 128),
)
def test_lowering_error_context(self):
def body(x_ref, y_ref, barrier):
plgpu.copy_gmem_to_smem(x_ref, y_ref, barrier)
plgpu.barrier_wait(barrier)
x = jnp.arange(127, dtype=jnp.int4) # Size is not a multiple of bytes
offending_line = "plgpu.copy_gmem_to_smem(x_ref, y_ref, barrier)"
try:
self.pallas_call(
body,
in_specs=[pl.BlockSpec(memory_space=plgpu.GMEM)],
out_specs=pl.BlockSpec(memory_space=plgpu.SMEM),
out_shape=x,
scratch_shapes=[plgpu.Barrier()],
)(x)
except:
# assertRaisesRegex does not let us match the traceback.
self.assertIn(offending_line, traceback.format_exc())
else:
self.fail("Should have raised an exception")
def test_lower_with_abstract_mesh(self):
def kernel(y_ref, sem):
plgpu.semaphore_signal_multicast(sem, collective_axes='x')
# Wait for the multicast signal (each device gets signaled by all devices)
pl.semaphore_wait(sem, 2) # Wait for signals from both devices
y_ref[...] = jnp.ones_like(y_ref)
kernel_jax = pl.pallas_call(
kernel,
out_specs=pl.BlockSpec(memory_space=plgpu.GMEM),
out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32),
scratch_shapes=[plgpu.SemaphoreType.REGULAR],
)
abstract_mesh = jax.sharding.AbstractMesh((2,), ('x',))
jax.jit(jax.shard_map(
kernel_jax, mesh=abstract_mesh, in_specs=(),
out_specs=jax.P(), check_vma=False)).trace().lower(
lowering_platforms=('gpu',)) # doesn't crash
@parameterized.named_parameters(
(
f"_{''.join(map(str, collective_dims))}={collective_size}{'_' + ''.join(map(str, noncollective_dims)) if noncollective_dims else ''}",
collective_dims,
noncollective_dims,
collective_size,
)
for collective_dims in itertools.chain.from_iterable(
itertools.combinations("xyz", n) for n in range(1, 4)
)
for noncollective_dims in itertools.chain.from_iterable(
itertools.combinations("xyz", n) for n in range(3)
)
for collective_size in (1, 2, 4)
if all(d not in noncollective_dims for d in collective_dims)
)
def test_tma_load_multicast(self, collective_dims, noncollective_dims, collective_dim_size):
"""
1. Broadcast a GMEM slice to SMEM across collective CTAs.
2. Send an SMEM slice from each collective CTA to reconstruct the GMEM slice.
It's not strictly necessary to use every collective CTA, but we use them
to test that the cluster axes are used correctly.
"""
self.skip_if_wg_semantics() # User transforms are not supported.
dtype = jnp.float16
cluster = [1, 1, 1]
for d in collective_dims:
cluster["xyz".index(d)] = collective_dim_size
for d in noncollective_dims:
cluster["xyz".index(d)] = 2
if math.prod(cluster) > jtu.get_cuda_nonportable_max_cluster_size():
self.skipTest("Cluster is too big.")
collective_size = math.prod(cluster["xyz".index(d)] for d in collective_dims)
noncollective_size = math.prod(cluster) // collective_size
swizzle = 128
swizzle_elems = swizzle // jnp.dtype(dtype).itemsize
transforms = (
plgpu.TilingTransform((8, swizzle_elems)),
plgpu.SwizzleTransform(swizzle),
)
shape = (noncollective_size, collective_size * 8, swizzle_elems)
def body(x_gmem, out_gmem, smem, tma_barrier):
# Compute the index in a subset of the cluster.
def cluster_id(axes):
idx, stride = 0, 1
for d in sorted(axes):
idx += lax.axis_index(d) * stride
stride *= lax.axis_size(d)
return idx
noncollective_idx = cluster_id(noncollective_dims)
collective_idx = cluster_id(collective_dims)
plgpu.copy_gmem_to_smem(
x_gmem.at[noncollective_idx],
smem,
tma_barrier,
collective_axes=collective_dims)
plgpu.barrier_wait(tma_barrier)
plgpu.commit_smem()
collective_slice = pl.ds(8 * collective_idx, 8)
plgpu.copy_smem_to_gmem(
smem.at[collective_slice],
out_gmem.at[noncollective_idx, collective_slice, :],
)
plgpu.wait_smem_to_gmem(0)
x = np.arange(np.prod(shape), dtype=dtype).reshape(shape)
kernel = self.kernel(
body,
grid=cluster,
grid_names=("grid_x", "grid_y", "grid_z"),
cluster=cluster,
cluster_names=("x", "y", "z"),
out_shape=jax.ShapeDtypeStruct(shape, dtype),
scratch_shapes=(
plgpu.SMEM(shape[1:], dtype, transforms=transforms),
plgpu.Barrier(),
)
)
np.testing.assert_array_equal(kernel(x), x)
@parameterized.product(
layout=(
plgpu.Layout.WGMMA,
plgpu.Layout.TCGEN05,
plgpu.Layout.TCGEN05_TMEM_NATIVE,
plgpu.Layout.TCGEN05_M64_COLLECTIVE(128),
plgpu.Layout.TILED( # WGMMA, but defined as a custom tiling.
plgpu.Tiling(((64, 8), (16, 8), (8, 8), (2,))),
warp_dims=(-7,),
lane_dims=(-3, -2),
vector_dim=-1,
),
),
op=(jnp.sum, jnp.max),
)
def test_reduce_with_layout(self, layout, op):
self.skip_if_wg_semantics()
axis = -1
transforms = self.default_transforms(dtype=jnp.float32)
@functools.partial(
self.kernel,
out_shape=jnp.zeros((128,), jnp.float32),
scratch_shapes=[
plgpu.SMEM((128, 128), jnp.float32, transforms=transforms),
plgpu.SMEM((128,), jnp.float32),
plgpu.Barrier(),
],
)
def kernel(x_ref, y_ref, smem_ref, smem_reduced_ref, barrier_ref):
plgpu.copy_gmem_to_smem(x_ref, smem_ref, barrier_ref)
plgpu.barrier_wait(barrier_ref)
x_val = plgpu.load(smem_ref, (), layout=layout)
smem_reduced_ref[...] = op(x_val, axis=axis)
plgpu.commit_smem()
plgpu.copy_smem_to_gmem(smem_reduced_ref, y_ref)
plgpu.wait_smem_to_gmem(0)
x = jax.random.uniform(
jax.random.key(0), shape=(128, 128), dtype=jnp.float32)
x_result = jax.block_until_ready(kernel(x))
np.testing.assert_allclose(x_result, op(x, axis=axis), atol=1e-5)
def _test_broadcast_in_dim_base(self, shape, layout, *, axis, hint):
assert len(shape) == 2
@functools.partial(
self.kernel,
out_shape=jnp.zeros(shape, jnp.float32),
scratch_shapes=[
plgpu.SMEM((shape[1 - axis],), jnp.float32),
plgpu.SMEM(shape, jnp.float32),
plgpu.Barrier(),
],
)
def kernel(x_ref, y_ref, smem_ref, smem_out_ref, barrier_ref):
plgpu.copy_gmem_to_smem(x_ref, smem_ref, barrier_ref)
plgpu.barrier_wait(barrier_ref)
reduced_layout = layout.reduce(axis)
reduced = plgpu.load(smem_ref, (), layout=reduced_layout)
broadcasted = lax.broadcast_in_dim(reduced, shape, [1 - axis])
if hint:
broadcasted = plgpu.layout_cast(broadcasted, layout)
# Note that without the hint, the layout of broadcasted is not guaranteed
# to be the same as the layout argument!
smem_out_ref[...] = broadcasted
plgpu.commit_smem()
plgpu.copy_smem_to_gmem(smem_out_ref, y_ref)
plgpu.wait_smem_to_gmem(0)
x = jax.random.uniform(jax.random.key(0), shape=(128,), dtype=jnp.float32)
x_result = jax.block_until_ready(kernel(x))
expected = jnp.expand_dims(x, axis=axis)
expected = jnp.broadcast_to(expected, shape)
np.testing.assert_array_equal(x_result, expected)
@parameterized.product(
layout=(
plgpu.Layout.WGMMA,
plgpu.Layout.TCGEN05,
plgpu.Layout.TCGEN05_TMEM_NATIVE,
plgpu.Layout.TCGEN05_M64_COLLECTIVE(128),
),
axis=(0, 1),
hint=(True, False),
)
def test_broadcast_in_dim(self, layout, axis, hint):
self._test_broadcast_in_dim_base((128, 128), layout, axis=axis, hint=hint)
# Regression test for a crash when using a small shape.
def test_broadcast_in_dim_does_not_crash_on_small_shape(self):
shape = (128, 4)
self._test_broadcast_in_dim_base(
shape, plgpu.Layout.TCGEN05_TMEM_NATIVE, axis=1, hint=False
)
def test_broadcast_in_dim_tcgen05_native_layout(self):
@functools.partial(
self.kernel,
out_shape=jnp.zeros((128, 128), jnp.float32),
scratch_shapes=[
plgpu.SMEM((128,), jnp.float32),
plgpu.SMEM((128, 128), jnp.float32),
plgpu.Barrier(),
],
num_threads=1,
thread_name="x",
)
def kernel(x_ref, y_ref, smem_ref, smem_out_ref, barrier_ref):
plgpu.copy_gmem_to_smem(x_ref, smem_ref, barrier_ref)
plgpu.barrier_wait(barrier_ref)
reduced = plgpu.load(smem_ref, (), layout=plgpu.Layout.TCGEN05_TMEM_NATIVE.reduce(1))
broadcasted = lax.broadcast_in_dim(reduced, (128, 128), [0])
broadcasted = plgpu.layout_cast(broadcasted, plgpu.Layout.TCGEN05_TMEM_NATIVE)
smem_out_ref[...] = broadcasted
plgpu.commit_smem()
plgpu.copy_smem_to_gmem(smem_out_ref, y_ref)
plgpu.wait_smem_to_gmem(0)
x = jax.random.uniform(jax.random.key(0), shape=(128,), dtype=jnp.float32)
np.testing.assert_array_equal(kernel(x), jnp.broadcast_to(x[:, None], (128, 128)))
@parameterized.named_parameters((l.name.lower(), l) for l in plgpu.Layout)
@jtu.skip_if_mosaic_gpu_exceeds_shared_memory(
device_patterns=("RTX PRO 6000 Blackwell", "GB10$"))
def test_copy_layout(self, layout):
if layout in {
plgpu.Layout.WG_SPLAT,
plgpu.Layout.WGMMA_TRANSPOSED,
plgpu.Layout.TCGEN05_TRANSPOSED,
plgpu.Layout.TILED
}:
self.skipTest("Not the right layout for this test")
# We don't infer optimized transfer-compatible transforms for loads to
# registers with the TCGEN05_TMEM_NATIVE layout.
# TODO(allanrenucci): Manually specify transforms when supported for WG
# lowering semantic.
optimized = (
self.LOWERING_SEMANTICS == plgpu.LoweringSemantics.Lane
or layout != plgpu.Layout.TCGEN05_TMEM_NATIVE
) and layout != plgpu.Layout.TCGEN05_M64_COLLECTIVE_NATIVE
shape = (128, 128) if "tcgen05" in layout.name.lower() else (64, 128)
dtype = jnp.float32
swizzle = 128
if layout in (plgpu.Layout.WGMMA_UPCAST_4X, plgpu.Layout.WGMMA_UPCAST_2X):
dtype = jnp.float8_e5m2
swizzle = 64
transforms = self.default_transforms(dtype=dtype, swizzle=swizzle)
if layout == plgpu.Layout.TCGEN05_M64_COLLECTIVE:
layout = plgpu.Layout.TCGEN05_M64_COLLECTIVE(128)
elif layout == plgpu.Layout.TCGEN05_M64_COLLECTIVE_NATIVE:
layout = plgpu.Layout.TCGEN05_M64_COLLECTIVE_NATIVE(128)
if self.LOWERING_SEMANTICS == plgpu.LoweringSemantics.Lane:
self.skipTest("Need to add support for optimized= for stores")
elif layout == plgpu.Layout.WG_STRIDED:
layout = plgpu.Layout.WG_STRIDED(shape, 2)
transforms = ()
elif layout == plgpu.Layout.SMEM_GMEM_COPY:
layout = plgpu.Layout.SMEM_GMEM_COPY(shape, jnp.float32, swizzle=128)
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct(shape, dtype),
in_specs=[plgpu.BlockSpec(transforms=transforms)],
out_specs=plgpu.BlockSpec(transforms=transforms),
)
def kernel(x_ref, o_ref):
o_ref[...] = plgpu.load(x_ref, (), layout=layout, optimized=optimized)
x = jnp.arange(math.prod(shape), dtype=dtype).reshape(shape)
np.testing.assert_array_equal(kernel(x), x)
@parameterized.parameters(
(((0, 0),), (128, 128), (128, 128)),
(((0, 1),), (128, 128), (128, 128)),
(((1, None),), (128, 128), (128,)),
(((0, 0),), (128, 128), (128, 128)),
(((0, 0), (0, 0)), (128, 128), (128, 128)),
)
def test_vmap_kernel(self, vmap_axes, x_shape, y_shape):
rng0, rng1 = jax.random.split(jax.random.key(0))
x = jax.random.uniform(rng0, x_shape, jnp.float32)
y = jax.random.uniform(rng1, y_shape, jnp.float32)
out_shape = list(x_shape)
for x_axis, _ in vmap_axes:
del out_shape[x_axis]
out_shape = jax.ShapeDtypeStruct(out_shape, jnp.float32)
@functools.partial(self.kernel, out_shape=out_shape)
def f(x_ref, y_ref, o_ref):
o_ref[...] = x_ref[...] + y_ref[...]
f_ref = lambda x, y: x + y
for in_axes in vmap_axes:
f = jax.vmap(f, in_axes)
f_ref = jax.vmap(f_ref, in_axes)
np.testing.assert_array_equal(f(x, y), f_ref(x, y))
def test_discharge_comms_effect(self):
def body(out, sem):
pl.semaphore_signal(sem, device_id=jnp.asarray(2, jnp.int32))
f = self.kernel(
body,
out_shape=jax.ShapeDtypeStruct((), jnp.int32),
scratch_shapes=[plgpu.SemaphoreType.REGULAR],
)
jax_core.check_jaxpr(jax.make_jaxpr(f)().jaxpr)
@jtu.thread_unsafe_test() # Modifies ``os.environ``.
@jtu.skip_under_pytest("Test fails under pytest in CI")
def test_line_info(self):
self.skip_if_wg_semantics()
with jtu.set_env(MOSAIC_GPU_DUMP_PTX="1"), jtu.capture_stdout() as output:
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([256], jnp.float32),
)
def kernel(x_ref, o_ref):
o_ref[...] = x_ref[...] + x_ref[0]
jax.block_until_ready(kernel(jnp.arange(256, dtype=jnp.float32)))
ptx = output()
self.assertIn(".file", ptx)
self.assertIn(".loc", ptx)
[path] = re.findall(r'.file\s+\d+\s+"(.+)"', ptx)
self.assertEndsWith(__file__, path)
def test_collective_arrival_count(self):
def kernel(dst, collective_barrier):
plgpu.barrier_arrive(collective_barrier)
plgpu.barrier_arrive(collective_barrier)
plgpu.barrier_arrive(collective_barrier)
plgpu.barrier_arrive(collective_barrier)
plgpu.barrier_wait(collective_barrier)
dst[...] = jnp.ones_like(dst)
y = self.kernel(
kernel,
out_shape=jax.ShapeDtypeStruct((128,), jnp.int32),
scratch_shapes=[plgpu.ClusterBarrier(collective_axes=("x",), num_arrivals=4)],
cluster=(2,),
cluster_names=("x",)
)()
np.testing.assert_array_equal(y, np.ones((), dtype=np.int32))
| PallasCallTest |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/context/logger.py | {
"start": 389,
"end": 2170
} | class ____:
"""The context object available as the argument to the initialization function of a :py:class:`dagster.LoggerDefinition`.
Users should not instantiate this object directly. To construct an
`InitLoggerContext` for testing purposes, use
:py:func:`dagster.build_init_logger_context`.
Example:
.. code-block:: python
from dagster import logger, InitLoggerContext
@logger
def hello_world(init_context: InitLoggerContext):
...
"""
def __init__(
self,
logger_config: Any,
logger_def: Optional[LoggerDefinition] = None,
job_def: Optional[JobDefinition] = None,
run_id: Optional[str] = None,
):
self._logger_config = logger_config
self._job_def = check.opt_inst_param(job_def, "job_def", JobDefinition)
self._logger_def = check.opt_inst_param(logger_def, "logger_def", LoggerDefinition)
self._run_id = check.opt_str_param(run_id, "run_id")
@public
@property
def logger_config(self) -> Any:
"""The configuration data provided by the run config. The
schema for this data is defined by ``config_schema`` on the :py:class:`LoggerDefinition`.
"""
return self._logger_config
@property
def job_def(self) -> Optional[JobDefinition]:
"""The job definition currently being executed."""
return self._job_def
@public
@property
def logger_def(self) -> Optional[LoggerDefinition]:
"""The logger definition for the logger being constructed."""
return self._logger_def
@public
@property
def run_id(self) -> Optional[str]:
"""The ID for this run of the job."""
return self._run_id
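# Testing sketch (illustrative only; the config key is hypothetical):
# construct a context with the helper named in the class docstring and
# inspect it.
#
#   from dagster import build_init_logger_context
#   init_context = build_init_logger_context(logger_config={"log_level": "INFO"})
#   assert init_context.logger_config["log_level"] == "INFO"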
| InitLoggerContext |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/uninitializedVariable2.py | {
"start": 989,
"end": 1021
} | class ____(IAbstract):
p3: int
| I |
python | django__django | tests/generic_inline_admin/tests.py | {
"start": 11126,
"end": 11400
} | class ____(SimpleTestCase):
def test_no_deletion(self):
inline = MediaPermanentInline(EpisodePermanent, admin_site)
fake_request = object()
formset = inline.get_formset(fake_request)
self.assertFalse(formset.can_delete)
| NoInlineDeletionTest |
python | pytorch__pytorch | torch/distributed/fsdp/api.py | {
"start": 2904,
"end": 5102
} | class ____(Enum):
"""
This configures explicit backward prefetching, which improves throughput by
enabling communication and computation overlap in the backward pass at the
cost of slightly increased memory usage.
- ``BACKWARD_PRE``: This enables the most overlap but increases memory
usage the most. This prefetches the next set of parameters *before* the
current set of parameters' gradient computation. This overlaps the *next
all-gather* and the *current gradient computation*, and at the peak, it
holds the current set of parameters, next set of parameters, and current
set of gradients in memory.
- ``BACKWARD_POST``: This enables less overlap but requires less memory
usage. This prefetches the next set of parameters *after* the current
set of parameters' gradient computation. This overlaps the *current
reduce-scatter* and the *next gradient computation*, and it frees the
current set of parameters before allocating memory for the next set of
parameters, only holding the next set of parameters and current set of
gradients in memory at the peak.
- FSDP's ``backward_prefetch`` argument accepts ``None``, which disables
the backward prefetching altogether. This has no overlap and does not
increase memory usage. In general, we do not recommend this setting since
it may degrade throughput significantly.
For more technical context: for a single process group using the NCCL backend,
any collectives, even if issued from different streams, contend for the
same per-device NCCL stream, which implies that the relative order in which
the collectives are issued matters for overlapping. The two backward
prefetching values correspond to different issue orders.
"""
# NOTE: For both modes, the ordering that defines "current" and "next" is
# not always exact in the current implementation. A mistargeted prefetch
# simply means that the parameter memory is allocated earlier than needed,
# possibly increasing peak memory usage, but does not affect correctness.
BACKWARD_PRE = auto()
BACKWARD_POST = auto()
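# Usage sketch (illustrative, not executed here): pass one of these values
# to the FSDP constructor's ``backward_prefetch`` argument, assuming a
# process group has already been initialized and ``module`` is the model
# to wrap.
#
#   from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
#   model = FSDP(module, backward_prefetch=BackwardPrefetch.BACKWARD_PRE)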
@dataclass
| BackwardPrefetch |
python | davidhalter__jedi | test/completion/usages.py | {
"start": 2590,
"end": 2826
} | class ____(object):
#< 8 (0,8), (2,13)
def a_method(self):
#< 13 (-2,8), (0,13)
self.a_method()
#< 13 (2,8), (0,13), (3,13)
self.b_method()
def b_method(self):
self.b_method
| TestMethods |
python | pytorch__pytorch | torch/utils/benchmark/op_fuzzers/unary.py | {
"start": 321,
"end": 3154
} | class ____(Fuzzer):
def __init__(self, seed, dtype=torch.float32, cuda=False) -> None:
super().__init__(
parameters=[
# Dimensionality of x. (e.g. 1D, 2D, or 3D.)
FuzzedParameter("dim", distribution={1: 0.3, 2: 0.4, 3: 0.3}, strict=True),
# Shapes for `x`.
# It is important to test all shapes, however
# powers of two are especially important and therefore
# warrant special attention. This is done by generating
# both a value drawn from all integers between the min and
# max allowed values, and another from only the powers of two
# (both distributions are loguniform) and then randomly
# selecting between the two.
[
FuzzedParameter(
name=f"k_any_{i}",
minval=_MIN_DIM_SIZE,
maxval=_MAX_DIM_SIZE,
distribution="loguniform",
) for i in range(3)
],
[
FuzzedParameter(
name=f"k_pow2_{i}",
distribution={size: 1. / len(_POW_TWO_SIZES) for size in _POW_TWO_SIZES}
) for i in range(3)
],
[
FuzzedParameter(
name=f"k{i}",
distribution={
ParameterAlias(f"k_any_{i}"): 0.8,
ParameterAlias(f"k_pow2_{i}"): 0.2,
},
strict=True,
) for i in range(3)
],
# Steps for `x`. (Benchmarks strided memory access.)
[
FuzzedParameter(
name=f"x_step_{i}",
distribution={1: 0.8, 2: 0.06, 4: 0.06, 8: 0.04, 16: 0.04},
) for i in range(3)
],
# Repeatable entropy for downstream applications.
FuzzedParameter(name="random_value", minval=0, maxval=2 ** 32 - 1, distribution="uniform"),
],
tensors=[
FuzzedTensor(
name="x",
size=("k0", "k1", "k2"),
steps=("x_step_0", "x_step_1", "x_step_2"),
probability_contiguous=0.75,
min_elements=4 * 1024,
max_elements=32 * 1024 ** 2,
max_allocation_bytes=2 * 1024**3, # 2 GB
dim_parameter="dim",
dtype=dtype,
cuda=cuda,
),
],
seed=seed,
)
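# The size-mixture trick above can be sketched in plain Python (an
# illustration using the standard `random` module, not the Fuzzer API):
#
#   import math, random
#   def sample_dim(minval=_MIN_DIM_SIZE, maxval=_MAX_DIM_SIZE):
#       if random.random() < 0.2:  # 20% of draws: a power of two
#           return random.choice(_POW_TWO_SIZES)
#       # 80% of draws: loguniform over all integers in [minval, maxval]
#       return round(math.exp(random.uniform(math.log(minval), math.log(maxval))))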
| UnaryOpFuzzer |
python | huggingface__transformers | src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py | {
"start": 85587,
"end": 104927
} | class ____(
Qwen3OmniMoePreTrainedModelForConditionalGeneration, GenerationMixin
):
config: Qwen3OmniMoeThinkerConfig
base_model_prefix = "thinker"
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_no_split_modules = [
"Qwen3OmniMoeAudioEncoderLayer",
"Qwen3OmniMoeThinkerTextDecoderLayer",
]
_can_record_outputs = {
"hidden_states": Qwen3OmniMoeThinkerTextDecoderLayer,
"attentions": Qwen3OmniMoeThinkerTextAttention,
"router_logits": OutputRecorder(Qwen3OmniMoeThinkerTextSparseMoeBlock, index=1),
}
def __init__(self, config):
super().__init__(config)
self.audio_tower = Qwen3OmniMoeAudioEncoder._from_config(config.audio_config)
self.visual = Qwen3OmniMoeVisionEncoder._from_config(config.vision_config)
self.vocab_size = config.text_config.vocab_size
self.model = Qwen3OmniMoeThinkerTextModel._from_config(config.text_config)
self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1
self.spatial_merge_size = config.vision_config.spatial_merge_size
self.rope_deltas = None
self.num_experts = config.text_config.num_experts
self.num_experts_per_tok = config.text_config.num_experts_per_tok
self.router_aux_loss_coef = config.text_config.router_aux_loss_coef
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
def get_video_features(
self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor] = None
):
"""
Encodes videos into continuous embeddings that can be forwarded to the language model.
Args:
pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The tensors corresponding to the input videos.
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
The temporal, height and width of feature shape of each video in LLM.
"""
pixel_values_videos = pixel_values_videos.type(self.visual.dtype)
video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw)
return video_embeds
def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor] = None):
"""
Encodes images into continuous embeddings that can be forwarded to the language model.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
The tensors corresponding to the input images.
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
The temporal, height and width of feature shape of each image in LLM.
"""
pixel_values = pixel_values.type(self.visual.dtype)
image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw)
return image_embeds
def get_audio_features(
self,
input_features: torch.FloatTensor,
feature_attention_mask: Optional[torch.LongTensor] = None,
audio_feature_lengths: Optional[torch.LongTensor] = None,
):
"""
Encodes audios into continuous embeddings that can be forwarded to the language model.
Args:
input_features (`torch.FloatTensor`):
The tensors corresponding to the input audios.
feature_attention_mask (`torch.LongTensor`, *optional*):
Mask to avoid performing attention on padding feature indices. Mask values selected in `[0, 1]`:
audio_feature_lengths (`torch.LongTensor` of shape `(num_audios)`, *optional*):
The length of feature shape of each audio in LLM.
"""
if feature_attention_mask is not None:
audio_feature_lengths = torch.sum(feature_attention_mask, dim=1)
input_features = input_features.permute(0, 2, 1)[feature_attention_mask.bool()].permute(1, 0)
else:
audio_feature_lengths = None
feature_lens = audio_feature_lengths if audio_feature_lengths is not None else feature_attention_mask.sum(-1)
audio_outputs = self.audio_tower(
input_features,
feature_lens=feature_lens,
)
audio_features = audio_outputs.last_hidden_state
return audio_features
def get_placeholder_mask(
self,
input_ids: torch.LongTensor,
inputs_embeds: torch.FloatTensor,
image_features: Optional[torch.FloatTensor] = None,
video_features: Optional[torch.FloatTensor] = None,
):
"""
Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
equal to the length of multimodal features. If the lengths are different, an error is raised.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
special_video_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_video_mask = special_video_mask.all(-1)
special_audio_mask = (
inputs_embeds
== self.get_input_embeddings()(
torch.tensor(self.config.audio_token_id, dtype=torch.long, device=inputs_embeds.device)
)
).all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
special_video_mask = input_ids == self.config.video_token_id
special_audio_mask = input_ids == self.config.audio_token_id
n_image_tokens = special_image_mask.sum()
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
if image_features is not None and inputs_embeds[special_image_mask].numel() != image_features.numel():
raise ValueError(
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.shape[0]}"
)
n_video_tokens = special_video_mask.sum()
special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
if video_features is not None and inputs_embeds[special_video_mask].numel() != video_features.numel():
raise ValueError(
f"Videos features and video tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0]}"
)
special_audio_mask = special_audio_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
return special_image_mask, special_video_mask, special_audio_mask
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids=None,
input_features=None,
pixel_values=None,
pixel_values_videos=None,
image_grid_thw=None,
video_grid_thw=None,
attention_mask=None,
feature_attention_mask=None,
audio_feature_lengths=None,
position_ids=None,
past_key_values=None,
inputs_embeds=None,
rope_deltas=None,
labels=None,
use_cache=None,
output_router_logits: Optional[bool] = None,
use_audio_in_video=None,
cache_position=None,
video_second_per_grid=None,
**kwargs,
) -> Union[tuple, Qwen3OmniMoeThinkerCausalLMOutputWithPast]:
r"""
image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
The temporal, height and width of feature shape of each image in LLM.
video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
The temporal, height and width of feature shape of each video in LLM.
feature_attention_mask (`torch.Tensor` of shape `(batch_size, feature_sequence_length)`, *optional*):
Mask to avoid performing attention on padding feature indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
audio_feature_lengths (`torch.LongTensor` of shape `(num_audios)`, *optional*):
The length of feature shape of each audio in LLM.
rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
The rope index difference between sequence length and multimodal rope.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
use_audio_in_video (`bool`, *optional*):
Whether or not use audio track in video, should same as the parameter in `process_audio_info`.
video_second_per_grid (`torch.LongTensor` of shape `(num_videos)`, *optional*):
Number of seconds per grid for each video, used for temporal feature mapping.
Example:
```python
>>> from io import BytesIO
>>> from urllib.request import urlopen
>>> import librosa
>>> from qwen_vl_utils import process_vision_info
>>> from transformers import Qwen3OmniMoeProcessor, Qwen3OmniMoeThinkerForConditionalGeneration
>>> thinker = Qwen3OmniMoeThinkerForConditionalGeneration.from_pretrained("Qwen/Qwen2.5-Omni-7B")
>>> processor = Qwen3OmniMoeProcessor.from_pretrained("Qwen/Qwen2.5-Omni-7B")
>>> conversations = [
>>> {'role': 'system', 'content': 'You are a helpful voice chat bot, and please respond to me in a casual conversation manner using random voice.'},
>>> {"role": "user", "content": [
>>> {"type": "image", "image_url": "https://www.ilankelman.org/stopsigns/australia.jpg"},
>>> {"type": "audio", "audio_url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3"},
>>> ]},
>>> ]
>>> text = processor.apply_chat_template(conversations, add_generation_prompt=True, tokenize=False)
>>> audios = [librosa.load(BytesIO(urlopen(conversations[1]['content'][1]['audio_url']).read()), sr=processor.feature_extractor.sampling_rate)]
>>> images, videos = process_vision_info(conversations)
>>> inputs = processor(text=text, audio=audios, images=images, videos=videos, return_tensors="pt", padding=True)
>>> # Generate
>>> inputs['use_audio_in_video'] = `True` or `False`
>>> generation = thinker.generate(**inputs, max_new_tokens=2048)
>>> generate_ids = generation[:, inputs.input_ids.size(1):]
>>> response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
```"""
output_router_logits = (
output_router_logits if output_router_logits is not None else self.config.text_config.output_router_logits
)
if inputs_embeds is None:
# 1. Extract the input embeddings
inputs_embeds = self.get_input_embeddings()(input_ids)
visual_embeds_multiscale = None
visual_pos_masks = None
image_mask, video_mask = None, None
# 2. Merge text , audios , image and video
if input_features is not None:
audio_features = self.get_audio_features(
input_features,
feature_attention_mask=feature_attention_mask,
audio_feature_lengths=audio_feature_lengths,
)
audio_features = audio_features.to(inputs_embeds.device, inputs_embeds.dtype)
_, _, audio_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds)
inputs_embeds = inputs_embeds.masked_scatter(audio_mask, audio_features)
if pixel_values is not None:
image_embeds, image_embeds_multiscale = self.get_image_features(pixel_values, image_grid_thw)
image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
image_mask, _, _ = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds
)
inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
if pixel_values_videos is not None:
video_embeds, video_embeds_multiscale = self.get_video_features(pixel_values_videos, video_grid_thw)
video_embeds = video_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
_, video_mask, _ = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds
)
inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)
if image_mask is not None and video_mask is not None:
image_mask = image_mask[..., 0]
video_mask = video_mask[..., 0]
visual_pos_masks = video_mask | image_mask
visual_embeds_multiscale_joint = ()
image_mask_joint = image_mask[visual_pos_masks]
video_mask_joint = video_mask[visual_pos_masks]
for img_embed, vid_embed in zip(image_embeds_multiscale, video_embeds_multiscale):
embed_joint = img_embed.new_zeros(visual_pos_masks.sum(), img_embed.shape[-1])
embed_joint[image_mask_joint, :] = img_embed
embed_joint[video_mask_joint, :] = vid_embed
visual_embeds_multiscale_joint = visual_embeds_multiscale_joint + (embed_joint,)
visual_embeds_multiscale = visual_embeds_multiscale_joint
elif image_mask is not None:
image_mask = image_mask[..., 0]
visual_embeds_multiscale = image_embeds_multiscale
visual_pos_masks = image_mask
elif video_mask is not None:
video_mask = video_mask[..., 0]
visual_embeds_multiscale = video_embeds_multiscale
visual_pos_masks = video_mask
if feature_attention_mask is not None:
audio_feature_lengths = torch.sum(feature_attention_mask, dim=1)
else:
audio_feature_lengths = None
if attention_mask is not None and position_ids is None:
if (
cache_position is None
or (cache_position is not None and cache_position[0] == 0)
or self.rope_deltas is None
):
delta0 = (1 - attention_mask).sum(dim=-1).unsqueeze(1)
position_ids, rope_deltas = self.get_rope_index(
input_ids,
image_grid_thw,
video_grid_thw,
attention_mask,
use_audio_in_video,
audio_feature_lengths,
video_second_per_grid,
)
rope_deltas = rope_deltas - delta0
self.rope_deltas = rope_deltas
else:
batch_size, seq_length = input_ids.shape
delta = cache_position[0] + self.rope_deltas if cache_position is not None else 0
position_ids = torch.arange(seq_length, device=input_ids.device)
position_ids = position_ids.view(1, -1).expand(batch_size, -1)
position_ids = position_ids.add(delta)
position_ids = position_ids.unsqueeze(0).expand(3, -1, -1)
outputs = self.model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_router_logits=output_router_logits,
cache_position=cache_position,
deepstack_visual_embeds=visual_embeds_multiscale,
visual_pos_masks=visual_pos_masks,
**kwargs,
)
hidden_states = outputs[0]
logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
loss = self.loss_function(
logits=logits, labels=labels, vocab_size=self.config.get_text_config().vocab_size
)
aux_loss = None
if output_router_logits:
aux_loss = load_balancing_loss_func(
outputs.router_logits,
self.num_experts,
self.num_experts_per_tok,
attention_mask,
)
if labels is not None:
loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device
return Qwen3OmniMoeThinkerCausalLMOutputWithPast(
loss=loss,
logits=logits,
aux_loss=aux_loss,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
past_key_values=outputs.past_key_values,
rope_deltas=self.rope_deltas,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
attention_mask=None,
inputs_embeds=None,
cache_position=None,
position_ids=None,
use_cache=True,
pixel_values=None,
pixel_values_videos=None,
image_grid_thw=None,
video_grid_thw=None,
input_features=None,
feature_attention_mask=None,
use_audio_in_video=False,
video_second_per_grid=None,
**kwargs,
):
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
cache_position=cache_position,
position_ids=position_ids,
use_cache=use_cache,
pixel_values=pixel_values,
pixel_values_videos=pixel_values_videos,
image_grid_thw=image_grid_thw,
video_grid_thw=video_grid_thw,
input_features=input_features,
feature_attention_mask=feature_attention_mask,
use_audio_in_video=use_audio_in_video,
video_second_per_grid=video_second_per_grid,
**kwargs,
)
model_inputs["position_ids"] = None
if cache_position[0] != 0:
model_inputs["pixel_values"] = None
model_inputs["pixel_values_videos"] = None
model_inputs["input_features"] = None
return model_inputs
| Qwen3OmniMoeThinkerForConditionalGeneration |
python | pallets__werkzeug | examples/i18nurls/application.py | {
"start": 1836,
"end": 2865
} | class ____:
def __init__(self):
from i18nurls import views
self.not_found = views.page_not_found
def __call__(self, environ, start_response):
urls = map.bind_to_environ(environ)
req = Request(environ, urls)
try:
endpoint, args = urls.match(req.path)
req.matched_url = (endpoint, args)
if endpoint == "#language_select":
lng = req.accept_languages.best
lng = lng.split("-")[0].lower() if lng else "en"
index_url = urls.build("index", {"lang_code": lng})
resp = Response(f"Moved to {index_url}", status=302)
resp.headers["Location"] = index_url
else:
req.language = args.pop("lang_code", None)
resp = views[endpoint](req, **args)
except NotFound:
resp = self.not_found(req)
except (RequestRedirect, HTTPException) as e:
resp = e
return resp(environ, start_response)
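# A minimal way to serve this WSGI app locally (a sketch; assumes the
# example package is importable so that `map` and `views` resolve):
#
#   from werkzeug.serving import run_simple
#   run_simple("localhost", 5000, Application(), use_reloader=True)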
| Application |
python | keras-team__keras | keras/src/layers/reshaping/zero_padding1d_test.py | {
"start": 157,
"end": 2719
} | class ____(testing.TestCase):
@parameterized.parameters(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
)
def test_zero_padding_1d(self, data_format):
inputs = np.random.rand(1, 2, 3)
outputs = layers.ZeroPadding1D(padding=(1, 2), data_format=data_format)(
inputs
)
if data_format == "channels_last":
for index in [0, -1, -2]:
self.assertAllClose(outputs[:, index, :], 0.0)
self.assertAllClose(outputs[:, 1:-2, :], inputs)
else:
for index in [0, -1, -2]:
self.assertAllClose(outputs[:, :, index], 0.0)
self.assertAllClose(outputs[:, :, 1:-2], inputs)
@parameterized.named_parameters(("one_tuple", (2, 2)), ("one_int", 2))
def test_zero_padding_1d_with_same_padding(self, padding):
inputs = np.random.rand(1, 2, 3)
outputs = layers.ZeroPadding1D(
padding=padding, data_format="channels_last"
)(inputs)
for index in [0, 1, -1, -2]:
self.assertAllClose(outputs[:, index, :], 0.0)
self.assertAllClose(outputs[:, 2:-2, :], inputs)
def test_zero_padding_1d_with_dynamic_spatial_dim(self):
input_layer = layers.Input(batch_shape=(1, None, 3))
padded = layers.ZeroPadding1D((1, 2), data_format="channels_last")(
input_layer
)
self.assertEqual(padded.shape, (1, None, 3))
input_layer = layers.Input(batch_shape=(1, 2, 3))
padded = layers.ZeroPadding1D((1, 2), data_format="channels_last")(
input_layer
)
self.assertEqual(padded.shape, (1, 5, 3))
@parameterized.parameters(
{"padding": (1,)},
{"padding": (1, 2, 3)},
{"padding": "1"},
)
def test_zero_padding_1d_errors_if_padding_argument_invalid(self, padding):
with self.assertRaises(ValueError):
layers.ZeroPadding1D(padding)
@parameterized.parameters(
{"data_format": "channels_first"},
{"data_format": "channels_last"},
)
def test_zero_padding_1d_get_config(self, data_format):
layer = layers.ZeroPadding1D(padding=(1, 2), data_format=data_format)
expected_config = {
"dtype": dtype_policies.serialize(layer.dtype_policy),
"data_format": data_format,
"name": layer.name,
"padding": (1, 2),
"trainable": layer.trainable,
}
self.assertEqual(layer.get_config(), expected_config)
| ZeroPadding1DTest |
python | huggingface__transformers | src/transformers/models/falcon_mamba/modular_falcon_mamba.py | {
"start": 11078,
"end": 25556
} | class ____(MambaMixer):
def warn_slow_implementation(self):
causal_conv1d = lazy_load_kernel("causal-conv1d")
causal_conv1d_update, causal_conv1d_fn = (
(causal_conv1d.causal_conv1d_update, causal_conv1d.causal_conv1d_fn)
if causal_conv1d is not None
else (None, None)
)
is_fast_path_available = all(
(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)
)
if not is_fast_path_available:
if self.use_falcon_mambapy:
if is_mambapy_available():
logger.warning_once(
"The fast path is not available because one of `(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)`"
" is None. Falling back to the mamba.py backend. To install follow https://github.com/state-spaces/mamba/#installation for mamba-ssm and"
" https://github.com/Dao-AILab/causal-conv1d or `pip install kernels` for causal-conv1d"
)
else:
raise ImportError(
"use_mambapy is set to True but the mambapy package is not installed. To install it follow https://github.com/alxndrTL/mamba.py."
)
else:
logger.warning_once(
"The fast path is not available because one of `(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)`"
" is None. Falling back to the sequential implementation of Mamba, as use_mambapy is set to False. To install follow https://github.com/state-spaces/mamba/#installation for mamba-ssm and"
" https://github.com/Dao-AILab/causal-conv1d or `pip install kernels` for causal-conv1d. For the mamba.py backend, follow https://github.com/alxndrTL/mamba.py."
)
def __init__(self, config: FalconMambaConfig, layer_idx: int):
super().__init__(config, layer_idx)
# Triton expects RMS weights to be passed even when they are not learnable, so we need to create these weights here
self.register_buffer(
"b_c_rms", torch.nn.Parameter(torch.ones(self.ssm_state_size), requires_grad=False), persistent=False
)
self.register_buffer(
"dt_rms", torch.nn.Parameter(torch.ones(self.intermediate_size), requires_grad=False), persistent=False
)
self.rms_eps = config.mixer_rms_eps
def cuda_kernels_forward(
self,
hidden_states: torch.Tensor,
cache_params: Optional[FalconMambaCache] = None,
cache_position: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
):
# 1. Gated MLP's linear projection
projected_states = self.in_proj(hidden_states).transpose(1, 2)
if self.training and cache_params is None: # Doesn't support outputting the states -> used for training
contextualized_states = mamba_inner_fn(
projected_states,
self.conv1d.weight,
self.conv1d.bias if self.use_conv_bias else None,
self.x_proj.weight,
self.dt_proj.weight,
self.out_proj.weight,
self.out_proj.bias.float() if self.use_bias else None,
-torch.exp(self.A_log.float()),
None, # input-dependent B
None, # input-dependent C
self.D.float(),
delta_bias=self.dt_proj.bias.float(),
delta_softplus=True,
b_rms_weight=self.b_c_rms,
c_rms_weight=self.b_c_rms,
dt_rms_weight=self.dt_rms,
b_c_dt_rms_eps=self.rms_eps,
)
else:
causal_conv1d = lazy_load_kernel("causal-conv1d")
causal_conv1d_update, causal_conv1d_fn = (
(causal_conv1d.causal_conv1d_update, causal_conv1d.causal_conv1d_fn)
if causal_conv1d is not None
else (None, None)
)
hidden_states, gate = projected_states.chunk(2, dim=1)
if attention_mask is not None:
hidden_states = hidden_states * attention_mask.unsqueeze(1)
# 2. Convolution sequence transformation
conv_weights = self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2))
if cache_params is not None and cache_position[0] > 0:
hidden_states = causal_conv1d_update(
hidden_states.squeeze(-1),
cache_params.conv_states[self.layer_idx],
conv_weights,
self.conv1d.bias,
self.activation,
)
hidden_states = hidden_states.unsqueeze(-1)
else:
if cache_params is not None:
conv_states = nn.functional.pad(
hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0)
)
cache_params.update_conv_state(self.layer_idx, conv_states, cache_position)
hidden_states = causal_conv1d_fn(
hidden_states, conv_weights, self.conv1d.bias, activation=self.activation
)
if attention_mask is not None:
hidden_states = hidden_states * attention_mask.unsqueeze(1)
# 3. State Space Model sequence transformation
# 3.a. input varying initialization of time_step, B and C
ssm_parameters = self.x_proj(hidden_states.transpose(1, 2))
time_step, B, C = torch.split(
ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1
)
B = rms_forward(B, variance_epsilon=self.rms_eps)
C = rms_forward(C, variance_epsilon=self.rms_eps)
time_step = rms_forward(time_step, variance_epsilon=self.rms_eps)
# In case the model has been quantized, we need a hack to properly call the `nn.Linear` module
# at the price of a small overhead.
if hasattr(self.config, "_pre_quantization_dtype"):
discrete_time_step = (self.dt_proj(time_step) - self.dt_proj.bias).transpose(1, 2)
else:
discrete_time_step = self.dt_proj.weight @ time_step.transpose(1, 2)
A = -torch.exp(self.A_log.float())
# 3.c perform the recurrence y ← SSM(A, B, C)(x)
time_proj_bias = self.dt_proj.bias.float() if hasattr(self.dt_proj, "bias") else None
if cache_params is not None and cache_position[0] > 0:
scan_outputs = selective_state_update(
cache_params.ssm_states[self.layer_idx],
hidden_states[..., 0],
discrete_time_step[..., 0],
A,
B[:, 0],
C[:, 0],
self.D,
gate[..., 0],
time_proj_bias,
dt_softplus=True,
).unsqueeze(-1)
else:
scan_outputs, ssm_state = selective_scan_fn(
hidden_states,
discrete_time_step,
A,
B.transpose(1, 2),
C.transpose(1, 2),
self.D.float(),
gate,
time_proj_bias,
delta_softplus=True,
return_last_state=True,
)
if ssm_state is not None and cache_params is not None:
cache_params.update_ssm_state(self.layer_idx, ssm_state)
# 4. Final linear projection
contextualized_states = self.out_proj(scan_outputs.transpose(1, 2))
return contextualized_states
def slow_forward(
self,
input_states,
cache_params: Optional[FalconMambaCache] = None,
cache_position: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
):
batch_size, seq_len, _ = input_states.shape
dtype = input_states.dtype
# 1. Gated MLP's linear projection
projected_states = self.in_proj(input_states).transpose(1, 2) # [batch, 2 * intermediate_size, seq_len]
hidden_states, gate = projected_states.chunk(2, dim=1)
if attention_mask is not None:
hidden_states = hidden_states * attention_mask.unsqueeze(1)
# 2. Convolution sequence transformation
if cache_params is not None:
ssm_state = cache_params.ssm_states[self.layer_idx].clone()
ssm_state = ssm_state.to(hidden_states.device)
# Use `cache_position.shape[0]` to check whether we are in the prefill
# stage. It is equivalent to checking `cache_position[0] == 0`, but that
# check breaks dynamo fullgraph constraints.
if cache_position is not None and cache_position.shape[0] == self.conv_kernel_size:
conv_state = nn.functional.pad(hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0))
cache_params.update_conv_state(self.layer_idx, conv_state, cache_position)
hidden_states = self.act(
self.conv1d(hidden_states)[..., :seq_len]
) # [batch, intermediate_size, seq_len]
else:
conv_state = cache_params.update_conv_state(self.layer_idx, hidden_states, cache_position)
conv_state = conv_state.to(self.conv1d.weight.device)
hidden_states = torch.sum(conv_state * self.conv1d.weight[:, 0, :], dim=-1)
if self.use_conv_bias:
hidden_states += self.conv1d.bias
hidden_states = (
self.act(hidden_states).to(dtype).unsqueeze(-1)
) # [batch, intermediate_size, 1] : decoding
else:
ssm_state = torch.zeros(
(batch_size, self.intermediate_size, self.ssm_state_size), device=hidden_states.device, dtype=dtype
)
hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len]) # [batch, intermediate_size, seq_len]
if attention_mask is not None:
hidden_states = hidden_states * attention_mask.unsqueeze(1)
# 3. State Space Model sequence transformation
# 3.a. Selection: [batch, seq_len, self.time_step_rank + self.ssm_state_size * 2]
ssm_parameters = self.x_proj(hidden_states.transpose(1, 2))
time_step, B, C = torch.split(
ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1
)
B = rms_forward(B, variance_epsilon=self.rms_eps)
C = rms_forward(C, variance_epsilon=self.rms_eps)
time_step = rms_forward(time_step, variance_epsilon=self.rms_eps)
discrete_time_step = self.dt_proj(time_step) # [batch, seq_len, intermediate_size]
discrete_time_step = nn.functional.softplus(discrete_time_step).transpose(
1, 2
) # [batch, intermediate_size, seq_len]
# 3.b. Discretization: B and C to [batch, seq_len, intermediate_size, ssm_state_size] (SRAM)
A = -torch.exp(self.A_log.float()) # [intermediate_size, ssm_state_size]
discrete_A = torch.exp(
A[None, :, None, :] * discrete_time_step[:, :, :, None]
) # [batch, intermediate_size, seq_len, ssm_state_size]
discrete_B = (
discrete_time_step[:, :, :, None] * B[:, None, :, :].float()
) # [batch, intermediate_size, seq_len, ssm_state_size]
deltaB_u = discrete_B * hidden_states[:, :, :, None].float()
# 3.c perform the recurrence y ← SSM(A, B, C)(x)
if self.use_falcon_mambapy and self.training and cache_params is None:
hs = pscan(
discrete_A.transpose(1, 2), deltaB_u.transpose(1, 2)
) # [batch, seq_len, intermediate_size, ssm_state_size]
scan_output = (hs @ C.unsqueeze(-1)).squeeze(3).transpose(1, 2) # [batch, intermediate_size, seq_len]
scan_output = scan_output + hidden_states * self.D[None, :, None]
scan_output = scan_output * self.act(gate)
else:
scan_outputs = []
for i in range(seq_len):
ssm_state = (
discrete_A[:, :, i, :] * ssm_state + deltaB_u[:, :, i, :]
) # [batch, intermediate_size, ssm_state]
scan_output = torch.matmul(
ssm_state.to(dtype), C[:, i, :].unsqueeze(-1)
) # [batch, intermediate_size, 1]
scan_outputs.append(scan_output[:, :, 0])
scan_output = torch.stack(scan_outputs, dim=-1) # [batch, intermediate_size, seq_len]
scan_output = scan_output + (hidden_states * self.D[None, :, None])
scan_output = scan_output * self.act(gate)
if cache_params is not None:
cache_params.update_ssm_state(self.layer_idx, ssm_state)
# 4. Final linear projection
contextualized_states = self.out_proj(scan_output.transpose(1, 2)) # [batch, seq_len, hidden_size]
return contextualized_states
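# For reference, the scan above implements the standard discretized
# state-space recurrence, evaluated independently per channel (with B, C
# and dt RMS-normalized first, the FalconMamba-specific change relative
# to plain Mamba):
#   h_t = exp(A * dt_t) * h_{t-1} + (dt_t * B_t) * x_t
#   y_t = C_t . h_t + D * x_t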
def forward(
self,
hidden_states,
cache_params: Optional[FalconMambaCache] = None,
cache_position: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
):
causal_conv1d = lazy_load_kernel("causal-conv1d")
causal_conv1d_update, causal_conv1d_fn = (
(causal_conv1d.causal_conv1d_update, causal_conv1d.causal_conv1d_fn)
if causal_conv1d is not None
else (None, None)
)
is_fast_path_available = all(
(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)
)
if is_fast_path_available and "cuda" in self.x_proj.weight.device.type and not is_torchdynamo_compiling():
return self.cuda_kernels_forward(hidden_states, cache_params, cache_position, attention_mask)
return self.slow_forward(hidden_states, cache_params, cache_position, attention_mask)
| FalconMambaMixer |
python | fastai__fastai | fastai/vision/models/unet.py | {
"start": 783,
"end": 2091
} | class ____(Module):
"A quasi-UNet block, using `PixelShuffle_ICNR upsampling`."
@delegates(ConvLayer.__init__)
def __init__(self, up_in_c, x_in_c, hook, final_div=True, blur=False, act_cls=defaults.activation,
self_attention=False, init=nn.init.kaiming_normal_, norm_type=None, **kwargs):
self.hook = hook
self.shuf = PixelShuffle_ICNR(up_in_c, up_in_c//2, blur=blur, act_cls=act_cls, norm_type=norm_type)
self.bn = BatchNorm(x_in_c)
ni = up_in_c//2 + x_in_c
nf = ni if final_div else ni//2
self.conv1 = ConvLayer(ni, nf, act_cls=act_cls, norm_type=norm_type, **kwargs)
self.conv2 = ConvLayer(nf, nf, act_cls=act_cls, norm_type=norm_type,
xtra=SelfAttention(nf) if self_attention else None, **kwargs)
self.relu = act_cls()
apply_init(nn.Sequential(self.conv1, self.conv2), init)
def forward(self, up_in):
s = self.hook.stored
up_out = self.shuf(up_in)
ssh = s.shape[-2:]
if ssh != up_out.shape[-2:]:
up_out = F.interpolate(up_out, s.shape[-2:], mode='nearest')
cat_x = self.relu(torch.cat([up_out, self.bn(s)], dim=1))
return self.conv2(self.conv1(cat_x))
# %% ../../../nbs/15a_vision.models.unet.ipynb 8
| UnetBlock |
python | django-extensions__django-extensions | tests/management/commands/test_list_signals.py | {
"start": 118,
"end": 845
} | class ____(TestCase):
"""Tests for list_signals command."""
def setUp(self):
self.out = StringIO()
def test_should_print_all_signals(self):
expected_result = """django.contrib.sites.models.Site (site)
pre_delete
django.contrib.sites.models.clear_site_cache #
pre_save
django.contrib.sites.models.clear_site_cache #
tests.testapp.models.HasOwnerModel (has owner model)
pre_save
tests.testapp.models.dummy_handler #
"""
call_command("list_signals", stdout=self.out)
# Strip line numbers to make the test less brittle
out = re.sub(r"(?<=#)\d+", "", self.out.getvalue(), flags=re.M)
self.assertIn(expected_result, out)
| ListSignalsTests |
python | doocs__leetcode | solution/3700-3799/3746.Minimum String Length After Balanced Removals/Solution.py | {
"start": 0,
"end": 143
} | class ____:
def minLengthAfterRemovals(self, s: str) -> int:
a = s.count("a")
b = len(s) - a
return abs(a - b)
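# Worked example (assuming each removal deletes one 'a' together with one
# 'b', which is what the formula encodes): for s = "aabab", a = 3 and
# b = 2, so min(a, b) = 2 pairs can be removed and abs(a - b) = 1
# character remains.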
| Solution |
python | networkx__networkx | networkx/generators/tests/test_geometric.py | {
"start": 170,
"end": 2527
} | class ____:
"""Unit tests for :func:`~networkx.random_geometric_graph`"""
def test_number_of_nodes(self):
G = nx.random_geometric_graph(50, 0.25, seed=42)
assert len(G) == 50
G = nx.random_geometric_graph(range(50), 0.25, seed=42)
assert len(G) == 50
def test_distances(self):
"""Tests that pairs of vertices adjacent if and only if they are
within the prescribed radius.
"""
# Use the Euclidean metric, the default according to the
# documentation.
G = nx.random_geometric_graph(50, 0.25)
for u, v in combinations(G, 2):
# Adjacent vertices must be within the given distance.
if v in G[u]:
assert math.dist(G.nodes[u]["pos"], G.nodes[v]["pos"]) <= 0.25
# Nonadjacent vertices must be at greater distance.
else:
assert not math.dist(G.nodes[u]["pos"], G.nodes[v]["pos"]) <= 0.25
def test_p(self):
"""Tests for providing an alternate distance metric to the generator."""
# Use the L1 metric.
G = nx.random_geometric_graph(50, 0.25, p=1)
for u, v in combinations(G, 2):
# Adjacent vertices must be within the given distance.
if v in G[u]:
assert l1dist(G.nodes[u]["pos"], G.nodes[v]["pos"]) <= 0.25
# Nonadjacent vertices must be at greater distance.
else:
assert not l1dist(G.nodes[u]["pos"], G.nodes[v]["pos"]) <= 0.25
def test_node_names(self):
"""Tests using values other than sequential numbers as node IDs."""
import string
nodes = list(string.ascii_lowercase)
G = nx.random_geometric_graph(nodes, 0.25)
assert len(G) == len(nodes)
for u, v in combinations(G, 2):
# Adjacent vertices must be within the given distance.
if v in G[u]:
assert math.dist(G.nodes[u]["pos"], G.nodes[v]["pos"]) <= 0.25
# Nonadjacent vertices must be at greater distance.
else:
assert not math.dist(G.nodes[u]["pos"], G.nodes[v]["pos"]) <= 0.25
def test_pos_name(self):
G = nx.random_geometric_graph(50, 0.25, seed=42, pos_name="coords")
assert all(len(d["coords"]) == 2 for n, d in G.nodes.items())
| TestRandomGeometricGraph |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/dms.py | {
"start": 6712,
"end": 8512
} | class ____(AwsBaseOperator[DmsHook]):
"""
Describes AWS DMS replication tasks.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DmsDescribeTasksOperator`
:param describe_tasks_kwargs: Describe tasks command arguments
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
aws_hook_class = DmsHook
template_fields: Sequence[str] = aws_template_fields("describe_tasks_kwargs")
template_fields_renderers: ClassVar[dict[str, str]] = {"describe_tasks_kwargs": "json"}
def __init__(self, *, describe_tasks_kwargs: dict | None = None, **kwargs):
super().__init__(**kwargs)
self.describe_tasks_kwargs = describe_tasks_kwargs or {}
def execute(self, context: Context) -> tuple[str | None, list]:
"""
Describe AWS DMS replication tasks from Airflow.
:return: Marker and list of replication tasks
"""
return self.hook.describe_replication_tasks(**self.describe_tasks_kwargs)
| DmsDescribeTasksOperator |
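A minimal sketch of wiring the operator into a DAG, assuming the amazon provider package is installed; the DAG id, task id, and filter values are illustrative only:

from datetime import datetime

from airflow import DAG
from airflow.providers.amazon.aws.operators.dms import DmsDescribeTasksOperator

with DAG("dms_describe_example", start_date=datetime(2024, 1, 1), schedule=None) as dag:
    describe = DmsDescribeTasksOperator(
        task_id="describe_tasks",
        describe_tasks_kwargs={
            # Hypothetical filter; keys follow boto3's describe_replication_tasks
            "Filters": [{"Name": "replication-task-id", "Values": ["my-task"]}]
        },
    )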
python | mlflow__mlflow | tests/transformers/test_transformers_llm_inference_utils.py | {
"start": 2476,
"end": 10776
} | class ____(NamedTuple):
data: Any
params: Any
expected_data: Any
expected_params: Any
@pytest.mark.parametrize(
"case",
[
# Case 0: Data only includes prompt
_TestCase(
data=pd.DataFrame({"prompt": ["Hello world!"]}),
params={},
expected_data=["Hello world!"],
expected_params={},
),
# Case 1: Data includes prompt and params
_TestCase(
data=pd.DataFrame(
{
"prompt": ["Hello world!"],
"temperature": [0.7],
"max_tokens": [100],
"stop": [None],
}
),
params={},
expected_data=["Hello world!"],
expected_params={
"temperature": 0.7,
# max_tokens is replaced with max_new_tokens
"max_new_tokens": 100,
# do not pass `stop` to params as it is None
},
),
# Case 2: Params are passed if not specified in data
_TestCase(
data=pd.DataFrame(
{
"prompt": ["Hello world!"],
}
),
params={
"temperature": 0.7,
"max_tokens": 100,
"stop": ["foo", "bar"],
},
expected_data=["Hello world!"],
expected_params={
"temperature": 0.7,
"max_new_tokens": 100,
# Stopping criteria is _StopSequenceMatchCriteria instance
# "stop": ...
},
),
# Case 3: Data overrides params
_TestCase(
data=pd.DataFrame(
{
"messages": [
[
{"role": "user", "content": "Hello!"},
{"role": "assistant", "content": "Hi!"},
]
],
"temperature": [0.1],
"max_tokens": [100],
"stop": [["foo", "bar"]],
}
),
params={
"temperature": [0.2],
"max_tokens": [200],
"stop": ["foo", "bar", "baz"],
},
expected_data=[
[
{"role": "user", "content": "Hello!"},
{"role": "assistant", "content": "Hi!"},
]
],
expected_params={
"temperature": 0.1,
"max_new_tokens": 100,
},
),
# Case 4: Batch input
_TestCase(
data=pd.DataFrame(
{
"prompt": ["Hello!", "Hi", "Hola"],
"temperature": [0.1, 0.2, 0.3],
"max_tokens": [None, 200, 300],
}
),
params={
"temperature": 0.4,
"max_tokens": 400,
},
expected_data=["Hello!", "Hi", "Hola"],
            # The values from the first data row are used, otherwise params
expected_params={
"temperature": 0.1,
"max_new_tokens": 400,
},
),
# Case 5: Raw dict input
_TestCase(
data={
"messages": [
{"role": "user", "content": "Hello!"},
{"role": "assistant", "content": "Hi!"},
],
"temperature": 0.1,
"max_tokens": 100,
"stop": ["foo", "bar"],
},
params={},
expected_data=[
[
{"role": "user", "content": "Hello!"},
{"role": "assistant", "content": "Hi!"},
]
],
expected_params={
"temperature": 0.1,
"max_new_tokens": 100,
},
),
],
)
def test_preprocess_llm_inference_input(case):
task = "llm/v1/completions" if "prompt" in case.data else "llm/v1/chat"
flavor_config = {"inference_task": task, "source_model_name": "test"}
with mock.patch(
"mlflow.transformers.llm_inference_utils._get_stopping_criteria"
) as mock_get_stopping_criteria:
data, params = preprocess_llm_inference_input(case.data, case.params, flavor_config)
# Test that OpenAI params are separated from data and replaced with Hugging Face params
assert data == case.expected_data
if "stopping_criteria" in params:
assert params.pop("stopping_criteria") is not None
mock_get_stopping_criteria.assert_called_once_with(["foo", "bar"], "test")
assert params == case.expected_params
def test_preprocess_llm_inference_input_raise_if_key_invalid():
# Missing input key
with pytest.raises(MlflowException, match=r"Transformer model saved with"):
preprocess_llm_inference_input(
pd.DataFrame({"invalid_key": [1, 2, 3]}),
flavor_config={"inference_task": "llm/v1/completions"},
)
# Unmatched key (should be "messages" for chat task)
with pytest.raises(MlflowException, match=r"Transformer model saved with"):
preprocess_llm_inference_input(
pd.DataFrame({"prompt": ["Hi"]}), flavor_config={"inference_task": "llm/v1/chat"}
)
def test_stopping_criteria():
with mock.patch("transformers.AutoTokenizer.from_pretrained") as mock_from_pretrained:
mock_from_pretrained.return_value = DummyTokenizer()
stopping_criteria = _get_stopping_criteria(stop=None, model_name=None)
assert stopping_criteria is None
input_ids = torch.tensor([[1, 2, 3, 4, 5]])
scores = torch.ones(1, 5)
stopping_criteria = _get_stopping_criteria(stop="5", model_name="my/model")
stopping_criteria_matches = [f(input_ids, scores) for f in stopping_criteria]
assert stopping_criteria_matches == [True, True]
stopping_criteria = _get_stopping_criteria(stop=["100", "5"], model_name="my/model")
stopping_criteria_matches = [f(input_ids, scores) for f in stopping_criteria]
assert stopping_criteria_matches == [False, False, True, True]
def test_output_dict_for_completions():
prompt = "1 2 3"
output_tensor = [1, 2, 3, 4, 5]
flavor_config = {"source_model_name": "gpt2"}
model_config = {"max_new_tokens": 2}
inference_task = "llm/v1/completions"
pipeline = mock.MagicMock()
pipeline.tokenizer = DummyTokenizer()
output_dict = _get_output_and_usage_from_tensor(
prompt, output_tensor, pipeline, flavor_config, model_config, inference_task
)
# Test UUID validity
uuid.UUID(output_dict["id"])
assert output_dict["object"] == "text_completion"
assert output_dict["model"] == "gpt2"
assert output_dict["choices"][0]["text"] == "4 5"
assert output_dict["choices"][0]["finish_reason"] == "length"
usage = output_dict["usage"]
assert usage["prompt_tokens"] + usage["completion_tokens"] == usage["total_tokens"]
def test_token_usage():
prompt = "1 2 3"
output_tensor = [1, 2, 3, 4, 5]
pipeline = mock.MagicMock()
pipeline.tokenizer = DummyTokenizer()
usage = _get_token_usage(prompt, output_tensor, pipeline, {})
assert usage["prompt_tokens"] == 3
assert usage["completion_tokens"] == 2
assert usage["total_tokens"] == 5
def test_finish_reason():
assert _get_finish_reason(total_tokens=20, completion_tokens=10, model_config={}) == "stop"
assert (
_get_finish_reason(
total_tokens=20, completion_tokens=10, model_config={"max_new_tokens": 10}
)
== "length"
)
assert (
_get_finish_reason(total_tokens=20, completion_tokens=10, model_config={"max_length": 15})
== "length"
)
@pytest.mark.parametrize(
("inference_task", "expected_task"),
[
("llm/v1/completions", "text-generation"),
("llm/v1/chat", "text-generation"),
(None, None),
],
)
def test_default_task_for_llm_inference_task(inference_task, expected_task):
assert _get_default_task_for_llm_inference_task(inference_task) == expected_task
| _TestCase |
python | pytorch__pytorch | torch/_functorch/_aot_autograd/descriptors.py | {
"start": 16243,
"end": 16584
} | class ____(DifferentiableAOTInput):
"""The input is a buffer, whose FQN is target"""
target: str
def expr(self) -> str:
return f"self.get_buffer({self.target!r})"
def is_param(self) -> bool:
return False
def is_buffer(self) -> bool:
return True
@dataclasses.dataclass(frozen=True)
| BufferAOTInput |
python | walkccc__LeetCode | solutions/97. Interleaving String/97.py | {
"start": 0,
"end": 717
} | class ____:
def isInterleave(self, s1: str, s2: str, s3: str) -> bool:
m = len(s1)
n = len(s2)
if m + n != len(s3):
return False
# dp[i][j] := true if s3[0..i + j) is formed by the interleaving of
# s1[0..i) and s2[0..j)
dp = [[False] * (n + 1) for _ in range(m + 1)]
dp[0][0] = True
for i in range(1, m + 1):
dp[i][0] = dp[i - 1][0] and s1[i - 1] == s3[i - 1]
for j in range(1, n + 1):
dp[0][j] = dp[0][j - 1] and s2[j - 1] == s3[j - 1]
for i in range(1, m + 1):
for j in range(1, n + 1):
dp[i][j] = (dp[i - 1][j] and s1[i - 1] == s3[i + j - 1] or
dp[i][j - 1] and s2[j - 1] == s3[i + j - 1])
return dp[m][n]
| Solution |
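A quick sanity check of the DP, using the two canonical examples from the problem statement (assumes the Solution class above is in scope):

sol = Solution()
print(sol.isInterleave("aabcc", "dbbca", "aadbbcbcac"))  # True
print(sol.isInterleave("aabcc", "dbbca", "aadbbbaccc"))  # False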
python | doocs__leetcode | solution/0400-0499/0494.Target Sum/Solution.py | {
"start": 0,
"end": 496
} | class ____:
def findTargetSumWays(self, nums: List[int], target: int) -> int:
s = sum(nums)
if s < target or (s - target) % 2:
return 0
m, n = len(nums), (s - target) // 2
f = [[0] * (n + 1) for _ in range(m + 1)]
f[0][0] = 1
for i, x in enumerate(nums, 1):
for j in range(n + 1):
f[i][j] = f[i - 1][j]
if j >= x:
f[i][j] += f[i - 1][j - x]
return f[m][n]
| Solution |
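Why n = (s - target) // 2: with P the subset given '+' signs and N the subset given '-', sum(P) - sum(N) = target and sum(P) + sum(N) = s, so sum(N) = (s - target) / 2 and the task reduces to counting subsets of nums that sum to n. A quick check against the classic example (assumes the class above is in scope, with List imported from typing):

print(Solution().findTargetSumWays([1, 1, 1, 1, 1], 3))  # 5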
python | django__django | tests/auth_tests/urls.py | {
"start": 2797,
"end": 2964
} | class ____(View):
def get(self, request, *args, **kwargs):
return HttpResponse()
@method_decorator(login_not_required, name="dispatch")
| EmptyResponseBaseView |
python | huggingface__transformers | src/transformers/models/deberta/configuration_deberta.py | {
"start": 778,
"end": 7173
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`DebertaModel`]. It is
used to instantiate a DeBERTa model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the DeBERTa
[microsoft/deberta-base](https://huggingface.co/microsoft/deberta-base) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Arguments:
vocab_size (`int`, *optional*, defaults to 50265):
Vocabulary size of the DeBERTa model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`DebertaModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"`, `"gelu"`, `"tanh"`, `"gelu_fast"`, `"mish"`, `"linear"`, `"sigmoid"` and `"gelu_new"`
are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 0):
The vocabulary size of the `token_type_ids` passed when calling [`DebertaModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    layer_norm_eps (`float`, *optional*, defaults to 1e-7):
The epsilon used by the layer normalization layers.
relative_attention (`bool`, *optional*, defaults to `False`):
Whether use relative position encoding.
    max_relative_positions (`int`, *optional*, defaults to -1):
The range of relative positions `[-max_position_embeddings, max_position_embeddings]`. Use the same value
as `max_position_embeddings`.
pad_token_id (`int`, *optional*, defaults to 0):
The value used to pad input_ids.
position_biased_input (`bool`, *optional*, defaults to `True`):
Whether add absolute position embedding to content embedding.
pos_att_type (`list[str]`, *optional*):
The type of relative position attention, it can be a combination of `["p2c", "c2p"]`, e.g. `["p2c"]`,
`["p2c", "c2p"]`.
legacy (`bool`, *optional*, defaults to `True`):
Whether or not the model should use the legacy `LegacyDebertaOnlyMLMHead`, which does not work properly
for mask infilling tasks.
Example:
```python
>>> from transformers import DebertaConfig, DebertaModel
>>> # Initializing a DeBERTa microsoft/deberta-base style configuration
>>> configuration = DebertaConfig()
>>> # Initializing a model (with random weights) from the microsoft/deberta-base style configuration
>>> model = DebertaModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "deberta"
def __init__(
self,
vocab_size=50265,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=0,
initializer_range=0.02,
layer_norm_eps=1e-7,
relative_attention=False,
max_relative_positions=-1,
pad_token_id=0,
position_biased_input=True,
pos_att_type=None,
pooler_dropout=0,
pooler_hidden_act="gelu",
legacy=True,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.relative_attention = relative_attention
self.max_relative_positions = max_relative_positions
self.pad_token_id = pad_token_id
self.position_biased_input = position_biased_input
# Backwards compatibility
if isinstance(pos_att_type, str):
pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
self.pos_att_type = pos_att_type
self.vocab_size = vocab_size
self.layer_norm_eps = layer_norm_eps
self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
self.pooler_dropout = pooler_dropout
self.pooler_hidden_act = pooler_hidden_act
self.legacy = legacy
__all__ = ["DebertaConfig"]
| DebertaConfig |
python | getsentry__sentry | src/sentry/api/fields/serializedfile.py | {
"start": 292,
"end": 425
} | class ____(SentryAPIException):
status_code = 413
default_detail = "File too large"
default_code = "too_large"
| FileTooLarge |
python | neetcode-gh__leetcode | python/0001-two-sum.py | {
"start": 0,
"end": 287
} | class ____:
def twoSum(self, nums: List[int], target: int) -> List[int]:
prevMap = {} # val -> index
for i, n in enumerate(nums):
diff = target - n
if diff in prevMap:
return [prevMap[diff], i]
prevMap[n] = i
| Solution |
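The one-pass map looks up each value's complement before inserting the value itself, so the two returned indices are always distinct. Quick check (assumes the class above is in scope):

print(Solution().twoSum([2, 7, 11, 15], 9))  # [0, 1]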
python | getsentry__sentry | src/sentry/api/serializers/models/dashboard.py | {
"start": 12113,
"end": 12476
} | class ____(Serializer):
def serialize(self, obj, attrs, user, **kwargs) -> OnDemandResponse:
return {
"enabled": obj.extraction_enabled(),
"extractionState": obj.extraction_state,
"dashboardWidgetQueryId": obj.dashboard_widget_query_id,
}
@register(DashboardWidgetQuery)
| DashboardWidgetQueryOnDemandSerializer |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_theme07.py | {
"start": 350,
"end": 2146
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_theme07.xlsx")
def test_create_file(self):
"""Test the creation of an XlsxWriter file with chart formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line", "subtype": "stacked"})
chart.axis_ids = [68411392, 68414848]
# Add some test data for the chart(s).
for row_num in range(8):
for col_num in range(6):
worksheet.write_number(row_num, col_num, 1)
chart.add_series(
{
"values": ["Sheet1", 0, 0, 7, 0],
"line": {"color": Color((5, 0))},
}
)
chart.add_series(
{
"values": ["Sheet1", 0, 1, 7, 1],
"line": {"color": Color((5, 1))},
}
)
chart.add_series(
{
"values": ["Sheet1", 0, 2, 7, 2],
"line": {"color": Color((5, 2))},
}
)
chart.add_series(
{
"values": ["Sheet1", 0, 3, 7, 3],
"line": {"color": Color((5, 3))},
}
)
chart.add_series(
{
"values": ["Sheet1", 0, 4, 7, 4],
"line": {"color": Color((5, 4))},
}
)
chart.add_series(
{
"values": ["Sheet1", 0, 5, 7, 5],
"line": {"color": Color((5, 5))},
}
)
worksheet.insert_chart(8, 7, chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | doocs__leetcode | solution/2900-2999/2913.Subarrays Distinct Element Sum of Squares I/Solution.py | {
"start": 0,
"end": 267
} | class ____:
def sumCounts(self, nums: List[int]) -> int:
ans, n = 0, len(nums)
for i in range(n):
s = set()
for j in range(i, n):
s.add(nums[j])
ans += len(s) * len(s)
return ans
| Solution |
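Because the set is carried along as the right endpoint j advances, each subarray's distinct count costs amortized O(1), for O(n^2) total. Quick check (assumes the class above is in scope):

print(Solution().sumCounts([1, 2, 1]))  # 1 + 4 + 4 + 1 + 4 + 1 = 15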
python | getsentry__sentry | tests/sentry/hybridcloud/test_organization.py | {
"start": 10576,
"end": 15103
} | class ____(TestCase):
def test_get_audit_log_metadata(self) -> None:
org = self.create_organization(owner=self.user)
user = self.create_user(email="foobar@sentry.io")
member = self.create_member(user_id=user.id, role="owner", organization_id=org.id)
self.create_team(organization=org, slug="baz", members=[user])
rpc_member = serialize_member(member)
assert member.get_audit_log_data() == rpc_member.get_audit_log_metadata()
@django_db_all(transaction=True)
def test_update_organization_member() -> None:
org = Factories.create_organization()
user = Factories.create_user(email="test@sentry.io")
rpc_member = organization_service.add_organization_member(
organization_id=org.id,
default_org_role="member",
user_id=user.id,
invite_status=InviteStatus.APPROVED.value,
)
member_query = OrganizationMember.objects.all()
assert member_query.count() == 1
assert member_query[0].role == "member"
assert rpc_member.id == member_query[0].id
organization_service.update_organization_member(
organization_id=org.id, member_id=rpc_member.id, attrs=dict(role="manager")
)
member_query = OrganizationMember.objects.all()
assert member_query.count() == 1
assert member_query[0].role == "manager"
@django_db_all(transaction=True)
@all_silo_test
def test_count_members_without_sso() -> None:
org = Factories.create_organization()
user = Factories.create_user(email="test@sentry.io")
user_two = Factories.create_user(email="has.sso@sentry.io")
Factories.create_member(organization=org, user=user)
Factories.create_member(organization=org, email="invite@sentry.io")
# has sso setup, not included in result
Factories.create_member(
organization=org,
user=user_two,
flags=OrganizationMember.flags["sso:linked"],
)
result = organization_service.count_members_without_sso(organization_id=org.id)
assert result == 2
@django_db_all(transaction=True)
@all_silo_test
def test_send_sso_unlink_emails() -> None:
org = Factories.create_organization()
user = Factories.create_user(email="test@sentry.io")
user_two = Factories.create_user(email="two@sentry.io")
Factories.create_member(
organization=org, user=user, flags=OrganizationMember.flags["sso:linked"]
)
Factories.create_member(
organization=org, user=user_two, flags=OrganizationMember.flags["sso:linked"]
)
Factories.create_member(
organization=org, email="invite@sentry.io", flags=OrganizationMember.flags["sso:invalid"]
)
with TaskRunner():
result = organization_service.send_sso_unlink_emails(
organization_id=org.id,
sending_user_email="owner@sentry.io",
provider_key="google",
)
assert result is None
with assume_test_silo_mode(SiloMode.REGION):
# No members should be linked or invalid now
assert (
OrganizationMember.objects.filter(
flags=F("flags").bitor(OrganizationMember.flags["sso:linked"])
).count()
== 0
)
assert (
OrganizationMember.objects.filter(
flags=F("flags").bitor(OrganizationMember.flags["sso:invalid"])
).count()
== 0
)
# Only real members should get emails
assert len(mail.outbox) == 2
assert "Action Required" in mail.outbox[0].subject
assert "Single Sign-On" in mail.outbox[0].body
@django_db_all(transaction=True)
@all_silo_test
def test_get_aggregate_project_flags() -> None:
org = Factories.create_organization()
project1 = Factories.create_project(organization_id=org.id, name="test-project-1")
project2 = Factories.create_project(organization_id=org.id, name="test-project-2")
flags = organization_service.get_aggregate_project_flags(organization_id=org.id)
assert flags.has_insights_http is False
assert flags.has_cron_checkins is False
with assume_test_silo_mode_of(Project):
project1.flags.has_insights_http = True
project1.update(flags=F("flags").bitor(Project.flags.has_insights_http))
project2.flags.has_insights_http = True
project2.update(flags=F("flags").bitor(Project.flags.has_cron_checkins))
flags = organization_service.get_aggregate_project_flags(organization_id=org.id)
assert flags.has_insights_http is True
assert flags.has_cron_checkins is True
| RpcOrganizationMemberTest |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 32653,
"end": 32915
} | class ____:
xlCalculationAutomatic = -4105 # from enum XlCalculation
xlCalculationManual = -4135 # from enum XlCalculation
xlCalculationSemiautomatic = 2 # from enum XlCalculation
calculations = ("automatic", "manual", "semiautomatic")
| Calculation |
python | apache__avro | lang/py/avro/test/test_io.py | {
"start": 14692,
"end": 16114
} | class ____(unittest.TestCase):
def __init__(self, write_type: str, read_type: str) -> None:
"""Ignore the normal signature for unittest.TestCase because we are generating
many test cases from this one class. This is safe as long as the autoloader
ignores this class. The autoloader will ignore this class as long as it has
no methods starting with `test_`.
"""
super().__init__("check_schema_promotion")
self.writers_schema = avro.schema.parse(f'"{write_type}"')
self.readers_schema = avro.schema.parse(f'"{read_type}"')
# Never hide repeated warnings when running this test case.
warnings.simplefilter("always")
def check_schema_promotion(self) -> None:
"""Test schema promotion"""
# note that checking writers_schema.type in read_data
# allows us to handle promotion correctly
DATUM_TO_WRITE = 219
with warnings.catch_warnings(record=True) as actual_warnings:
writer, enc, dw = write_datum(DATUM_TO_WRITE, self.writers_schema)
datum_read = read_datum(writer, self.writers_schema, self.readers_schema)
self.assertEqual(
datum_read,
DATUM_TO_WRITE,
f"Datum changed between schema that were supposed to promote: writer: {self.writers_schema} reader: {self.readers_schema}.",
)
| SchemaPromotionTestCase |
python | modin-project__modin | asv_bench/benchmarks/benchmarks.py | {
"start": 38778,
"end": 39145
} | class ____:
params = [get_benchmark_shapes("TimeIsnull")]
param_names = ["shape"]
def setup(self, shape):
sample = np.array([np.nan, 1.0])
data = np.random.choice(sample, (shape[0], shape[1]))
self.df = IMPL.DataFrame(data)
execute(self.df)
def time_isnull(self, shape):
execute(IMPL.isnull(self.df))
| TimeIsnull |
python | huggingface__transformers | src/transformers/models/ernie4_5_moe/modular_ernie4_5_moe.py | {
"start": 1717,
"end": 2159
} | class ____(Qwen3MoeMLP):
def __init__(self, config, intermediate_size=None):
super().__init__(config, intermediate_size)
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.use_bias)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.use_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.use_bias)
| Ernie4_5_MoeMLP |
python | keras-team__keras | keras/src/backend/jax/core_test.py | {
"start": 570,
"end": 2433
} | class ____(testing.TestCase):
def setup(self):
super().setup()
class NNXModel(nnx.Module):
def __init__(self, rngs):
self.linear = nnx.Linear(2, 3, rngs=rngs)
# Use NnxVariable directly as KerasJaxVariable
# might be JaxVariable if NNX is disabled globally.
self.custom_variable = NnxVariable(jnp.ones((1, 3)))
def __call__(self, x):
return self.linear(x) + self.custom_variable
self.nnx_model = NNXModel(rngs=nnx.Rngs(0))
self.keras_nnx_model = keras.Sequential(
[keras.layers.Dense(units=1, input_shape=(10,))]
)
self.single_dummy_input = np.random.rand(1, 10)
def test_variable_in_nnx_module(self):
self.assertTrue(hasattr(self.nnx_model.custom_variable, "_trace_state"))
self.assertIsNotNone(self.nnx_model.custom_variable._trace_state)
self.assertAllEqual(self.nnx_model.custom_variable.value, [[1, 1, 1]])
self.assertTrue(
isinstance(self.nnx_model.custom_variable, nnx.Variable)
)
def test_model_saving(self):
path = os.path.join(self.get_temp_dir(), "model.keras")
original_outputs = self.keras_nnx_model(self.single_dummy_input)
self.keras_nnx_model.save(path, save_format="keras_v3")
restored_model = keras.models.load_model(path)
restored_outputs = restored_model(self.single_dummy_input)
self.assertAllEqual(original_outputs, restored_outputs)
def test_keras_variable_nnx_split_merge_sync(self):
variable1 = keras.Variable(jnp.array(1.0))
graphdef, state = nnx.split(variable1)
state = jax.tree.map(lambda x: x + 1, state)
variable2 = nnx.merge(graphdef, state)
self.assertEqual(variable2._value, variable2.value)
| NnxVariableTest |
python | pydantic__pydantic | pydantic-core/tests/test_errors.py | {
"start": 30162,
"end": 33009
} | class ____(enum.Enum):
CAUSE = enum.auto()
NO_CAUSE = enum.auto()
IMPORT_ERROR = enum.auto()
@pytest.mark.parametrize(
'desc,config,expected_result',
    [  # Without the backport this should still work on 3.11+, where it is not needed:
(
'Enabled',
CoreConfig(validation_error_cause=True),
CauseResult.CAUSE if sys.version_info >= (3, 11) else CauseResult.IMPORT_ERROR,
),
('Disabled specifically', CoreConfig(validation_error_cause=False), CauseResult.NO_CAUSE),
('Disabled implicitly', {}, CauseResult.NO_CAUSE),
],
)
def test_validation_error_cause_config_variants(
desc: str, config: CoreConfig, expected_result: CauseResult, mocker: MockerFixture
):
# Simulate the package being missing:
mocker.patch.dict('sys.modules', {'exceptiongroup': None})
def singular_raise_py_error(v: Any) -> Any:
raise ValueError('Oh no!')
s = SchemaValidator(core_schema.no_info_plain_validator_function(singular_raise_py_error), config=config)
if expected_result is CauseResult.IMPORT_ERROR:
# Confirm error message contains "requires the exceptiongroup module" in the middle of the string:
with pytest.raises(ImportError, match='requires the exceptiongroup module'):
s.validate_python('anything')
elif expected_result is CauseResult.CAUSE:
with pytest.raises(ValidationError) as exc_info:
s.validate_python('anything')
assert exc_info.value.__cause__ is not None
assert hasattr(exc_info.value.__cause__, 'exceptions')
assert len(exc_info.value.__cause__.exceptions) == 1
assert repr(exc_info.value.__cause__.exceptions[0]) == repr(ValueError('Oh no!'))
elif expected_result is CauseResult.NO_CAUSE:
with pytest.raises(ValidationError) as exc_info:
s.validate_python('anything')
assert exc_info.value.__cause__ is None
else:
raise AssertionError(f'Unhandled result: {expected_result}')
def test_validation_error_cause_traceback_preserved():
"""Makes sure historic bug of traceback being lost is fixed."""
enabled_config: CoreConfig = CoreConfig(validation_error_cause=True)
def singular_raise_py_error(v: Any) -> Any:
raise ValueError('Oh no!')
s1 = SchemaValidator(core_schema.no_info_plain_validator_function(singular_raise_py_error), config=enabled_config)
with pytest.raises(ValidationError) as exc_info:
s1.validate_python('anything')
base_errs = getattr(exc_info.value.__cause__, 'exceptions', [])
assert len(base_errs) == 1
base_err = base_errs[0]
# Get to the root error:
cause = base_err
while cause.__cause__ is not None:
cause = cause.__cause__
# Should still have a traceback:
assert cause.__traceback__ is not None
| CauseResult |
python | getsentry__sentry | src/sentry/relay/types/rule_condition.py | {
"start": 1173,
"end": 1296
} | class ____(TypedDict):
"""Less than condition"""
op: Literal["lt"]
name: str
value: Value | None
| LtCondition |
python | numba__numba | numba/core/debuginfo.py | {
"start": 571,
"end": 1528
} | class ____(metaclass=abc.ABCMeta):
@abc.abstractmethod
def mark_variable(self, builder, allocavalue, name, lltype, size, line,
datamodel=None, argidx=None):
"""Emit debug info for the variable.
"""
pass
@abc.abstractmethod
def mark_location(self, builder, line):
"""Emit source location information to the given IRBuilder.
"""
pass
@abc.abstractmethod
def mark_subprogram(self, function, qualname, argnames, argtypes, line):
"""Emit source location information for the given function.
"""
pass
@abc.abstractmethod
def initialize(self):
"""Initialize the debug info. An opportunity for the debuginfo to
prepare any necessary data structures.
"""
@abc.abstractmethod
def finalize(self):
"""Finalize the debuginfo by emitting all necessary metadata.
"""
pass
| AbstractDIBuilder |
python | walkccc__LeetCode | solutions/2149. Rearrange Array Elements by Sign/2149.py | {
"start": 0,
"end": 247
} | class ____:
def rearrangeArray(self, nums: list[int]) -> list[int]:
ans = []
pos = []
neg = []
for num in nums:
(pos if num > 0 else neg).append(num)
for p, n in zip(pos, neg):
ans += [p, n]
return ans
| Solution |
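The two partitions preserve relative order within each sign, and zip interleaves them starting with a positive, matching the problem's guarantee of equally many positives and negatives. Quick check (assumes the class above is in scope):

print(Solution().rearrangeArray([3, 1, -2, -5, 2, -4]))  # [3, -2, 1, -5, 2, -4]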
python | doocs__leetcode | solution/0300-0399/0331.Verify Preorder Serialization of a Binary Tree/Solution.py | {
"start": 0,
"end": 347
} | class ____:
def isValidSerialization(self, preorder: str) -> bool:
stk = []
for c in preorder.split(","):
stk.append(c)
while len(stk) > 2 and stk[-1] == stk[-2] == "#" and stk[-3] != "#":
stk = stk[:-3]
stk.append("#")
return len(stk) == 1 and stk[0] == "#"
| Solution |
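Each time the stack top reads value, "#", "#" it collapses to a single "#", i.e. a fully-serialized subtree is replaced by an empty slot; a valid preorder string reduces to exactly one "#". Quick check (assumes the class above is in scope):

sol = Solution()
print(sol.isValidSerialization("9,3,4,#,#,1,#,#,2,#,6,#,#"))  # True
print(sol.isValidSerialization("1,#"))                        # False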
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/solver15.py | {
"start": 438,
"end": 543
} | class ____: ...
E = TypeVar("E", bound=F)
def coercer_method(value: E | str, enum: type[E]) -> E: ...
| F |
python | arrow-py__arrow | arrow/locales.py | {
"start": 84997,
"end": 87267
} | class ____(Locale):
names = ["hu", "hu-hu"]
past = "{0} ezelőtt"
future = "{0} múlva"
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = {
"now": "éppen most",
"second": {"past": "egy második", "future": "egy második"},
"seconds": {"past": "{0} másodpercekkel", "future": "{0} pár másodperc"},
"minute": {"past": "egy perccel", "future": "egy perc"},
"minutes": {"past": "{0} perccel", "future": "{0} perc"},
"hour": {"past": "egy órával", "future": "egy óra"},
"hours": {"past": "{0} órával", "future": "{0} óra"},
"day": {"past": "egy nappal", "future": "egy nap"},
"days": {"past": "{0} nappal", "future": "{0} nap"},
"week": {"past": "egy héttel", "future": "egy hét"},
"weeks": {"past": "{0} héttel", "future": "{0} hét"},
"month": {"past": "egy hónappal", "future": "egy hónap"},
"months": {"past": "{0} hónappal", "future": "{0} hónap"},
"year": {"past": "egy évvel", "future": "egy év"},
"years": {"past": "{0} évvel", "future": "{0} év"},
}
month_names = [
"",
"január",
"február",
"március",
"április",
"május",
"június",
"július",
"augusztus",
"szeptember",
"október",
"november",
"december",
]
month_abbreviations = [
"",
"jan",
"febr",
"márc",
"ápr",
"máj",
"jún",
"júl",
"aug",
"szept",
"okt",
"nov",
"dec",
]
day_names = [
"",
"hétfő",
"kedd",
"szerda",
"csütörtök",
"péntek",
"szombat",
"vasárnap",
]
day_abbreviations = ["", "hét", "kedd", "szer", "csüt", "pént", "szom", "vas"]
meridians = {"am": "de", "pm": "du", "AM": "DE", "PM": "DU"}
def _format_timeframe(self, timeframe: TimeFrameLiteral, delta: int) -> str:
form = self.timeframes[timeframe]
if isinstance(form, Mapping):
if delta > 0:
form = form["future"]
else:
form = form["past"]
return form.format(abs(delta))
| HungarianLocale |
python | huggingface__transformers | src/transformers/models/csm/modeling_csm.py | {
"start": 6352,
"end": 9343
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: CsmConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[CsmConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
| CsmRotaryEmbedding |
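A minimal numeric sketch of the default inverse-frequency formula used above, inv_freq[i] = 1 / base**(2i / dim), with illustrative base=10000 and dim=8:

import torch

base, dim = 10000.0, 8
inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.float32) / dim))
print(inv_freq)  # tensor([1.0000, 0.1000, 0.0100, 0.0010])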
python | tensorflow__tensorflow | tensorflow/python/keras/engine/training_v1.py | {
"start": 3544,
"end": 125150
} | class ____(training_lib.Model):
"""`Model` groups layers into an object with training and inference features.
There are two ways to instantiate a `Model`:
1 - With the "functional API", where you start from `Input`,
you chain layer calls to specify the model's forward pass,
and finally you create your model from inputs and outputs:
```python
import tensorflow as tf
inputs = tf.keras.Input(shape=(3,))
x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs)
outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
```
2 - By subclassing the `Model` class: in that case, you should define your
layers in `__init__` and you should implement the model's forward pass
in `call`.
```python
import tensorflow as tf
class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
def call(self, inputs):
x = self.dense1(inputs)
return self.dense2(x)
model = MyModel()
```
If you subclass `Model`, you can optionally have
a `training` argument (boolean) in `call`, which you can use to specify
a different behavior in training and inference:
```python
import tensorflow as tf
class MyModel(tf.keras.Model):
def __init__(self):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)
self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)
self.dropout = tf.keras.layers.Dropout(0.5)
def call(self, inputs, training=False):
x = self.dense1(inputs)
if training:
x = self.dropout(x, training=training)
return self.dense2(x)
model = MyModel()
```
"""
def __init__(self, *args, **kwargs):
super(Model, self).__init__(*args, **kwargs)
# initializing _distribution_strategy here since it is possible to call
# predict on a model without compiling it.
self._distribution_strategy = None
self._compile_time_distribution_strategy = None
if (ops.executing_eagerly_outside_functions() and
distribute_lib.has_strategy()):
self._set_strategy(
distribute_lib.get_strategy())
# This flag is used to track if the user is using the deprecated path of
# passing distribution strategy to compile rather than creating the model
# under distribution strategy scope.
self._compile_distribution = False
self._run_eagerly = None
self._experimental_run_tf_function = (
ops.executing_eagerly_outside_functions())
self._v1_compile_was_called = False
def _init_batch_counters(self):
pass # Batch counters should not be created in legacy graph mode.
@trackable.no_automatic_dependency_tracking
def _set_strategy(self, strategy):
self._compile_time_distribution_strategy = strategy
def get_weights(self):
"""Retrieves the weights of the model.
Returns:
A flat list of Numpy arrays.
"""
strategy = (self._distribution_strategy or
self._compile_time_distribution_strategy)
if strategy:
with strategy.scope():
return base_layer.Layer.get_weights(self)
return base_layer.Layer.get_weights(self)
def load_weights(self, filepath, by_name=False, skip_mismatch=False):
"""Loads all layer weights, either from a TensorFlow or an HDF5 weight file.
If `by_name` is False weights are loaded based on the network's
topology. This means the architecture should be the same as when the weights
were saved. Note that layers that don't have weights are not taken into
account in the topological ordering, so adding or removing layers is fine as
long as they don't have weights.
If `by_name` is True, weights are loaded into layers only if they share the
same name. This is useful for fine-tuning or transfer-learning models where
some of the layers have changed.
Only topological loading (`by_name=False`) is supported when loading weights
from the TensorFlow format. Note that topological loading differs slightly
between TensorFlow and HDF5 formats for user-defined classes inheriting from
`tf.keras.Model`: HDF5 loads based on a flattened list of weights, while the
TensorFlow format loads based on the object-local names of attributes to
which layers are assigned in the `Model`'s constructor.
Args:
filepath: String, path to the weights file to load. For weight files in
TensorFlow format, this is the file prefix (the same as was passed
to `save_weights`).
by_name: Boolean, whether to load weights by name or by topological
order. Only topological loading is supported for weight files in
TensorFlow format.
skip_mismatch: Boolean, whether to skip loading of layers where there is
a mismatch in the number of weights, or a mismatch in the shape of
the weight (only valid when `by_name=True`).
Returns:
When loading a weight file in TensorFlow format, returns the same status
object as `tf.train.Checkpoint.restore`. When graph building, restore
ops are run automatically as soon as the network is built (on first call
for user-defined classes inheriting from `Model`, immediately if it is
already built).
When loading weights in HDF5 format, returns `None`.
Raises:
ImportError: If h5py is not available and the weight file is in HDF5
format.
ValueError: If `skip_mismatch` is set to `True` when `by_name` is
`False`.
"""
if backend.is_tpu_strategy(self._distribution_strategy):
if (self._distribution_strategy.extended.steps_per_run > 1 and
(not saving_utils.is_hdf5_filepath(filepath))): # pylint: disable=protected-access
raise ValueError('Load weights is not yet supported with TPUStrategy '
'with steps_per_run greater than 1.')
return super(Model, self).load_weights(filepath, by_name, skip_mismatch)
@trackable.no_automatic_dependency_tracking
def compile(self,
optimizer='rmsprop',
loss=None,
metrics=None,
loss_weights=None,
sample_weight_mode=None,
weighted_metrics=None,
target_tensors=None,
distribute=None,
**kwargs):
"""Configures the model for training.
Args:
optimizer: String (name of optimizer) or optimizer instance.
See `tf.keras.optimizers`.
loss: String (name of objective function), objective function or
`tf.keras.losses.Loss` instance. See `tf.keras.losses`. An objective
function is any callable with the signature
`scalar_loss = fn(y_true, y_pred)`. If the model has multiple
outputs, you can use a different loss on each output by passing a
dictionary or a list of losses. The loss value that will be
minimized by the model will then be the sum of all individual
losses.
metrics: List of metrics to be evaluated by the model during training
and testing. Typically you will use `metrics=['accuracy']`.
To specify different metrics for different outputs of a
multi-output model, you could also pass a dictionary, such as
`metrics={'output_a': 'accuracy', 'output_b': ['accuracy', 'mse']}`.
You can also pass a list (len = len(outputs)) of lists of metrics
such as `metrics=[['accuracy'], ['accuracy', 'mse']]` or
`metrics=['accuracy', ['accuracy', 'mse']]`.
loss_weights: Optional list or dictionary specifying scalar
coefficients (Python floats) to weight the loss contributions
of different model outputs.
The loss value that will be minimized by the model
will then be the *weighted sum* of all individual losses,
weighted by the `loss_weights` coefficients.
If a list, it is expected to have a 1:1 mapping
to the model's outputs. If a tensor, it is expected to map
output names (strings) to scalar coefficients.
sample_weight_mode: If you need to do timestep-wise
sample weighting (2D weights), set this to `"temporal"`.
`None` defaults to sample-wise weights (1D).
If the model has multiple outputs, you can use a different
`sample_weight_mode` on each output by passing a
dictionary or a list of modes.
weighted_metrics: List of metrics to be evaluated and weighted
by sample_weight or class_weight during training and testing.
target_tensors: By default, Keras will create placeholders for the
model's target, which will be fed with the target data during
training. If instead you would like to use your own
target tensors (in turn, Keras will not expect external
Numpy data for these targets at training time), you
can specify them via the `target_tensors` argument. It can be
a single tensor (for a single-output model), a list of tensors,
or a dict mapping output names to target tensors.
distribute: NOT SUPPORTED IN TF 2.0, please create and compile the
model under distribution strategy scope instead of passing it to
compile.
**kwargs: Any additional arguments.
Raises:
ValueError: In case of invalid arguments for
`optimizer`, `loss`, `metrics` or `sample_weight_mode`.
"""
self._assert_built_as_v1()
self._run_eagerly = kwargs.pop('run_eagerly', None)
self._experimental_run_tf_function = kwargs.pop(
'experimental_run_tf_function', True)
self._v1_compile_was_called = True
# Prepare Session arguments (legacy).
kwargs.pop('cloning', None) # Legacy DistStrat argument, never used.
self._from_serialized = kwargs.pop('from_serialized', False)
allowed_kwargs = {'feed_dict', 'fetches', 'options', 'run_metadata'}
unknown_kwargs = set(kwargs.keys()) - allowed_kwargs
if unknown_kwargs:
raise TypeError(
'Invalid keyword argument(s) in `compile`: %s' % (unknown_kwargs,))
self._function_kwargs = kwargs
if self._function_kwargs:
self._experimental_run_tf_function = False
if self.run_eagerly:
raise ValueError(
'Session keyword arguments are not supported '
'when `run_eagerly=True`. You passed the following '
'Session arguments: %s' % (self._function_kwargs,))
self._set_optimizer(optimizer)
is_any_keras_optimizer_v1 = any(
(isinstance(opt, optimizer_v1.Optimizer)
and not isinstance(opt, optimizer_v1.TFOptimizer)
) for opt in nest.flatten(self.optimizer))
if is_any_keras_optimizer_v1 and ops.executing_eagerly_outside_functions():
raise ValueError('`tf.compat.v1.keras` Optimizer (', optimizer, ') is '
'not supported when eager execution is enabled. Use a '
'`tf.keras` Optimizer instead, or disable eager '
'execution.')
if ((target_tensors is not None)
or not ops.executing_eagerly_outside_functions()):
# Fallback out of things that aren't supported with v2 loops
self._experimental_run_tf_function = False
if distribute is not None:
if tf2.enabled() or self._experimental_run_tf_function:
raise ValueError(
'Distribute argument in compile is not available in TF 2.0 please '
'create the model under the distribution strategy scope.')
logging.warning('Distribute argument in compile is deprecated please '
'create the model under the distribution strategy scope.')
self._distribution_strategy = distribute
self._compile_distribution = True
else:
if distribute_lib.has_strategy():
# When the user builds the model in the DS scope and cross replica
# context we want distribution strategy to be set but when building the
# replica copies of the models internally we should not be compiling
# with distribution strategy and use the default compilation path.
if distribute_lib.in_cross_replica_context():
self._distribution_strategy = (
distribute_lib.get_strategy())
if isinstance(self._distribution_strategy,
parameter_server_strategy.ParameterServerStrategyV1):
raise NotImplementedError(
'`tf.compat.v1.distribute.experimental.ParameterServerStrategy` '
'currently only works with the deprecated tf.Estimator API')
if isinstance(self._distribution_strategy,
parameter_server_strategy_v2.ParameterServerStrategyV2):
raise NotImplementedError(
'`tf.distribute.experimental.ParameterServerStrategy` is only '
'supported in TF2.')
if not self._experimental_run_tf_function:
self._validate_compile_param_for_distribution_strategy(self.run_eagerly,
sample_weight_mode,
target_tensors,
weighted_metrics)
# We've disabled automatic dependency tracking for this method, but do want
# to add a checkpoint dependency on the optimizer if it's trackable.
if isinstance(self.optimizer, trackable.Trackable):
self._track_trackable(
self.optimizer, name='optimizer', overwrite=True)
self.loss = loss or {}
self.loss_weights = loss_weights
self.sample_weight_mode = sample_weight_mode
self._compile_metrics = metrics or []
self._compile_weighted_metrics = weighted_metrics
if self.run_eagerly and target_tensors is not None:
raise ValueError(
'target_tensors argument is not supported when '
'running a model eagerly.')
# _training_endpoints contains a list of _TrainingEndpoint object, which has
# all the model output/target/loss and related metadata.
self._training_endpoints = []
# Used to freeze the behavior of the Model once `compile` has been called.
self._compiled_trainable_state = self._get_trainable_state()
# Set tf.distribute.Strategy specific parameters.
self._distributed_model_cache = {}
self._distributed_function_cache = {}
# Clear any `_eager_losses` that was added.
self._clear_losses()
if (not context.executing_eagerly() and
self._distribution_strategy is not None):
# Ensures a Session is created and configured correctly for Distribution
# Strategy.
backend.configure_and_create_distributed_session(
self._distribution_strategy)
# Initialize model metric attributes.
self._init_metric_attributes()
if not self.built or not self.inputs or not self.outputs:
# Model is not compilable because it does not know its number of inputs
# and outputs, nor their shapes and names. We will compile after the first
# time the model gets called on training data.
return
self._is_compiled = True
# Prepare list of loss functions, same size of model outputs.
self.loss_functions = training_utils_v1.prepare_loss_functions(
self.loss, self.output_names)
target_tensors = self._process_target_tensor_for_compile(target_tensors)
for o, n, l, t in zip(self.outputs, self.output_names,
self.loss_functions, target_tensors):
endpoint = _TrainingEndpoint(o, n, l)
endpoint.create_training_target(t, run_eagerly=self.run_eagerly)
self._training_endpoints.append(endpoint)
# Prepare list loss weights, same size of model outputs.
training_utils_v1.prepare_loss_weights(self._training_endpoints,
loss_weights)
# Initialization for Eager mode execution.
if self.run_eagerly:
self._compile_eagerly(metrics, weighted_metrics, sample_weight_mode)
return
with backend.get_graph().as_default():
# Save all metric attributes per output of the model.
self._cache_output_metric_attributes(metrics, weighted_metrics)
# Set metric attributes on model.
self._set_metric_attributes()
# Invoke metric functions (unweighted) for all the outputs.
self._handle_metrics(
self.outputs,
targets=self._targets,
skip_target_masks=self._prepare_skip_target_masks(),
masks=self._prepare_output_masks())
# Prepare sample weight modes. List with the same length as model outputs.
training_utils_v1.prepare_sample_weight_modes(
self._training_endpoints, sample_weight_mode)
# Creates the model loss and weighted metrics sub-graphs.
self._compile_weights_loss_and_weighted_metrics()
# Functions for train, test and predict will
# be compiled lazily when required.
# This saves time when the user is not using all functions.
self.train_function = None
self.test_function = None
self.predict_function = None
# Collected trainable weights, sorted in topological order.
self._collected_trainable_weights = self.trainable_weights
# Validate all variables were correctly created in distribution scope.
if self._distribution_strategy and not self._compile_distribution:
for v in self.variables:
strategy = self._distribution_strategy
if not strategy.extended.variable_created_in_scope(v):
raise ValueError(
'Variable (%s) was not created in the distribution strategy '
'scope of (%s). It is most likely due to not all layers or '
'the model or optimizer being created outside the distribution '
'strategy scope. Try to make sure your code looks similar '
'to the following.\n'
'with strategy.scope():\n'
' model=_create_model()\n'
' model.compile(...)'% (v, strategy))
@trackable.no_automatic_dependency_tracking
def _init_distributed_function_cache_if_not_compiled(self):
if not hasattr(self, '_distributed_function_cache'):
self._distributed_function_cache = {}
@property
def metrics(self):
"""Returns the model's metrics added using `compile`, `add_metric` APIs."""
metrics = []
if self._is_compiled:
if not hasattr(self, '_v1_compile_was_called'):
# See b/155687393 for more details, the model is created as a v2
# instance but converted to v1. Fallback to use base Model to retrieve
# the metrics.
return super(Model, self).metrics
metrics += self._compile_metric_functions
metrics.extend(self._metrics)
metrics.extend(
_get_metrics_from_layers(
list(self._flatten_layers(include_self=False, recursive=False))))
return metrics
@property
def metrics_names(self):
"""Returns the model's display labels for all outputs."""
# This property includes all output names including `loss` and per-output
# losses for backward compatibility.
metrics_names = ['loss']
if self._is_compiled:
if not hasattr(self, '_v1_compile_was_called'):
# See b/155687393 for more details, the model is created as a v2
# instance but converted to v1. Fallback to use base Model to retrieve
# the metrics name
return super(Model, self).metrics_names
# Add output loss metric names to the metric names list.
if len(self._training_endpoints) > 1:
metrics_names.extend([
e.loss_name()
for e in self._training_endpoints
if not e.should_skip_target()
])
# Add all metric names.
metrics_names += [m.name for m in self.metrics]
return metrics_names
@property
def run_eagerly(self):
"""Settable attribute indicating whether the model should run eagerly.
Running eagerly means that your model will be run step by step,
like Python code. Your model might run slower, but it should become easier
for you to debug it by stepping into individual layer calls.
By default, we will attempt to compile your model to a static graph to
deliver the best execution performance.
Returns:
Boolean, whether the model should run eagerly.
"""
if self._run_eagerly is True and not context.executing_eagerly():
raise ValueError('You can only set `run_eagerly=True` if eager execution '
'is enabled.')
if not self.dynamic:
if self._run_eagerly is None:
# Respect `tf.config.run_functions_eagerly` unless
# `run_eagerly` was explicitly passed to `compile`.
return def_function.functions_run_eagerly()
else:
return self._run_eagerly
else:
if not context.executing_eagerly():
raise ValueError('Your model contains layers that can only be '
'successfully run in eager execution (layers '
'constructed with `dynamic=True`). '
'You must enable eager execution with '
'`tf.enable_eager_execution()`.')
if self._run_eagerly is False:
# TODO(fchollet): consider using py_func to enable this.
raise ValueError('Your model contains layers that can only be '
'successfully run in eager execution (layers '
'constructed with `dynamic=True`). '
'You cannot set `run_eagerly=False`.')
return context.executing_eagerly()
@run_eagerly.setter
def run_eagerly(self, value):
self._run_eagerly = value
def _select_training_loop(self, inputs):
"""Select training loop for fit/eval/predict based on the inputs."""
# TODO(kaftan) or TODO(scottzhu): This check should eventually be nicely
# integrated into the data adapters in the v2 loop. We can't do this yet
# because we currently have to fall back for unhandled data types.
if isinstance(inputs, (iterator_ops.Iterator,
iterator_ops.IteratorBase)):
      raise ValueError('For performance reasons Keras `fit`, `evaluate` and '
                       '`predict` accept tf.data `Datasets` as input but not '
'iterators that have been manually generated from '
'Datasets by users. Please directly pass in the '
'original `Dataset` object instead of passing in '
'`iter(dataset)`.')
# Case 1: distribution strategy.
if self._distribution_strategy:
if self._in_multi_worker_mode():
return training_distributed_v1.DistributionMultiWorkerTrainingLoop(
training_distributed_v1.DistributionSingleWorkerTrainingLoop())
else:
return training_distributed_v1.DistributionSingleWorkerTrainingLoop()
# Case 2: generator-like. Input is Python generator, or Sequence object,
# or a non-distributed Dataset or iterator in eager execution.
if data_utils.is_generator_or_sequence(inputs):
return training_generator_v1.GeneratorOrSequenceTrainingLoop()
if training_utils_v1.is_eager_dataset_or_iterator(inputs):
return training_generator_v1.EagerDatasetOrIteratorTrainingLoop()
# Case 3: Symbolic tensors or Numpy array-like.
# This includes Datasets and iterators in graph mode (since they
# generate symbolic tensors).
if self.run_eagerly:
return training_generator_v1.GeneratorLikeTrainingLoop()
else:
return training_arrays_v1.ArrayLikeTrainingLoop()
def fit(self,
x=None,
y=None,
batch_size=None,
epochs=1,
verbose=1,
callbacks=None,
validation_split=0.,
validation_data=None,
shuffle=True,
class_weight=None,
sample_weight=None,
initial_epoch=0,
steps_per_epoch=None,
validation_steps=None,
validation_freq=1,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
**kwargs):
"""Trains the model for a fixed number of epochs (iterations on a dataset).
Args:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset. Should return a tuple
of either `(inputs, targets)` or
`(inputs, targets, sample_weights)`.
- A generator or `keras.utils.Sequence` returning `(inputs, targets)`
or `(inputs, targets, sample weights)`.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely). If `x` is a dataset, generator,
or `keras.utils.Sequence` instance, `y` should
not be specified (since targets will be obtained from `x`).
batch_size: Integer or `None`.
Number of samples per gradient update.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your data is in the
form of symbolic tensors, datasets,
generators, or `keras.utils.Sequence` instances (since they generate
batches).
epochs: Integer. Number of epochs to train the model.
An epoch is an iteration over the entire `x` and `y`
data provided.
Note that in conjunction with `initial_epoch`,
`epochs` is to be understood as "final epoch".
The model is not trained for a number of iterations
given by `epochs`, but merely until the epoch
of index `epochs` is reached.
verbose: 0, 1, or 2. Verbosity mode.
0 = silent, 1 = progress bar, 2 = one line per epoch.
Note that the progress bar is not particularly useful when
logged to a file, so verbose=2 is recommended when not running
interactively (eg, in a production environment).
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during training.
See `tf.keras.callbacks`.
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate
the loss and any model metrics
on this data at the end of each epoch.
The validation data is selected from the last samples
in the `x` and `y` data provided, before shuffling. This argument is
not supported when `x` is a dataset, generator or
`keras.utils.Sequence` instance.
validation_data: Data on which to evaluate
the loss and any model metrics at the end of each epoch.
The model will not be trained on this data.
`validation_data` will override `validation_split`.
`validation_data` could be:
- tuple `(x_val, y_val)` of Numpy arrays or tensors
- tuple `(x_val, y_val, val_sample_weights)` of Numpy arrays
- dataset
For the first two cases, `batch_size` must be provided.
For the last case, `validation_steps` could be provided.
shuffle: Boolean (whether to shuffle the training data
before each epoch) or str (for 'batch').
'batch' is a special option for dealing with the
limitations of HDF5 data; it shuffles in batch-sized chunks.
Has no effect when `steps_per_epoch` is not `None`.
class_weight: Optional dictionary mapping class indices (integers)
to a weight (float) value, used for weighting the loss function
(during training only).
This can be useful to tell the model to
"pay more attention" to samples from
an under-represented class.
sample_weight: Optional Numpy array of weights for
the training samples, used for weighting the loss function
(during training only). You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`. This argument is not
supported when `x` is a dataset, generator, or
`keras.utils.Sequence` instance, instead provide the sample_weights
as the third element of `x`.
initial_epoch: Integer.
Epoch at which to start training
(useful for resuming a previous training run).
steps_per_epoch: Integer or `None`.
Total number of steps (batches of samples)
before declaring one epoch finished and starting the
next epoch. When training with input tensors such as
TensorFlow data tensors, the default `None` is equal to
the number of samples in your dataset divided by
the batch size, or 1 if that cannot be determined. If x is a
`tf.data` dataset, and 'steps_per_epoch'
is None, the epoch will run until the input dataset is exhausted.
This argument is not supported with array inputs.
validation_steps: Only relevant if `validation_data` is provided and
is a `tf.data` dataset. Total number of steps (batches of
samples) to draw before stopping when performing validation
at the end of every epoch. If 'validation_steps' is None, validation
will run until the `validation_data` dataset is exhausted. In the
        case of an infinite dataset, it will run into an infinite loop.
        If 'validation_steps' is specified, only part of the dataset
        will be consumed, and the evaluation will start from the beginning of
the dataset at each epoch. This ensures that the same validation
samples are used every time.
validation_freq: Only relevant if validation data is provided. Integer
or `collections.abc.Container` instance (e.g. list, tuple, etc.).
If an integer, specifies how many training epochs to run before a
new validation run is performed, e.g. `validation_freq=2` runs
validation every 2 epochs. If a Container, specifies the epochs on
which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
validation at the end of the 1st, 2nd, and 10th epochs.
max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
input only. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Used for generator or `keras.utils.Sequence` input
only. Maximum number of processes to spin up
when using process-based threading. If unspecified, `workers`
will default to 1. If 0, will execute the generator on the main
thread.
use_multiprocessing: Boolean. Used for generator or
`keras.utils.Sequence` input only. If `True`, use process-based
threading. If unspecified, `use_multiprocessing` will default to
`False`. Note that because this implementation relies on
multiprocessing, you should not pass non-picklable arguments to
the generator as they can't be passed easily to children processes.
**kwargs: Used for backwards compatibility.
Returns:
A `History` object. Its `History.history` attribute is
a record of training loss values and metrics values
at successive epochs, as well as validation loss values
and validation metrics values (if applicable).
Raises:
RuntimeError: If the model was never compiled.
ValueError: In case of mismatch between the provided input data
and what the model expects.
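    Example (a minimal illustrative sketch; the toy model and random Numpy
    data below are assumptions for demonstration, not part of this API):
      import numpy as np
      import tensorflow as tf
      model = tf.keras.Sequential(
          [tf.keras.layers.Dense(1, input_shape=(4,))])
      model.compile(optimizer='sgd', loss='mse')
      x = np.random.random((16, 4)).astype('float32')
      y = np.random.random((16, 1)).astype('float32')
      # Train for two epochs, holding out 25% of the samples for validation.
      history = model.fit(x, y, batch_size=4, epochs=2,
                          validation_split=0.25, verbose=0)
      print(history.history['loss'])  # One entry per epoch.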
"""
self._assert_built_as_v1()
# Legacy support
if 'nb_epoch' in kwargs:
logging.warning(
'The `nb_epoch` argument in `fit` has been renamed `epochs`.')
epochs = kwargs.pop('nb_epoch')
if kwargs:
raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
self._assert_compile_was_called()
self._check_call_args('fit')
func = self._select_training_loop(x)
return func.fit(
self,
x=x,
y=y,
batch_size=batch_size,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
validation_split=validation_split,
validation_data=validation_data,
shuffle=shuffle,
class_weight=class_weight,
sample_weight=sample_weight,
initial_epoch=initial_epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
validation_freq=validation_freq,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
def evaluate(self,
x=None,
y=None,
batch_size=None,
verbose=1,
sample_weight=None,
steps=None,
callbacks=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False):
"""Returns the loss value & metrics values for the model in test mode.
    Computation is done in batches (see the `batch_size` arg).
Args:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset.
- A generator or `keras.utils.Sequence` instance.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely).
If `x` is a dataset, generator or
`keras.utils.Sequence` instance, `y` should not be specified (since
targets will be obtained from the iterator/dataset).
batch_size: Integer or `None`.
Number of samples per batch of computation.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your data is in the
form of symbolic tensors, dataset,
generators, or `keras.utils.Sequence` instances (since they generate
batches).
verbose: 0 or 1. Verbosity mode.
0 = silent, 1 = progress bar.
sample_weight: Optional Numpy array of weights for
the test samples, used for weighting the loss function.
You can either pass a flat (1D)
Numpy array with the same length as the input samples
(1:1 mapping between weights and samples),
or in the case of temporal data,
you can pass a 2D array with shape
`(samples, sequence_length)`,
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
`sample_weight_mode="temporal"` in `compile()`. This argument is not
supported when `x` is a dataset, instead pass
sample weights as the third element of `x`.
steps: Integer or `None`.
Total number of steps (batches of samples)
before declaring the evaluation round finished.
Ignored with the default value of `None`.
If x is a `tf.data` dataset and `steps` is
None, 'evaluate' will run until the dataset is exhausted.
This argument is not supported with array inputs.
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during evaluation.
        See `tf.keras.callbacks`.
max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
input only. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Used for generator or `keras.utils.Sequence` input
only. Maximum number of processes to spin up when using
process-based threading. If unspecified, `workers` will default
to 1. If 0, will execute the generator on the main thread.
use_multiprocessing: Boolean. Used for generator or
`keras.utils.Sequence` input only. If `True`, use process-based
threading. If unspecified, `use_multiprocessing` will default to
`False`. Note that because this implementation relies on
multiprocessing, you should not pass non-picklable arguments to
the generator as they can't be passed easily to children processes.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: in case of invalid arguments.
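    Example (an illustrative sketch; `model`, `x` and `y` are assumed to be
    the compiled toy model and Numpy arrays from the example in `fit`):
      results = model.evaluate(x, y, batch_size=4, verbose=0)
      # `metrics_names` labels each returned scalar; with a bare 'mse' loss
      # and no extra metrics this is just ['loss'].
      print(model.metrics_names, results)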
"""
self._assert_built_as_v1()
self._assert_compile_was_called()
self._check_call_args('evaluate')
func = self._select_training_loop(x)
return func.evaluate(
self,
x=x,
y=y,
batch_size=batch_size,
verbose=verbose,
sample_weight=sample_weight,
steps=steps,
callbacks=callbacks,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
def predict(self,
x,
batch_size=None,
verbose=0,
steps=None,
callbacks=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False):
"""Generates output predictions for the input samples.
    Computation is done in batches (see the `batch_size` arg).
Args:
x: Input samples. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A `tf.data` dataset.
- A generator or `keras.utils.Sequence` instance.
batch_size: Integer or `None`.
Number of samples per batch of computation.
If unspecified, `batch_size` will default to 32.
Do not specify the `batch_size` if your data is in the
form of symbolic tensors, dataset,
generators, or `keras.utils.Sequence` instances (since they generate
batches).
verbose: Verbosity mode, 0 or 1.
steps: Total number of steps (batches of samples)
before declaring the prediction round finished.
Ignored with the default value of `None`. If x is a `tf.data`
dataset and `steps` is None, `predict` will
run until the input dataset is exhausted.
callbacks: List of `keras.callbacks.Callback` instances.
List of callbacks to apply during prediction.
        See `tf.keras.callbacks`.
max_queue_size: Integer. Used for generator or `keras.utils.Sequence`
input only. Maximum size for the generator queue.
If unspecified, `max_queue_size` will default to 10.
workers: Integer. Used for generator or `keras.utils.Sequence` input
only. Maximum number of processes to spin up when using
process-based threading. If unspecified, `workers` will default
to 1. If 0, will execute the generator on the main thread.
use_multiprocessing: Boolean. Used for generator or
`keras.utils.Sequence` input only. If `True`, use process-based
threading. If unspecified, `use_multiprocessing` will default to
`False`. Note that because this implementation relies on
multiprocessing, you should not pass non-picklable arguments to
the generator as they can't be passed easily to children processes.
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case of mismatch between the provided
input data and the model's expectations,
or in case a stateful model receives a number of samples
that is not a multiple of the batch size.
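    Example (an illustrative sketch; `model` and `x` are assumed to be the
    compiled toy model and Numpy input from the example in `fit`):
      predictions = model.predict(x, batch_size=4)
      print(predictions.shape)  # One row per input sample: (16, 1) here.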
"""
self._assert_built_as_v1()
self._check_call_args('predict')
func = self._select_training_loop(x)
return func.predict(
self,
x=x,
batch_size=batch_size,
verbose=verbose,
steps=steps,
callbacks=callbacks,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing)
def reset_metrics(self):
"""Resets the state of metrics."""
metrics = self._get_training_eval_metrics()
for m in metrics:
m.reset_state()
# Reset metrics on all the distributed (cloned) models.
if self._distribution_strategy:
distributed_training_utils_v1._reset_metrics(self) # pylint: disable=protected-access
def train_on_batch(self,
x,
y=None,
sample_weight=None,
class_weight=None,
reset_metrics=True):
"""Runs a single gradient update on a single batch of data.
Args:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset.
y: Target data. Like the input data `x`, it could be either Numpy
array(s) or TensorFlow tensor(s). It should be consistent with `x`
(you cannot have Numpy inputs and tensor targets, or inversely). If
`x` is a dataset, `y` should not be specified
(since targets will be obtained from the iterator).
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample. In the case of
temporal data, you can pass a 2D array with shape (samples,
sequence_length), to apply a different weight to every timestep of
every sample. In this case you should make sure to specify
sample_weight_mode="temporal" in compile(). This argument is not
supported when `x` is a dataset.
class_weight: Optional dictionary mapping class indices (integers) to a
weight (float) to apply to the model's loss for the samples from this
class during training. This can be useful to tell the model to "pay
more attention" to samples from an under-represented class.
reset_metrics: If `True`, the metrics returned will be only for this
batch. If `False`, the metrics will be statefully accumulated across
batches.
Returns:
Scalar training loss
(if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: In case of invalid user-provided arguments.
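    Example (a minimal illustrative sketch; the toy model and random batch
    data below are assumptions for demonstration, not part of this API):
      import numpy as np
      import tensorflow as tf
      model = tf.keras.Sequential(
          [tf.keras.layers.Dense(1, input_shape=(4,))])
      model.compile(optimizer='sgd', loss='mse')
      # A hand-rolled training loop over explicit batches.
      for _ in range(10):
        x_batch = np.random.random((8, 4)).astype('float32')
        y_batch = np.random.random((8, 1)).astype('float32')
        loss = model.train_on_batch(x_batch, y_batch)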
"""
self._assert_compile_was_called()
self._check_call_args('train_on_batch')
# If at this point we are in the replica context, then it is okay to execute
# the Eager code path. The expected way to get here is to call `fit` that
# calls `train_on_batch` on each replica.
if (self._distribution_strategy and
distribute_lib.in_cross_replica_context()):
raise NotImplementedError('`train_on_batch` is not supported for models '
'distributed with tf.distribute.Strategy.')
# Validate and standardize user data.
x, y, sample_weights = self._standardize_user_data(
x, y, sample_weight=sample_weight, class_weight=class_weight,
extract_tensors_from_dataset=True)
    # If `self._distribution_strategy` is set, then we are in a replica context
# at this point because of the check above. `train_on_batch` is being run
# for each replica by `self._distribution_strategy` and the same code path
# as Eager is expected to be taken.
if self.run_eagerly or self._distribution_strategy:
output_dict = training_eager_v1.train_on_batch(
self,
x,
y,
sample_weights=sample_weights,
output_loss_metrics=self._output_loss_metrics)
outputs = (output_dict['total_loss'] + output_dict['output_losses']
+ output_dict['metrics'])
outputs = [_non_none_constant_value(v) for v in outputs] # pylint: disable=protected-access
else:
x = training_utils_v1.ModelInputs(x).as_list()
ins = x + list(y or []) + list(sample_weights or [])
if not isinstance(backend.symbolic_learning_phase(), int):
ins += [True] # Add learning phase value.
self._update_sample_weight_modes(sample_weights=sample_weights)
self._make_train_function()
outputs = self.train_function(ins) # pylint: disable=not-callable
if reset_metrics:
self.reset_metrics()
if len(outputs) == 1:
return outputs[0]
return outputs
def test_on_batch(self, x, y=None, sample_weight=None, reset_metrics=True):
"""Test the model on a single batch of samples.
Args:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely). If `x` is a dataset `y` should
not be specified (since targets will be obtained from the iterator).
sample_weight: Optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify
sample_weight_mode="temporal" in compile(). This argument is not
supported when `x` is a dataset.
reset_metrics: If `True`, the metrics returned will be only for this
batch. If `False`, the metrics will be statefully accumulated across
batches.
Returns:
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
Raises:
ValueError: In case of invalid user-provided arguments.
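    Example (an illustrative sketch; `model`, `x_batch` and `y_batch` are
    assumed to be the compiled toy model and batch arrays from the example in
    `train_on_batch`):
      # With `reset_metrics=False` metric state accumulates across batches,
      # so call `reset_metrics()` once the evaluation pass is finished.
      for _ in range(5):
        loss = model.test_on_batch(x_batch, y_batch, reset_metrics=False)
      model.reset_metrics()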
"""
self._assert_compile_was_called()
self._check_call_args('test_on_batch')
if (self._distribution_strategy and
distribute_lib.in_cross_replica_context()):
raise NotImplementedError('`test_on_batch` is not supported for models '
'distributed with tf.distribute.Strategy.')
# Validate and standardize user data.
x, y, sample_weights = self._standardize_user_data(
x, y, sample_weight=sample_weight, extract_tensors_from_dataset=True)
    # If `self._distribution_strategy` is set, then we are in a replica context
# at this point.
if self.run_eagerly or self._distribution_strategy:
output_dict = training_eager_v1.test_on_batch(
self,
x,
y,
sample_weights=sample_weights,
output_loss_metrics=self._output_loss_metrics)
outputs = (output_dict['total_loss'] + output_dict['output_losses']
+ output_dict['metrics'])
outputs = [_non_none_constant_value(v) for v in outputs] # pylint: disable=protected-access
else:
x = training_utils_v1.ModelInputs(x).as_list()
inputs = x + list(y or []) + list(sample_weights or [])
self._update_sample_weight_modes(sample_weights=sample_weights)
self._make_test_function()
outputs = self.test_function(inputs) # pylint: disable=not-callable
if reset_metrics:
self.reset_metrics()
if len(outputs) == 1:
return outputs[0]
return outputs
def predict_on_batch(self, x):
"""Returns predictions for a single batch of samples.
Args:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A `tf.data` dataset.
Returns:
Numpy array(s) of predictions.
Raises:
ValueError: In case of mismatch between given number of inputs and
expectations of the model.
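    Example (an illustrative sketch; `model` and `x_batch` are assumed to be
    the compiled toy model and input batch from the example in
    `train_on_batch`):
      predictions = model.predict_on_batch(x_batch)  # Shape (8, 1) here.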
"""
self._check_call_args('predict_on_batch')
if (self._distribution_strategy and
distribute_lib.in_cross_replica_context()):
raise NotImplementedError(
'`predict_on_batch` is not supported for models distributed with'
' tf.distribute.Strategy.')
# Validate and standardize user data.
inputs, _, _ = self._standardize_user_data(
x, extract_tensors_from_dataset=True)
    # If `self._distribution_strategy` is set, then we are in a replica context
# at this point.
if self.run_eagerly or self._distribution_strategy:
inputs = training_utils_v1.cast_if_floating_dtype(inputs)
if isinstance(inputs, collections.abc.Sequence):
# Unwrap lists with only one input, as we do when training on batch
if len(inputs) == 1:
inputs = inputs[0]
return self(inputs) # pylint: disable=not-callable
self._make_predict_function()
outputs = self.predict_function(inputs)
if len(outputs) == 1:
return outputs[0]
return outputs
def fit_generator(self,
generator,
steps_per_epoch=None,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
validation_freq=1,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
shuffle=True,
initial_epoch=0):
"""Fits the model on data yielded batch-by-batch by a Python generator.
DEPRECATED:
`Model.fit` now supports generators, so there is no longer any need to use
this endpoint.
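    Example migration (illustrative): a call such as
    `model.fit_generator(gen, steps_per_epoch=100, epochs=5)` becomes
    `model.fit(gen, steps_per_epoch=100, epochs=5)` with no other changes.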
"""
warnings.warn('`model.fit_generator` is deprecated and '
'will be removed in a future version. '
'Please use `Model.fit`, which supports generators.')
return self.fit(
generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
validation_data=validation_data,
validation_steps=validation_steps,
validation_freq=validation_freq,
class_weight=class_weight,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
shuffle=shuffle,
initial_epoch=initial_epoch)
def evaluate_generator(self,
generator,
steps=None,
callbacks=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
"""Evaluates the model on a data generator.
DEPRECATED:
`Model.evaluate` now supports generators, so there is no longer any need
to use this endpoint.
"""
warnings.warn('`Model.evaluate_generator` is deprecated and '
'will be removed in a future version. '
'Please use `Model.evaluate`, which supports generators.')
self._check_call_args('evaluate_generator')
return self.evaluate(
generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose,
callbacks=callbacks)
def predict_generator(self,
generator,
steps=None,
callbacks=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
verbose=0):
"""Generates predictions for the input samples from a data generator.
DEPRECATED:
`Model.predict` now supports generators, so there is no longer any need
to use this endpoint.
"""
warnings.warn('`Model.predict_generator` is deprecated and '
'will be removed in a future version. '
'Please use `Model.predict`, which supports generators.')
return self.predict(
generator,
steps=steps,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
verbose=verbose,
callbacks=callbacks)
def _check_call_args(self, method_name):
"""Check that `call` has only one positional arg."""
# Always allow first arg, regardless of arg name.
fullargspec = self._call_full_argspec
if fullargspec.defaults:
positional_args = fullargspec.args[:-len(fullargspec.defaults)]
else:
positional_args = fullargspec.args
if 'training' in positional_args:
positional_args.remove('training')
# self and first arg can be positional.
if len(positional_args) > 2:
extra_args = positional_args[2:]
raise ValueError(
'Models passed to `' + method_name + '` can only have `training` '
'and the first argument in `call` as positional arguments, '
'found: ' + str(extra_args) + '.')
def _set_optimizer(self, optimizer):
"""Sets self.optimizer.
Sets self.optimizer to `optimizer`, potentially wrapping it with a
LossScaleOptimizer.
Args:
optimizer: The optimizer(s) to assign to self.optimizer.
"""
if isinstance(optimizer, (list, tuple)):
self.optimizer = [optimizers.get(opt) for opt in optimizer]
else:
self.optimizer = optimizers.get(optimizer)
if isinstance(self._dtype_policy, policy.PolicyV1):
loss_scale = self._dtype_policy.loss_scale
elif self._dtype_policy.name == 'mixed_float16':
loss_scale = 'dynamic'
else:
loss_scale = None
if (loss_scale is not None and
not isinstance(self.optimizer,
loss_scale_optimizer.LossScaleOptimizer)):
if isinstance(self.optimizer, list):
        raise ValueError('When a dtype policy with a loss scale is used, you '
                         'can only pass a single optimizer. Using policy %s '
                         'and got optimizers: %s' %
                         (self._dtype_policy, self.optimizer))
if not isinstance(self.optimizer, optimizer_v2.OptimizerV2):
        raise ValueError('"optimizer" must be an instance of '
                         'tf.keras.optimizers.Optimizer when a dtype policy '
                         'with a loss scale is used, but got: %s. Using '
                         'policy: %s' %
                         (self.optimizer, self._dtype_policy))
if loss_scale == 'dynamic':
self.optimizer = loss_scale_optimizer.LossScaleOptimizer(self.optimizer)
else:
self.optimizer = loss_scale_optimizer.LossScaleOptimizerV1(
self.optimizer, loss_scale)
def _prepare_validation_data(self, validation_data, batch_size,
validation_steps):
"""Unpack and check the validation data."""
val_x, val_y, val_sample_weights = training_utils_v1.unpack_validation_data(
validation_data)
return self._standardize_user_data(
val_x,
val_y,
sample_weight=val_sample_weights,
batch_size=batch_size,
steps=validation_steps,
steps_name='validation_steps')
def _validate_compile_param_for_distribution_strategy(
self, run_eagerly, sample_weight_mode, target_tensors, weighted_metrics):
# Validate that arguments passed by the user to `compile` are supported by
# tf.distribute.Strategy.
if self._distribution_strategy:
if sample_weight_mode:
raise NotImplementedError('sample_weight_mode is not supported with '
'tf.distribute.Strategy.')
if weighted_metrics:
raise NotImplementedError('weighted_metrics is not supported with '
'tf.distribute.Strategy.')
if target_tensors:
raise ValueError('target_tensors is not supported with '
'tf.distribute.Strategy.')
if run_eagerly:
raise ValueError(
'We currently do not support enabling `run_eagerly` with '
'distribution strategy.')
if (distributed_training_utils_v1.is_distributing_by_cloning(self) and
(not self.built or not self.inputs or not self.outputs)):
raise ValueError(
'We currently do not support distribution strategy with a '
'`Sequential` model that is created without `input_shape`/'
'`input_dim` set in its first layer or a subclassed model.')
def _process_target_tensor_for_compile(self, target_tensors):
if self.run_eagerly:
      # Target tensors are not supported with run_eagerly. Create a list with
      # None as a placeholder for each output.
return [None for _ in self.output_names]
if target_tensors is not None and not (isinstance(target_tensors, list) and
target_tensors == []): # pylint: disable=g-explicit-bool-comparison
if isinstance(target_tensors, list):
if len(target_tensors) != len(self.outputs):
raise ValueError(
'When passing a list as `target_tensors`, '
'it should have one entry per model output. '
'The model has %s outputs, but you passed target_tensors=%s' %
(len(self.outputs), target_tensors))
elif isinstance(target_tensors, dict):
unexpected_target_tensor_names = set(target_tensors.keys()).difference(
self.output_names)
if unexpected_target_tensor_names:
raise ValueError(
'Unknown entry in `target_tensors` dictionary: "{name}". '
'Only expected the following keys: {keys}'.format(
name=unexpected_target_tensor_names,
keys=str(self.output_names)))
tmp_target_tensors = []
for name in self.output_names:
tmp_target_tensors.append(target_tensors.get(name, None))
target_tensors = tmp_target_tensors
elif tensor_util.is_tf_type(target_tensors):
target_tensors = [target_tensors]
else:
        raise TypeError('Expected `target_tensors` to be a list or tuple or '
                        'dict or a single tensor, but got: %s' %
                        (target_tensors,))
else:
# In case target tensor is empty or None, create a list with Nones
# that has same length as self.output_names. With that, the None check of
# target tensor can be skipped downstream.
target_tensors = [None for _ in self.output_names]
return target_tensors
def _compile_eagerly(self, metrics, weighted_metrics, sample_weight_mode):
# Prepare sample weight modes. List with the same length as model outputs.
training_utils_v1.prepare_sample_weight_modes(
self._training_endpoints, sample_weight_mode)
# Prepare sample weights.
self._prepare_sample_weights()
# Save all metric attributes per output of the model.
self._cache_output_metric_attributes(metrics, weighted_metrics)
self.total_loss = None
# Set metric attributes on model.
self._set_metric_attributes()
self._collected_trainable_weights = self.trainable_weights
def _update_sample_weight_modes(self, sample_weights=None):
"""Updates sample weight modes based on training/eval inputs.
Sample weight placeholders will be created for all or no outputs
based on whether sample_weight is provided for any output.
    If the model contains `_sample_weight_modes`, we check whether the input
`sample_weights` corresponds to the sample weight modes.
1. Set sample weight mode to be 'temporal' for output i, if `compile`
sample_weight_mode was set to `temporal` and sample weight inputs
are given for one or more outputs.
2. Set sample weight mode to be 'samplewise' for output i, if `compile`
sample_weight_mode was not set and sample weight inputs are given for
one or more outputs.
3. Reset sample weight mode to None for output i if sample weight mode
was set but there is no sample weight input.
Args:
sample_weights: List of sample weights of the same length as model outputs
or None.
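    Example (illustrative): for a two-output model compiled without a
    `sample_weight_mode`, supplying a sample weight for only one of the two
    outputs sets the mode of every endpoint without a compile-time mode to
    'samplewise'; a later call with no sample weights resets all modes to
    None.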
"""
if not self._is_compiled:
return
if sample_weights and any(s is not None for s in sample_weights):
for endpoint in self._training_endpoints:
endpoint.sample_weight_mode = (
endpoint.sample_weight_mode or 'samplewise')
else:
for endpoint in self._training_endpoints:
endpoint.sample_weight_mode = None
def _recompile_weights_loss_and_weighted_metrics(self):
if not self._is_compiled:
return False
recompile = any(
e.sample_weights_mismatch() for e in self._training_endpoints)
if recompile:
self._compile_weights_loss_and_weighted_metrics()
return recompile
@trackable.no_automatic_dependency_tracking
def _compile_weights_loss_and_weighted_metrics(self, sample_weights=None):
"""Compiles the model loss and weighted metric sub-graphs.
This may be used to set graph tensors as sample weights (instead of creating
placeholders).
Args:
sample_weights: List of tensors to use as the sample weights. Must be the
same length as the number of outputs. If left as `None`, placeholders
are used instead.
"""
with backend.get_graph().as_default():
if sample_weights is not None:
self._update_sample_weight_modes(sample_weights)
self._prepare_sample_weights(sample_weights)
masks = self._prepare_output_masks()
# Compute weighted metrics.
self._handle_metrics(
self.outputs,
targets=self._targets,
skip_target_masks=self._prepare_skip_target_masks(),
sample_weights=self.sample_weights,
masks=masks,
return_weighted_metrics=True)
# Compute total loss.
# Used to keep track of the total loss value (stateless).
# eg., total_loss = loss_weight_1 * output_1_loss_fn(...) +
# loss_weight_2 * output_2_loss_fn(...) +
# layer losses.
self.total_loss = self._prepare_total_loss(masks)
def _prepare_skip_target_masks(self):
"""Boolean mask for whether the target in the output list should be skipped.
If the loss function corresponding to a model output is None, then this
output will be skipped during total loss calculation and feed targets
preparation.
Returns:
A boolean list for whether the corresponding target in the output list
should be skipped during loss calculation.
"""
return [l is None for l in self.loss_functions]
def _prepare_output_masks(self):
"""Returns masks corresponding to model outputs."""
return [getattr(x, '_keras_mask', None) for x in self.outputs]
def _prepare_total_loss(self, masks):
"""Computes total loss from loss functions.
Args:
masks: List of mask values corresponding to each model output.
    Returns:
      Total loss tensor: the weighted sum of the per-output losses plus any
      layer regularization losses (or `0.` if there is nothing to add).
    Raises:
      TypeError: If the model was compiled with `run_eagerly=True`.
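    Conceptually, the result is (an illustrative sketch; the names below are
    placeholders, not the actual graph ops):
      total_loss = sum(
          loss_weight[i] * weighted_reduce(loss_fn[i](y_true[i], y_pred[i]),
                                           sample_weight[i])
          for i in non_skipped_outputs) + sum(layer_losses)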
"""
if self.run_eagerly:
      raise TypeError('Total loss cannot be computed when the model is '
                      'compiled with `run_eagerly=True`.')
loss_list = []
with backend.name_scope('loss'):
for endpoint, mask in zip(self._training_endpoints, masks):
if endpoint.should_skip_target():
continue
y_true = endpoint.training_target.target
y_pred = endpoint.output
loss_fn = endpoint.loss_fn
loss_weight = endpoint.loss_weight
loss_name = endpoint.loss_name()
sample_weight = endpoint.sample_weight
with backend.name_scope(loss_name):
if mask is not None:
mask = math_ops.cast(mask, y_pred.dtype)
# Update weights with mask.
if sample_weight is None:
sample_weight = mask
else:
# Update dimensions of weights to match with mask if possible.
mask, _, sample_weight = (
losses_utils.squeeze_or_expand_dimensions(
mask, sample_weight=sample_weight))
sample_weight *= mask
if hasattr(loss_fn, 'reduction'):
per_sample_losses = loss_fn.call(y_true, y_pred)
weighted_losses = losses_utils.compute_weighted_loss(
per_sample_losses,
sample_weight=sample_weight,
reduction=losses_utils.ReductionV2.NONE)
loss_reduction = loss_fn.reduction
# `AUTO` loss reduction defaults to `SUM_OVER_BATCH_SIZE` for all
# compile use cases.
if loss_reduction == losses_utils.ReductionV2.AUTO:
loss_reduction = losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE
# Compute the stateless loss value.
output_loss = losses_utils.reduce_weighted_loss(
weighted_losses, reduction=loss_reduction)
else:
# Compute the stateless loss value for a custom loss class.
# Here we assume that the class takes care of loss reduction
# because if this class returns a vector value we cannot
            # differentiate between the case where a custom optimizer
            # expects a vector loss value and the case of an unreduced
            # per-sample loss value.
output_loss = loss_fn(y_true, y_pred, sample_weight=sample_weight)
loss_reduction = losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE
if len(self.outputs) > 1:
# Keep track of stateful result tensor for the loss.
endpoint.output_loss_metric(output_loss)
# Scale output loss for distribution. For custom losses we assume
# reduction was mean.
if loss_reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE:
output_loss = losses_utils.scale_loss_for_distribution(output_loss)
loss_list.append(loss_weight * output_loss)
if not loss_list and not self.losses:
raise ValueError('The model cannot be compiled '
'because it has no loss to optimize.')
# Add regularization penalties and other layer-specific losses.
custom_losses = self.get_losses_for(None) + self.get_losses_for(
self.inputs)
if custom_losses:
total_custom_loss = math_ops.add_n(
losses_utils.cast_losses_to_common_dtype(custom_losses))
loss_list.append(
losses_utils.scale_loss_for_distribution(total_custom_loss))
loss_list = losses_utils.cast_losses_to_common_dtype(loss_list)
if loss_list:
total_loss = math_ops.add_n(loss_list)
else:
total_loss = 0.
return total_loss
def _get_callback_model(self):
"""Returns the Callback Model for this Model."""
if hasattr(self, '_replicated_model') and self._replicated_model:
# When using training_distributed, we set the callback model
# to an instance of the `DistributedModel` that we create in
# the `compile` call. The `DistributedModel` is initialized
# with the first replicated model. We need to set the callback
# model to a DistributedModel to allow us to override saving
# and loading weights when we checkpoint the model during training.
return self._replicated_model
if hasattr(self, 'callback_model') and self.callback_model:
return self.callback_model
return self
@trackable.no_automatic_dependency_tracking
def _make_callback_model(self, grouped_model):
first_replicated_model = self._distribution_strategy.unwrap(
grouped_model)[0]
# We initialize the callback model with the first replicated model.
self._replicated_model = DistributedCallbackModel(first_replicated_model)
self._replicated_model.set_original_model(self)
def _validate_or_infer_batch_size(self, batch_size, steps, x):
"""Validates that the `batch_size` provided is consistent with InputLayer.
It's possible that the user specified a static batch size in their
InputLayer. If so, this method checks the provided `batch_size` and `x`
arguments are consistent with this static batch size. Also, if
`batch_size` is `None`, this method will attempt to infer the batch size
from the static batch size of the InputLayer. Lastly, ValueError will be
    raised if `x` is a tf.data.Dataset and `batch_size` is specified, as we
expect users to provide batched datasets.
Args:
batch_size: The batch_size provided as an argument to
fit/evaluate/predict.
steps: The steps provided as an argument to fit/evaluate/predict.
x: The data passed as `x` to fit/evaluate/predict.
Returns:
The validated batch_size, auto-inferred from the first layer if not
provided.
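    Example (illustrative): with a strategy running 2 replicas and
    `batch_size=32`, each replica receives 32 // 2 = 16 samples, so an
    InputLayer declared with a static batch size of 16 is consistent, while
    one declared with a static batch size of 32 raises a ValueError.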
"""
if (isinstance(x, (data_types.DatasetV1,
data_types.DatasetV2,
data_utils.Sequence)) or
tf_inspect.isgenerator(x)):
if batch_size is not None:
raise ValueError(
'The `batch_size` argument must not be specified for the given '
'input type. Received input: {}, batch_size: {}'.format(
x, batch_size))
return
# Avoids the override in Sequential.layers which filters Input layers.
# (Which are often the very layers that we're after.)
layers = self._flatten_layers(include_self=False, recursive=False)
first_layer = next(layers, None)
if first_layer:
# The per-replica static batch size.
static_batch_size = training_utils.get_static_batch_size(first_layer)
if static_batch_size is not None:
# Determine number of times the user-supplied batch size will be split.
if (self._distribution_strategy and
distributed_training_utils.global_batch_size_supported(
self._distribution_strategy)):
num_splits_for_ds = self._distribution_strategy.num_replicas_in_sync
else:
num_splits_for_ds = 1
# Check `batch_size` argument is consistent with InputLayer.
if batch_size is not None:
if batch_size % num_splits_for_ds != 0:
raise ValueError('The `batch_size` argument ({}) must be divisible '
                           'by the number of replicas ({})'.format(
batch_size, num_splits_for_ds))
per_replica_batch_size = batch_size // num_splits_for_ds
if per_replica_batch_size != static_batch_size:
raise ValueError('The `batch_size` argument value {} is '
'incompatible with the specified batch size of '
'your Input Layer: {}'.format(
per_replica_batch_size, static_batch_size))
# Check Dataset/Iterator batch size is consistent with InputLayer.
if isinstance(x, (data_types.DatasetV2, iterator_ops.Iterator,
iterator_ops.IteratorBase)):
ds_batch_size = tensor_shape.Dimension(
nest.flatten(dataset_ops.get_legacy_output_shapes(x))[0][0]).value
if ds_batch_size is not None:
if ds_batch_size % num_splits_for_ds != 0:
raise ValueError(
                'The batch output shape of your `Dataset` {} '
                'is not divisible by the number of replicas {}'.format(
ds_batch_size, num_splits_for_ds))
ds_per_replica_batch_size = ds_batch_size // num_splits_for_ds
if ds_per_replica_batch_size != static_batch_size:
raise ValueError('The batch output shape of your `Dataset` is '
'{}, which is incompatible with the specified '
'batch size of your Input Layer: {}'.format(
ds_per_replica_batch_size,
static_batch_size))
# Set inferred batch size from the InputLayer.
if steps is None:
batch_size = static_batch_size * num_splits_for_ds
if batch_size is None and steps is None:
# Backwards compatibility
batch_size = 32
return batch_size
def _prepare_sample_weights(self, sample_weights=None):
"""Sets sample weight attribute on the model."""
# List with the same length as model outputs.
if sample_weights is not None:
if len(sample_weights) != len(self._training_endpoints):
        raise ValueError('Provided sample weights must have the same length '
                         'as the number of outputs. Expected: {}, got: '
                         '{}.'.format(
len(self._training_endpoints),
len(sample_weights)))
else:
sample_weights = [None] * len(self._training_endpoints)
for endpoint, weight in zip(self._training_endpoints, sample_weights):
endpoint.populate_sample_weight(weight, endpoint.sample_weight_mode)
def _cache_output_metric_attributes(self, metrics, weighted_metrics):
"""Caches metric name and function attributes for every model output."""
output_shapes = []
for output in self.outputs:
if output is None or output.shape.rank is None:
output_shapes.append(None)
else:
output_shapes.append(output.shape.as_list())
self._per_output_metrics = training_utils_v1.collect_per_output_metric_info(
metrics, self.output_names, output_shapes, self.loss_functions,
from_serialized=self._from_serialized)
self._per_output_weighted_metrics = (
training_utils_v1.collect_per_output_metric_info(
weighted_metrics,
self.output_names,
output_shapes,
self.loss_functions,
from_serialized=self._from_serialized,
is_weighted=True))
def _add_unique_metric_name(self, metric_name, metric_fn, output_index):
"""Makes the metric name unique.
If there are multiple outputs for which the metrics are calculated, the
metric names have to be made unique by appending an integer.
Args:
metric_name: Metric name that corresponds to the metric specified by the
user. For example: 'acc'.
metric_fn: The Metric object.
output_index: The index of the model output for which the metric name is
being added.
Returns:
string, name of the model's unique metric name
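    Example (illustrative): with `output_names == ['out_a', 'out_b']`, the
    metric name 'acc' for output index 0 becomes 'out_a_acc'; if that name is
    already present in `metrics_names`, the next candidate tried is
    'out_a_acc_1'.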
"""
# For multi-output models, prepend the output names to the metric name.
if len(self.output_names) > 1:
# If we're loading from an already-serialized model, we've already
# prepended the output name, and we don't want to do it again.
#
# Alternatively, we may be receiving a stateless metric (e.g. the string
# "accuracy") rather than a `Metric` object, in which case we want to
# prepend the output name even if we are loading a serialized model.
if not getattr(metric_fn, '_from_serialized', False):
metric_name = '%s_%s' % (self.output_names[output_index], metric_name)
j = 1
base_metric_name = metric_name
while metric_name in self.metrics_names:
metric_name = '%s_%d' % (base_metric_name, j)
j += 1
return metric_name
def _init_metric_attributes(self):
"""Initialized model metric attributes."""
# List of stateful metric functions. Used for resetting metric state during
# training/eval.
self._compile_metric_functions = []
def _set_per_output_metric_attributes(self, metrics_dict, output_index):
"""Sets the metric attributes on the model for the given output.
Args:
metrics_dict: A dict with metric names as keys and metric fns as values.
output_index: The index of the model output for which the metric
attributes are added.
Returns:
Metrics dict updated with unique metric names as keys.
"""
updated_metrics_dict = collections.OrderedDict()
for metric_name, metric_fn in metrics_dict.items():
metric_name = self._add_unique_metric_name(
metric_name, metric_fn, output_index)
# Update the name on the metric class to be the unique generated name.
metric_fn._name = metric_name # pylint: disable=protected-access
updated_metrics_dict[metric_name] = metric_fn
# Keep track of metric name and function.
self._compile_metric_functions.append(metric_fn)
return updated_metrics_dict
def _set_metric_attributes(self):
"""Sets the metric attributes on the model for all the model outputs."""
updated_per_output_metrics = []
updated_per_output_weighted_metrics = []
for i, endpoint in enumerate(self._training_endpoints):
if endpoint.should_skip_target():
updated_per_output_metrics.append(self._per_output_metrics[i])
updated_per_output_weighted_metrics.append(
self._per_output_weighted_metrics[i])
continue
updated_per_output_metrics.append(
self._set_per_output_metric_attributes(self._per_output_metrics[i],
i))
updated_per_output_weighted_metrics.append(
self._set_per_output_metric_attributes(
self._per_output_weighted_metrics[i], i))
# Create a metric wrapper for each output loss. This computes mean of an
# output loss across mini-batches (irrespective of how we reduce within a
# batch).
if len(self._training_endpoints) > 1:
for endpoint in self._training_endpoints:
if not endpoint.should_skip_target():
endpoint.output_loss_metric = metrics_module.Mean(
name=endpoint.loss_name())
self._per_output_metrics = updated_per_output_metrics
self._per_output_weighted_metrics = updated_per_output_weighted_metrics
def _handle_per_output_metrics(self,
metrics_dict,
y_true,
y_pred,
mask,
weights=None):
"""Calls metric functions for a single output.
Args:
metrics_dict: A dict with metric names as keys and metric fns as values.
y_true: Target output.
y_pred: Predicted output.
mask: Computed mask value for the current output.
weights: Weights to be applied on the current output.
Returns:
A list of metric result tensors.
"""
metric_results = []
for metric_name, metric_fn in metrics_dict.items():
with backend.name_scope(metric_name):
metric_result = training_utils_v1.call_metric_function(
metric_fn, y_true, y_pred, weights=weights, mask=mask)
metric_results.append(metric_result)
return metric_results
def _handle_metrics(self,
outputs,
targets=None,
skip_target_masks=None,
sample_weights=None,
masks=None,
return_weighted_metrics=False,
return_weighted_and_unweighted_metrics=False):
"""Handles calling metric functions.
Args:
outputs: List of outputs (predictions).
targets: List of targets.
      skip_target_masks: Optional. List of booleans indicating whether the
        corresponding target should be ignored.
sample_weights: Optional list of sample weight arrays.
masks: List of computed output mask values.
return_weighted_metrics: Flag that indicates whether weighted metrics
should be computed instead of unweighted metrics. This flag is ignored
when `return_weighted_and_unweighted_metrics` is enabled.
return_weighted_and_unweighted_metrics: Flag that is used to indicate
whether both weighted and unweighted metrics should be computed. When
this is not enabled, we use `return_weighted_metrics` param to indicate
whether weighted or unweighted metrics should be returned.
Returns:
A list of metric result tensors.
"""
# TODO(scottzhu): Update this to use the new training_endpoints. Currently
    # the eager and graph logic is a bit different.
skip_target_masks = skip_target_masks or [False] * len(outputs)
metric_results = []
with backend.name_scope('metrics'):
# Invoke all metrics added using `compile`.
for i in range(len(outputs)):
if skip_target_masks[i]:
continue
output = outputs[i] if outputs else None
target = targets[i] if targets else None
output_mask = masks[i] if masks else None
if (return_weighted_and_unweighted_metrics or
not return_weighted_metrics):
metric_results.extend(
self._handle_per_output_metrics(self._per_output_metrics[i],
target, output, output_mask))
if return_weighted_and_unweighted_metrics or return_weighted_metrics:
metric_results.extend(
self._handle_per_output_metrics(
self._per_output_weighted_metrics[i],
target,
output,
output_mask,
weights=sample_weights[i] if sample_weights else None))
return metric_results
def _check_trainable_weights_consistency(self):
"""Check trainable weights count consistency.
This will raise a warning if `trainable_weights` and
`_collected_trainable_weights` are inconsistent (i.e. have different
number of parameters).
Inconsistency will typically arise when one modifies `model.trainable`
without calling `model.compile` again.
"""
if not hasattr(self, '_collected_trainable_weights'):
return
if len(self.trainable_weights) != len(self._collected_trainable_weights):
logging.log_first_n(
logging.WARN, 'Discrepancy between trainable weights and collected'
' trainable weights, did you set `model.trainable`'
          ' without calling `model.compile` afterwards?', 1)
def _make_train_function(self):
has_recompiled = self._recompile_weights_loss_and_weighted_metrics()
self._check_trainable_weights_consistency()
if isinstance(self.optimizer, list):
raise ValueError('The `optimizer` in `compile` should be a single '
'optimizer.')
# If we have re-compiled the loss/weighted metric sub-graphs then create
# train function even if one exists already. This is because
# `_feed_sample_weights` list has been updated on re-compile.
if getattr(self, 'train_function', None) is None or has_recompiled:
# Restore the compiled trainable state.
current_trainable_state = self._get_trainable_state()
self._set_trainable_state(self._compiled_trainable_state)
inputs = (self._feed_inputs +
self._feed_targets +
self._feed_sample_weights)
if not isinstance(backend.symbolic_learning_phase(), int):
inputs += [backend.symbolic_learning_phase()]
with backend.get_graph().as_default():
with backend.name_scope('training'):
# Training updates
updates = self.optimizer.get_updates(
params=self._collected_trainable_weights, loss=self.total_loss)
# Unconditional updates
updates += self.get_updates_for(None)
# Conditional updates relevant to this model
updates += self.get_updates_for(self.inputs)
metrics = self._get_training_eval_metrics()
metrics_tensors = [
m._call_result for m in metrics if hasattr(m, '_call_result') # pylint: disable=protected-access
]
with backend.name_scope('training'):
# Gets loss and metrics. Updates weights at each call.
fn = backend.function(
inputs, [self.total_loss] + metrics_tensors,
updates=updates,
name='train_function',
**self._function_kwargs)
setattr(self, 'train_function', fn)
# Restore the current trainable state
self._set_trainable_state(current_trainable_state)
def _make_test_function(self):
has_recompiled = self._recompile_weights_loss_and_weighted_metrics()
# If we have re-compiled the loss/weighted metric sub-graphs then create
# test function even if one exists already. This is because
# `_feed_sample_weights` list has been updated on re-compile.
if getattr(self, 'test_function', None) is None or has_recompiled:
inputs = (self._feed_inputs +
self._feed_targets +
self._feed_sample_weights)
with backend.get_graph().as_default():
metrics = self._get_training_eval_metrics()
metrics_tensors = [
m._call_result for m in metrics if hasattr(m, '_call_result') # pylint: disable=protected-access
]
with backend.name_scope('evaluation'):
updates = self.state_updates
# Return loss and metrics, no gradient updates.
# Does update the network states.
fn = backend.function(
inputs, [self.total_loss] + metrics_tensors,
updates=updates,
name='test_function',
**self._function_kwargs)
setattr(self, 'test_function', fn)
def _make_predict_function(self):
if not hasattr(self, 'predict_function'):
self.predict_function = None
if self.predict_function is None:
inputs = self._feed_inputs
# Gets network outputs. Does not update weights.
# Does update the network states.
kwargs = getattr(self, '_function_kwargs', {})
with backend.name_scope(ModeKeys.PREDICT):
self.predict_function = backend.function(
inputs,
self.outputs,
updates=self.state_updates,
name='predict_function',
**kwargs)
def _make_execution_function(self, mode):
if mode == ModeKeys.TRAIN:
self._make_train_function()
return self.train_function
if mode == ModeKeys.TEST:
self._make_test_function()
return self.test_function
if mode == ModeKeys.PREDICT:
self._make_predict_function()
return self.predict_function
def _distribution_standardize_user_data(self,
x,
y=None,
sample_weight=None,
class_weight=None,
batch_size=None,
validation_split=0,
shuffle=False,
epochs=1,
allow_partial_batch=False):
"""Runs validation checks on input and target data passed by the user.
This is called when using tf.distribute.Strategy to train, evaluate or serve
the model.
Args:
x: Input data. A numpy array or `tf.data` dataset.
y: Target data. A numpy array or None if x is a `tf.data` dataset.
sample_weight: An optional sample-weight array passed by the user to
weight the importance of each sample in `x`.
      class_weight: An optional class-weight array passed by the user to
weight the importance of samples in `x` based on the class they belong
to, as conveyed by `y`.
batch_size: Integer batch size. If provided, it is used to run additional
validation checks on stateful models.
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
shuffle: Boolean whether to shuffle the training data before each epoch.
epochs: Integer epochs. If > 1, repeat the numpy training data epochs
times when converting to training dataset.
      allow_partial_batch: Boolean. Whether to allow a final batch smaller
        than `batch_size`. If False, remainder batches may be dropped so that
        all batches have the same static size.
Returns:
Dataset instance.
Raises:
ValueError: In case of invalid user-provided data.
RuntimeError: If the model was never compiled.
"""
if class_weight:
raise NotImplementedError('`class_weight` is currently not supported '
'when using tf.distribute.Strategy.')
if (sample_weight is not None and sample_weight.all() and
backend.is_tpu_strategy(self._distribution_strategy)):
raise NotImplementedError('`sample_weight` is currently not supported '
'when using TPUStrategy.')
# Validates `steps` and `shuffle` arguments right at the beginning
# since we use it to construct the dataset object.
# TODO(anjalisridhar): Remove this check once we refactor the
# _standardize_user_data code path. This check is already present elsewhere
# in the codebase.
if isinstance(x, data_types.DatasetV2):
if shuffle:
training_utils_v1.verify_dataset_shuffled(x)
strategy = self._distribution_strategy
with strategy.scope():
# We should be sure to call get_session() inside the strategy.scope()
# so the strategy can affect the session options.
if ops.executing_eagerly_outside_functions():
session = None
else:
session = backend.get_session()
first_x_value = nest.flatten(x)[0]
if isinstance(first_x_value, np.ndarray):
x = training_utils.list_to_tuple(x)
if y is not None:
y = training_utils.list_to_tuple(y)
if sample_weight is not None:
sample_weight = training_utils.list_to_tuple(sample_weight)
in_tuple = (x, y, sample_weight)
else:
in_tuple = (x, y)
else:
in_tuple = x
ds = strategy.extended.experimental_make_numpy_dataset(in_tuple,
session=session)
if shuffle:
# We want a buffer size that is larger than the batch size provided by
# the user and provides sufficient randomness. Note that larger
# numbers introduce more memory usage based on the size of each
# sample.
ds = ds.shuffle(max(1024, batch_size * 8))
if epochs > 1:
ds = ds.repeat(epochs)
# We need to use the drop_remainder argument to get a known static
# input shape which is required for TPUs.
drop_remainder = (not allow_partial_batch and
strategy.extended.experimental_require_static_shapes)
# TODO(b/131720208): We still drop remainder here if number of examples
# is divisible by batch size, as sometimes dynamic padder will time out
# with keras.metrics.CategoricalAccuracy() metric.
if backend.is_tpu_strategy(strategy) and not drop_remainder:
dataset_size = first_x_value.shape[0]
if dataset_size % batch_size == 0:
drop_remainder = True
x = ds.batch(batch_size, drop_remainder=drop_remainder)
else:
assert isinstance(x, data_types.DatasetV2)
training_utils_v1.validate_dataset_input(x, y, sample_weight,
validation_split)
return x
def _standardize_user_data(self,
x,
y=None,
sample_weight=None,
class_weight=None,
batch_size=None,
check_steps=False,
steps_name='steps',
steps=None,
validation_split=0,
shuffle=False,
extract_tensors_from_dataset=False):
"""Runs validation checks on input and target data passed by the user.
Also standardizes the data to lists of arrays, in order.
Also builds and compiles the model on the fly if it is a subclassed model
that has never been called before (and thus has no inputs/outputs).
This is a purely internal method, subject to refactoring at any time.
Args:
x: Input data. It could be:
- A Numpy array (or array-like), or a list of arrays
(in case the model has multiple inputs).
- A TensorFlow tensor, or a list of tensors
(in case the model has multiple inputs).
- A dict mapping input names to the corresponding array/tensors,
if the model has named inputs.
- A `tf.data` dataset.
y: Target data. Like the input data `x`,
it could be either Numpy array(s) or TensorFlow tensor(s).
It should be consistent with `x` (you cannot have Numpy inputs and
tensor targets, or inversely). If `x` is a dataset, `y` should not be
specified (since targets will be obtained from the iterator).
sample_weight: An optional sample-weight array passed by the user to
weight the importance of each sample in `x`.
class_weight: An optional class-weight array passed by the user to
weight the importance of samples in `x` based on the class they belong
to, as conveyed by `y`. If both `sample_weight` and `class_weight` are
provided, the weights are multiplied.
batch_size: Integer batch size. If provided, it is used to run additional
validation checks on stateful models.
check_steps: boolean, True if we want to check the validity of `steps`, and
False otherwise. For example, when we are standardizing one batch of
data for train_on_batch/predict_on_batch/test_on_batch APIs, `steps`
value is not required and we should not check for its validity in these
cases.
steps_name: The public API's parameter name for `steps`.
steps: Integer or `None`. Total number of steps (batches of samples) to
execute.
validation_split: Float between 0 and 1.
Fraction of the training data to be used as validation data.
shuffle: Boolean whether to shuffle the training data before each epoch.
extract_tensors_from_dataset: Boolean. When `x` is a dataset instance,
this indicates whether to extract actual tensors from the dataset or
instead output the dataset instance itself.
Set to True when calling from `train_on_batch`/etc.
Returns:
A tuple of 3: inputs (arrays or dicts, depending on whether `x` was a dict
or not), target arrays, sample-weight arrays.
If the model's input and targets are symbolic, these lists are empty
(since the model takes no user-provided data, instead the data comes
from the symbolic inputs/targets).
Raises:
ValueError: In case of invalid user-provided data.
RuntimeError: If the model was never compiled.
"""
if isinstance(x, (data_types.DatasetV1, data_types.DatasetV2)):
# Graph mode dataset. We'll pass the dataset as-is (unless
# `extract_tensors_from_dataset` is True, in which case we extract
# the tensors from the dataset and output them).
training_utils_v1.validate_dataset_input(x, y, sample_weight,
validation_split)
if shuffle:
training_utils_v1.verify_dataset_shuffled(x)
is_dataset = True
if extract_tensors_from_dataset:
# We do this for `train_on_batch`/etc.
x, y, sample_weight = training_utils_v1.extract_tensors_from_dataset(x)
elif isinstance(x, iterator_ops.Iterator):
# Graph mode iterator. We extract the symbolic tensors.
training_utils_v1.validate_dataset_input(x, y, sample_weight,
validation_split)
iterator = x
x, y, sample_weight = training_utils_v1.unpack_iterator_input(iterator)
is_dataset = True
else:
is_dataset = False
# Validates `steps` argument based on x's type.
if check_steps:
training_utils_v1.check_steps_argument(x, steps, steps_name)
# First, we build the model on the fly if necessary.
if not self.inputs:
all_inputs, y_input, dict_inputs = self._build_model_with_inputs(x, y)
is_build_called = True
else:
all_inputs = []
# Whether this is a subclassed model that expects dictionary inputs
# rather than list inputs (e.g. FeatureColumn-based models).
dict_inputs = isinstance(self.inputs, dict)
is_build_called = False
y_input = y
# Second, we compile the model on the fly if necessary, mostly for subclass
# models.
is_compile_called = False
if not self._is_compiled and self.optimizer:
self._compile_from_inputs(all_inputs, y_input, x, y)
is_compile_called = True
# In graph mode, if we had just set inputs and targets as symbolic tensors
# by invoking build and compile on the model respectively, we do not have to
# feed anything to the model. Model already has input and target data as
# part of the graph.
# Note: in this case, `any` and `all` are equivalent since we disallow
# mixed symbolic/value inputs.
# self.run_eagerly is not free to compute, so we want to reuse the value.
run_eagerly = self.run_eagerly
if (not run_eagerly and is_build_called and is_compile_called and
not is_dataset and any(_is_symbolic_tensor(v) for v in all_inputs)):
return [], [], None
return self._standardize_tensors(
x, y, sample_weight,
run_eagerly=run_eagerly,
dict_inputs=dict_inputs,
is_dataset=is_dataset,
class_weight=class_weight,
batch_size=batch_size)
def _standardize_tensors(self, x, y, sample_weight, run_eagerly, dict_inputs,
is_dataset, class_weight=None, batch_size=None):
if run_eagerly:
# In eager mode, do not do shape validation
# since the network has no input nodes (placeholders) to be fed.
feed_input_names = self.input_names
feed_input_shapes = None
elif not self._is_graph_network:
# Case: symbolic-mode subclassed network. Do not do shape validation.
feed_input_names = self._feed_input_names
feed_input_shapes = None
else:
# Case: symbolic-mode graph network.
# In this case, we run extensive shape validation checks.
feed_input_names = self._feed_input_names
feed_input_shapes = self._feed_input_shapes
# Standardize the inputs.
if not isinstance(x, (data_types.DatasetV1, data_types.DatasetV2)):
# TODO(fchollet): run static checks with dataset output shape(s).
x = training_utils_v1.standardize_input_data(
x,
feed_input_names,
feed_input_shapes,
check_batch_axis=False, # Don't enforce the batch size.
exception_prefix='input')
# Get typespecs for the input data and sanitize it if necessary.
# TODO(momernick): This should be capable of doing full input validation
# at all times - validate that this is so and refactor the standardization
# code.
if isinstance(x, data_types.DatasetV2):
x_shapes = dataset_ops.get_structure(x)
if isinstance(x_shapes, tuple):
# If the output of a Dataset is a tuple, we assume it's either of the
# form (x_data, y_data) or (x_data, y_data, sample_weights). In either
# case, we only care about x_data here.
x_shapes = x_shapes[0]
else:
flat_inputs = nest.flatten(x, expand_composites=False)
flat_expected_inputs = nest.flatten(self.inputs, expand_composites=False)
converted_x = []
for (a, b) in zip(flat_inputs, flat_expected_inputs):
converted_x.append(_convert_scipy_sparse_tensor(a, b))
x = nest.pack_sequence_as(x, converted_x, expand_composites=False)
def _type_spec_from_value(value):
"""Grab type_spec without converting array-likes to tensors."""
if tf_utils.is_extension_type(value):
return value._type_spec # pylint: disable=protected-access
# Get a TensorSpec for array-like data without
# converting the data to a Tensor
if hasattr(value, 'shape') and hasattr(value, 'dtype'):
return tensor_spec.TensorSpec(value.shape, value.dtype)
else:
return type_spec.type_spec_from_value(value)
x_shapes = nest.map_structure(_type_spec_from_value, x)
flat_inputs = nest.flatten(x_shapes, expand_composites=False)
flat_expected_inputs = nest.flatten(self.inputs, expand_composites=False)
for (a, b) in zip(flat_inputs, flat_expected_inputs):
nest.assert_same_structure(a, b, expand_composites=True)
if y is not None:
# Prepare self._sample_weight_modes. List with the same length as
# model outputs.
training_utils_v1.prepare_sample_weight_modes(self._training_endpoints,
self.sample_weight_mode)
feed_output_names = self._feed_output_names
feed_sample_weight_modes = self._sample_weight_modes
if not self._is_graph_network:
feed_output_shapes = None
else:
feed_output_shapes = self._feed_output_shapes
# Standardize the outputs.
y = training_utils_v1.standardize_input_data(
y,
feed_output_names,
# Don't enforce target shapes to match output shapes.
# Precise checks will be run in `check_loss_and_target_compatibility`.
shapes=None,
check_batch_axis=False, # Don't enforce the batch size.
exception_prefix='target')
# Generate sample-wise weight values given the `sample_weight` and
# `class_weight` arguments.
sample_weights = training_utils_v1.standardize_sample_weights(
sample_weight, feed_output_names)
class_weights = training_utils_v1.standardize_class_weights(
class_weight, feed_output_names)
sample_weights = [
training_utils_v1.standardize_weights(ref, sw, cw, mode)
for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights,
feed_sample_weight_modes)
]
# Check that all arrays have the same length.
if not self._distribution_strategy:
training_utils_v1.check_array_lengths(x, y, sample_weights)
if self._is_graph_network and not run_eagerly:
# Additional checks to avoid users mistakenly using improper loss fns.
training_utils_v1.check_loss_and_target_compatibility(
y, self._feed_loss_fns, feed_output_shapes)
sample_weights, _, _ = training_utils.handle_partial_sample_weights(
y, sample_weights, feed_sample_weight_modes, check_all_flat=True)
else:
y = []
sample_weights = None
if self.stateful and batch_size and not is_dataset:
# Check that for stateful networks, number of samples is a multiple
# of the static batch size.
if x[0].shape[0] % batch_size != 0:
raise ValueError('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples')
# If dictionary inputs were provided, we return a dictionary as well.
if dict_inputs and not isinstance(x, (data_types.DatasetV1,
data_types.DatasetV2)):
x = dict(zip(feed_input_names, x))
return x, y, sample_weights
def _build_model_with_inputs(self, inputs, targets):
"""Build the model (set model inputs/outputs), mainly for subclass model."""
processed_inputs = []
is_dict_inputs = False
orig_inputs = inputs
# We need to use `inputs` to set the model inputs.
# If input data is a dataset iterator in graph mode or if it is an eager
# iterator and only one batch of samples is required, we fetch the data
# tensors from the iterator and then standardize them.
if isinstance(inputs, (data_types.DatasetV1, data_types.DatasetV2)):
inputs, targets, _ = training_utils_v1.extract_tensors_from_dataset(
inputs)
# We type-check that `inputs` and `targets` are either single arrays
# or lists of arrays, and extract a flat list of inputs from the passed
# structure.
training_utils_v1.validate_input_types(inputs, orig_inputs)
if isinstance(inputs, (list, tuple)):
processed_inputs += list(inputs)
elif isinstance(inputs, dict):
is_dict_inputs = True
keys = sorted(inputs.keys())
processed_inputs = [inputs[k] for k in keys]
else:
processed_inputs.append(inputs)
# Now that we have a flat set of inputs, we make sure that none of them
# are CompositeTensors or CompositeTensorValues of any type (or scipy
# sparse arrays, which we treat as SparseTensor values). We cannot safely
# infer input data from an arbitrary composite tensor, so we don't try -
# users should explicitly add composite tensor inputs to their subclassed
# models.
for input_tensor in processed_inputs:
if training_utils_v1.is_composite_or_composite_value(input_tensor):
# TODO(b/132691975): Document subclass-model CT input handling.
raise ValueError(
'All SparseTensor and RaggedTensor inputs must be explicitly '
'declared using a keras.Input() with sparse=True or ragged=True. '
'We found an undeclared input %s. For Sequential models, please '
'add a keras.Input() as your first Layer. For subclassed models, '
'please call self._set_inputs() on your input set, which you can '
'create using keras.Input() for each input to your model.' %
(input_tensor,))
# Build the model using the retrieved inputs (value or symbolic).
# If values are generated from a dataset, then in symbolic-mode
# placeholders will be created to match the value shapes.
if isinstance(orig_inputs, (data_types.DatasetV1, data_types.DatasetV2,
iterator_ops.Iterator)):
if not self.inputs:
# For subclassed models, a robust input spec is not available so we
# must cast to the model dtype.
inputs = training_utils_v1.cast_if_floating_dtype(inputs, self.dtype)
def create_tensor_spec(t):
return tensor_spec.TensorSpec(t.shape, t.dtype)
cast_inputs = nest.map_structure(create_tensor_spec, inputs)
elif training_utils_v1.has_tensors(inputs):
cast_inputs = training_utils_v1.cast_if_floating_dtype(inputs)
else:
cast_inputs = inputs
self._set_inputs(cast_inputs)
return processed_inputs, targets, is_dict_inputs
def _compile_from_inputs(self, all_inputs, target, orig_inputs, orig_target):
if target is not None:
# We need to use `y` to set the model targets.
if training_utils_v1.has_tensors(target):
target = training_utils_v1.cast_if_floating_dtype_and_mismatch(
target, self.outputs)
training_utils_v1.validate_input_types(
target, orig_target, allow_dict=False, field_name='target')
if isinstance(target, (list, tuple)):
all_inputs += list(target)
else:
all_inputs.append(target)
# Type check that all inputs are *either* value *or* symbolic.
# TODO(fchollet): this check could be removed in Eager mode?
if any(tensor_util.is_tf_type(v) for v in all_inputs):
if not all(tensor_util.is_tf_type(v) for v in all_inputs):
raise ValueError('Do not pass inputs that mix Numpy arrays and '
'TensorFlow tensors. '
'You passed: x=' + str(orig_inputs) +
'; y=' + str(orig_target))
is_dataset = isinstance(orig_inputs, (data_types.DatasetV1,
data_types.DatasetV2,
iterator_ops.Iterator))
if is_dataset or context.executing_eagerly():
target_tensors = None
else:
# Handle target tensors if any passed.
if target is not None:
if not isinstance(target, (list, tuple)):
target = [target]
target_tensors = [v for v in target if _is_symbolic_tensor(v)]
else:
target_tensors = None
self.compile(
optimizer=self.optimizer,
loss=self.loss,
metrics=self._compile_metrics,
weighted_metrics=self._compile_weighted_metrics,
loss_weights=self.loss_weights,
target_tensors=target_tensors,
sample_weight_mode=self.sample_weight_mode,
run_eagerly=self.run_eagerly,
experimental_run_tf_function=self._experimental_run_tf_function)
# TODO(omalleyt): Consider changing to a more descriptive function name.
def _set_inputs(self, inputs, outputs=None, training=None):
"""Set model's input and output specs based on the input data received.
This is to be used for Model subclasses, which do not know at instantiation
time what their inputs look like.
Args:
inputs: Single array, or list of arrays. The arrays could be placeholders,
Numpy arrays, data tensors, or TensorSpecs.
- if placeholders: the model is built on top of these placeholders,
and we expect Numpy data to be fed for them when calling `fit`/etc.
- if Numpy data or TensorShapes: we create placeholders matching the
TensorShapes or shapes of the Numpy arrays. We expect Numpy data to be
fed for these placeholders when calling `fit`/etc.
- if data tensors: the model is built on top of these tensors.
We do not expect any Numpy data to be provided when calling `fit`/etc.
outputs: None, a data tensor, or a list of tensors. If None, the
outputs will be determined by invoking `self.call()`, otherwise the
provided value will be used.
training: Boolean or None. Only relevant in symbolic mode. Specifies
whether to build the model's graph in inference mode (False), training
mode (True), or using the Keras learning phase (None).
Raises:
ValueError: If dict inputs are passed to a Sequential Model where the
first layer isn't FeatureLayer.
"""
self._set_save_spec(inputs)
inputs = self._set_input_attrs(inputs)
if outputs is None:
kwargs = {}
if self._expects_training_arg:
# In V2 mode, feeding `training=None` is not allowed because any value
# explicitly passed by the user is respected, even `None`.
if training is None and not ops.executing_eagerly_outside_functions():
training = backend.learning_phase()
if training is not None:
kwargs['training'] = training
try:
outputs = self(inputs, **kwargs)
except NotImplementedError:
# This Model or a submodel is dynamic and hasn't overridden
# `compute_output_shape`.
outputs = None
self._set_output_attrs(outputs)
@trackable.no_automatic_dependency_tracking
def _set_input_attrs(self, inputs):
"""Sets attributes related to the inputs of the Model."""
if self.inputs:
raise ValueError('Model inputs are already set.')
if self.__class__.__name__ == 'Sequential' and not self.built:
if tensor_util.is_tf_type(inputs):
input_shape = (None,) + tuple(inputs.shape.as_list()[1:])
elif isinstance(inputs, tensor_shape.TensorShape):
input_shape = (None,) + tuple(inputs.as_list()[1:])
elif isinstance(inputs, dict):
# We assert that the first layer is a FeatureLayer.
if not training_utils_v1.is_feature_layer(self.layers[0]):
raise ValueError('Passing a dictionary input to a Sequential Model '
'which doesn\'t have FeatureLayer as the first layer'
' is an error.')
input_shape = (None,)
else:
input_shape = (None,) + tuple(inputs.shape[1:])
self._build_input_shape = input_shape
# Cast inputs to the compute dtype. This is primarily used
# when saving to determine the correct dtype in the input signature.
inputs = self._maybe_cast_inputs(inputs)
# On-the-fly setting of symbolic model inputs (either by using the tensor
# provided, or by creating a placeholder if Numpy data was provided).
model_inputs = training_utils_v1.ModelInputs(inputs)
inputs = model_inputs.get_symbolic_inputs()
self.inputs = model_inputs.get_symbolic_inputs(return_single_as_list=True)
self.input_names = model_inputs.get_input_names()
self._feed_inputs = []
self._feed_input_names = []
self._feed_input_shapes = []
for k, v in model_inputs.as_dict():
if backend.is_placeholder(v):
self._feed_input_names.append(k)
self._feed_inputs.append(v)
self._feed_input_shapes.append(backend.int_shape(v))
return inputs
@trackable.no_automatic_dependency_tracking
def _set_output_attrs(self, outputs):
"""Sets attributes related to the outputs of the Model."""
# NOTE(taylorrobie): This convention cannot be changed without updating the
# data adapter since it assumes nest.flatten ordering.
outputs = nest.flatten(outputs)
self.outputs = outputs
self.output_names = training_utils_v1.generic_output_names(outputs)
# TODO(scottzhu): Should we cleanup the self._training_endpoints here?
self.built = True
@property
def _targets(self):
"""The output target tensors for the model."""
return [
e.training_target.target
for e in self._training_endpoints
if e.has_training_target()
]
@property
def _feed_targets(self):
return [
e.training_target.target
for e in self._training_endpoints
if e.has_feedable_training_target()
]
@property
def _feed_output_names(self):
return [
e.output_name
for e in self._training_endpoints
if e.has_feedable_training_target()
]
@property
def _feed_output_shapes(self):
return [
e.feed_output_shape
for e in self._training_endpoints
if e.has_feedable_training_target()
]
@property
def _feed_loss_fns(self):
return [
e.loss_fn
for e in self._training_endpoints
if e.has_feedable_training_target()
]
@property
def _loss_weights_list(self):
return [e.loss_weight for e in self._training_endpoints]
@property
def _output_loss_metrics(self):
if hasattr(self, '_training_endpoints'):
return [
e.output_loss_metric
for e in self._training_endpoints
if e.output_loss_metric is not None
]
return None
@property
def sample_weights(self):
return [e.sample_weight for e in self._training_endpoints]
@property
def _sample_weight_modes(self):
return [e.sample_weight_mode for e in self._training_endpoints]
@property
def _feed_sample_weights(self):
return [e.sample_weight for e in self._training_endpoints
if e.sample_weight is not None]
def _maybe_load_initial_epoch_from_ckpt(self, initial_epoch, mode):
"""Maybe load initial epoch from ckpt considering possible worker recovery.
Refer to tensorflow/python/keras/distribute/worker_training_state.py
for more information.
Args:
initial_epoch: The original `initial_epoch` the user passes in to `fit()`.
mode: The mode for running `model.fit()`.
Returns:
If the training is recovering from previous failure under multi-worker
training setting, return the epoch the training is supposed to continue
at. Otherwise, return the `initial_epoch` the user passes in.
"""
if self._training_state is not None:
return self._training_state.maybe_load_initial_epoch_from_ckpt(
initial_epoch, mode)
return initial_epoch
def _get_training_eval_metrics(self):
"""Returns all the metrics that are to be reported.
This includes the output loss metrics, compile metrics/weighted metrics,
add_metric metrics.
"""
metrics = []
metrics.extend(getattr(self, '_output_loss_metrics', None) or [])
metrics.extend(getattr(self, 'metrics', None) or [])
return metrics
def _assert_compile_was_called(self):
# Checks whether `compile` has been called. If it has been called,
# then the optimizer is set. This is different from whether the
# model is compiled
# (i.e. whether the model is built and its inputs/outputs are set).
if not self._compile_was_called:
raise RuntimeError('You must compile your model before '
'training/testing. '
'Use `model.compile(optimizer, loss)`.')
def _in_multi_worker_mode(self):
"""Method to infer if this `Model` is working in multi-worker settings.
Multi-worker training refers to the setup where the training is
distributed across multiple workers, as opposed to the case where
only a local process performs the training. This function is
used to infer, for example, whether or not a distribute coordinator
should be run (and thus whether TensorFlow servers should be started for
communication with other servers in the cluster), or whether or not
saving/restoring checkpoints is relevant for preemption fault tolerance.
Experimental. Signature and implementation are subject to change.
Returns:
Whether this model indicates it's working in multi-worker settings.
"""
strategy = self._distribution_strategy
# Otherwise, use the strategy whose scope this is in.
if not strategy and distribute_lib.has_strategy():
strategy = distribute_lib.get_strategy()
return strategy and strategy.extended._in_multi_worker_mode() # pylint: disable=protected-access
@property
def _trackable_saved_model_saver(self):
return model_serialization.ModelSavedModelSaver(self)
def _get_compile_args(self, user_metrics=True):
del user_metrics
self._assert_compile_was_called()
kwargs = {
'loss': self.loss,
'metrics': self._compile_metrics,
'loss_weights': self.loss_weights,
'sample_weight_mode': self.sample_weight_mode,
'weighted_metrics': self._compile_weighted_metrics,
}
return kwargs
@property
def _compile_was_called(self):
return self._v1_compile_was_called
| Model |
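The docstring above enumerates the input forms the v1 training paths accept; a minimal sketch of those forms, assuming the usual NumPy/TensorFlow imports (the variable names are illustrative, not part of the class):
import numpy as np
import tensorflow as tf
# Array input with array targets.
x_arr = np.random.rand(32, 4).astype("float32")
y_arr = np.random.rand(32, 1).astype("float32")
# Dict input keyed by input name, for models with named inputs.
x_dict = {"dense_input": x_arr}
# A tf.data dataset; when x is a dataset, y must not be passed, since
# targets are obtained from the iterator (see the docstring above).
ds = tf.data.Dataset.from_tensor_slices((x_arr, y_arr)).batch(8)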
python | RaRe-Technologies__gensim | gensim/models/hdpmodel.py | {
"start": 4149,
"end": 5037
} | class ____:
"""Stores sufficient statistics for the current chunk of document(s) whenever Hdp model is updated with new corpus.
These stats are used when updating lambda and top level sticks. The statistics include number of documents in the
chunk, length of words in the documents and top level truncation level.
"""
def __init__(self, T, Wt, Dt):
"""
Parameters
----------
T : int
Top level truncation level.
Wt : int
Number of words in the documents.
Dt : int
Chunk size.
"""
self.m_chunksize = Dt
self.m_var_sticks_ss = np.zeros(T)
self.m_var_beta_ss = np.zeros((T, Wt))
def set_zero(self):
"""Fill the sticks and beta array with 0 scalar value."""
self.m_var_sticks_ss.fill(0.0)
self.m_var_beta_ss.fill(0.0)
| SuffStats |
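A small usage sketch of the class above, using the target name SuffStats (values arbitrary):
stats = SuffStats(T=3, Wt=5, Dt=10)  # truncation 3, 5 words, chunk of 10 docs
assert stats.m_var_sticks_ss.shape == (3,)
assert stats.m_var_beta_ss.shape == (3, 5)
stats.set_zero()  # reset both statistics arrays between chunks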
python | ray-project__ray | python/ray/tests/test_client_reconnect.py | {
"start": 3630,
"end": 7642
} | class ____(ray_client_pb2_grpc.RayletDriverServicer):
"""
Forwards all requests to the raylet driver servicer. Useful for injecting
errors between a client and server pair.
"""
def __init__(
self, on_request: Optional[Hook] = None, on_response: Optional[Hook] = None
):
"""
Args:
on_request: Optional hook to inject errors before forwarding a
request
on_response: Optional hook to inject errors before sending back a
response
"""
self.stub = None
self.on_request = on_request
self.on_response = on_response
def set_channel(self, channel: grpc.Channel) -> None:
self.stub = ray_client_pb2_grpc.RayletDriverStub(channel)
def _call_inner_function(
self, request: Any, context, method: str
) -> Optional[ray_client_pb2_grpc.RayletDriverStub]:
if self.on_request:
self.on_request(request)
try:
response = getattr(self.stub, method)(
request, metadata=context.invocation_metadata()
)
except grpc.RpcError as e:
context.set_code(e.code())
context.set_details(e.details())
raise
if self.on_response and method != "GetObject":
# GetObject streams response, handle on_response separately
self.on_response(response)
return response
def Init(self, request, context=None) -> ray_client_pb2.InitResponse:
return self._call_inner_function(request, context, "Init")
def KVPut(self, request, context=None) -> ray_client_pb2.KVPutResponse:
return self._call_inner_function(request, context, "KVPut")
def KVGet(self, request, context=None) -> ray_client_pb2.KVGetResponse:
return self._call_inner_function(request, context, "KVGet")
def KVDel(self, request, context=None) -> ray_client_pb2.KVDelResponse:
return self._call_inner_function(request, context, "KVDel")
def KVList(self, request, context=None) -> ray_client_pb2.KVListResponse:
return self._call_inner_function(request, context, "KVList")
def KVExists(self, request, context=None) -> ray_client_pb2.KVExistsResponse:
return self._call_inner_function(request, context, "KVExists")
def ListNamedActors(
self, request, context=None
) -> ray_client_pb2.ClientListNamedActorsResponse:
return self._call_inner_function(request, context, "ListNamedActors")
def ClusterInfo(self, request, context=None) -> ray_client_pb2.ClusterInfoResponse:
# Cluster info is currently used for health checks and isn't retried, so
# don't inject errors.
# TODO(ckw): update ClusterInfo so that retries are only skipped for PING
try:
return self.stub.ClusterInfo(
request, metadata=context.invocation_metadata()
)
except grpc.RpcError as e:
context.set_code(e.code())
context.set_details(e.details())
raise
def Terminate(self, req, context=None):
return self._call_inner_function(req, context, "Terminate")
def GetObject(self, request, context=None):
for response in self._call_inner_function(request, context, "GetObject"):
if self.on_response:
self.on_response(response)
yield response
def PutObject(
self, request: ray_client_pb2.PutRequest, context=None
) -> ray_client_pb2.PutResponse:
return self._call_inner_function(request, context, "PutObject")
def WaitObject(
self, request: ray_client_pb2.WaitRequest, context=None
) -> ray_client_pb2.WaitResponse:
return self._call_inner_function(request, context, "WaitObject")
def Schedule(
self, task: ray_client_pb2.ClientTask, context=None
) -> ray_client_pb2.ClientTaskTicket:
return self._call_inner_function(task, context, "Schedule")
| MiddlemanRayletServicer |
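The hook mechanism above is what lets tests inject failures between a client and server pair; a sketch, where fail_first_n is a hypothetical helper defined here rather than part of the servicer:
def fail_first_n(n):
    # Fail the first n requests, then let traffic through.
    state = {"count": 0}
    def hook(request):
        if state["count"] < n:
            state["count"] += 1
            raise RuntimeError("injected failure")
    return hook
middleman = MiddlemanRayletServicer(on_request=fail_first_n(2))
# middleman.set_channel(grpc.insecure_channel("localhost:50001"))  # wire to a real server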
python | encode__django-rest-framework | tests/test_bound_fields.py | {
"start": 3717,
"end": 8368
} | class ____:
def test_nested_empty_bound_field(self):
class Nested(serializers.Serializer):
more_text = serializers.CharField(max_length=100)
amount = serializers.IntegerField()
class ExampleSerializer(serializers.Serializer):
text = serializers.CharField(max_length=100)
nested = Nested()
serializer = ExampleSerializer()
assert serializer['text'].value == ''
assert serializer['text'].errors is None
assert serializer['text'].name == 'text'
assert serializer['nested']['more_text'].value == ''
assert serializer['nested']['more_text'].errors is None
assert serializer['nested']['more_text'].name == 'nested.more_text'
assert serializer['nested']['amount'].value is None
assert serializer['nested']['amount'].errors is None
assert serializer['nested']['amount'].name == 'nested.amount'
def test_as_form_fields(self):
class Nested(serializers.Serializer):
bool_field = serializers.BooleanField()
null_field = serializers.IntegerField(allow_null=True)
json_field = serializers.JSONField()
custom_json_field = CustomJSONField()
class ExampleSerializer(serializers.Serializer):
nested = Nested()
serializer = ExampleSerializer(
data={'nested': {
'bool_field': False, 'null_field': None,
'json_field': {'bool_item': True, 'number': 1, 'text_item': 'text'},
'custom_json_field': {'bool_item': True, 'number': 1, 'text_item': 'text'},
}})
assert serializer.is_valid()
assert serializer['nested']['bool_field'].as_form_field().value == ''
assert serializer['nested']['null_field'].as_form_field().value == ''
assert serializer['nested']['json_field'].as_form_field().value == '''{
"bool_item": true,
"number": 1,
"text_item": "text"
}'''
assert serializer['nested']['custom_json_field'].as_form_field().value == '''{
"bool_item": true,
"number": 1,
"text_item": "text"
}'''
def test_rendering_nested_fields_with_none_value(self):
from rest_framework.renderers import HTMLFormRenderer
class Nested1(serializers.Serializer):
text_field = serializers.CharField()
class Nested2(serializers.Serializer):
nested1 = Nested1(allow_null=True)
text_field = serializers.CharField()
class ExampleSerializer(serializers.Serializer):
nested2 = Nested2()
serializer = ExampleSerializer(data={'nested2': {'nested1': None, 'text_field': 'test'}})
assert serializer.is_valid()
renderer = HTMLFormRenderer()
for field in serializer:
rendered = renderer.render_field(field, {})
expected_packed = (
'<fieldset>'
'<legend>Nested2</legend>'
'<fieldset>'
'<legend>Nested1</legend>'
'<divclass="form-group">'
'<label>Textfield</label>'
'<inputname="nested2.nested1.text_field"class="form-control"type="text"value="">'
'</div>'
'</fieldset>'
'<divclass="form-group">'
'<label>Textfield</label>'
'<inputname="nested2.text_field"class="form-control"type="text"value="test">'
'</div>'
'</fieldset>'
)
rendered_packed = ''.join(rendered.split())
assert rendered_packed == expected_packed
def test_rendering_nested_fields_with_not_mappable_value(self):
from rest_framework.renderers import HTMLFormRenderer
class Nested(serializers.Serializer):
text_field = serializers.CharField()
class ExampleSerializer(serializers.Serializer):
nested = Nested()
serializer = ExampleSerializer(data={'nested': 1})
assert not serializer.is_valid()
renderer = HTMLFormRenderer()
for field in serializer:
rendered = renderer.render_field(field, {})
expected_packed = (
'<fieldset>'
'<legend>Nested</legend>'
'<divclass="form-group">'
'<label>Textfield</label>'
'<inputname="nested.text_field"class="form-control"type="text"value="">'
'</div>'
'</fieldset>'
)
rendered_packed = ''.join(rendered.split())
assert rendered_packed == expected_packed
| TestNestedBoundField |
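The nested indexing the tests exercise can be condensed into a short sketch, assuming rest_framework is importable:
from rest_framework import serializers
class Inner(serializers.Serializer):
    amount = serializers.IntegerField()
class Outer(serializers.Serializer):
    inner = Inner()
s = Outer(data={"inner": {"amount": 3}})
assert s.is_valid()
assert s["inner"]["amount"].name == "inner.amount"  # dotted nested name, as asserted above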
python | kubernetes-client__python | kubernetes/client/models/v1_daemon_set_status.py | {
"start": 383,
"end": 15485
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'collision_count': 'int',
'conditions': 'list[V1DaemonSetCondition]',
'current_number_scheduled': 'int',
'desired_number_scheduled': 'int',
'number_available': 'int',
'number_misscheduled': 'int',
'number_ready': 'int',
'number_unavailable': 'int',
'observed_generation': 'int',
'updated_number_scheduled': 'int'
}
attribute_map = {
'collision_count': 'collisionCount',
'conditions': 'conditions',
'current_number_scheduled': 'currentNumberScheduled',
'desired_number_scheduled': 'desiredNumberScheduled',
'number_available': 'numberAvailable',
'number_misscheduled': 'numberMisscheduled',
'number_ready': 'numberReady',
'number_unavailable': 'numberUnavailable',
'observed_generation': 'observedGeneration',
'updated_number_scheduled': 'updatedNumberScheduled'
}
def __init__(self, collision_count=None, conditions=None, current_number_scheduled=None, desired_number_scheduled=None, number_available=None, number_misscheduled=None, number_ready=None, number_unavailable=None, observed_generation=None, updated_number_scheduled=None, local_vars_configuration=None): # noqa: E501
"""V1DaemonSetStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._collision_count = None
self._conditions = None
self._current_number_scheduled = None
self._desired_number_scheduled = None
self._number_available = None
self._number_misscheduled = None
self._number_ready = None
self._number_unavailable = None
self._observed_generation = None
self._updated_number_scheduled = None
self.discriminator = None
if collision_count is not None:
self.collision_count = collision_count
if conditions is not None:
self.conditions = conditions
self.current_number_scheduled = current_number_scheduled
self.desired_number_scheduled = desired_number_scheduled
if number_available is not None:
self.number_available = number_available
self.number_misscheduled = number_misscheduled
self.number_ready = number_ready
if number_unavailable is not None:
self.number_unavailable = number_unavailable
if observed_generation is not None:
self.observed_generation = observed_generation
if updated_number_scheduled is not None:
self.updated_number_scheduled = updated_number_scheduled
@property
def collision_count(self):
"""Gets the collision_count of this V1DaemonSetStatus. # noqa: E501
Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision. # noqa: E501
:return: The collision_count of this V1DaemonSetStatus. # noqa: E501
:rtype: int
"""
return self._collision_count
@collision_count.setter
def collision_count(self, collision_count):
"""Sets the collision_count of this V1DaemonSetStatus.
Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision. # noqa: E501
:param collision_count: The collision_count of this V1DaemonSetStatus. # noqa: E501
:type: int
"""
self._collision_count = collision_count
@property
def conditions(self):
"""Gets the conditions of this V1DaemonSetStatus. # noqa: E501
Represents the latest available observations of a DaemonSet's current state. # noqa: E501
:return: The conditions of this V1DaemonSetStatus. # noqa: E501
:rtype: list[V1DaemonSetCondition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""Sets the conditions of this V1DaemonSetStatus.
Represents the latest available observations of a DaemonSet's current state. # noqa: E501
:param conditions: The conditions of this V1DaemonSetStatus. # noqa: E501
:type: list[V1DaemonSetCondition]
"""
self._conditions = conditions
@property
def current_number_scheduled(self):
"""Gets the current_number_scheduled of this V1DaemonSetStatus. # noqa: E501
The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ # noqa: E501
:return: The current_number_scheduled of this V1DaemonSetStatus. # noqa: E501
:rtype: int
"""
return self._current_number_scheduled
@current_number_scheduled.setter
def current_number_scheduled(self, current_number_scheduled):
"""Sets the current_number_scheduled of this V1DaemonSetStatus.
The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ # noqa: E501
:param current_number_scheduled: The current_number_scheduled of this V1DaemonSetStatus. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and current_number_scheduled is None: # noqa: E501
raise ValueError("Invalid value for `current_number_scheduled`, must not be `None`") # noqa: E501
self._current_number_scheduled = current_number_scheduled
@property
def desired_number_scheduled(self):
"""Gets the desired_number_scheduled of this V1DaemonSetStatus. # noqa: E501
The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ # noqa: E501
:return: The desired_number_scheduled of this V1DaemonSetStatus. # noqa: E501
:rtype: int
"""
return self._desired_number_scheduled
@desired_number_scheduled.setter
def desired_number_scheduled(self, desired_number_scheduled):
"""Sets the desired_number_scheduled of this V1DaemonSetStatus.
The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ # noqa: E501
:param desired_number_scheduled: The desired_number_scheduled of this V1DaemonSetStatus. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and desired_number_scheduled is None: # noqa: E501
raise ValueError("Invalid value for `desired_number_scheduled`, must not be `None`") # noqa: E501
self._desired_number_scheduled = desired_number_scheduled
@property
def number_available(self):
"""Gets the number_available of this V1DaemonSetStatus. # noqa: E501
The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds) # noqa: E501
:return: The number_available of this V1DaemonSetStatus. # noqa: E501
:rtype: int
"""
return self._number_available
@number_available.setter
def number_available(self, number_available):
"""Sets the number_available of this V1DaemonSetStatus.
The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds) # noqa: E501
:param number_available: The number_available of this V1DaemonSetStatus. # noqa: E501
:type: int
"""
self._number_available = number_available
@property
def number_misscheduled(self):
"""Gets the number_misscheduled of this V1DaemonSetStatus. # noqa: E501
The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ # noqa: E501
:return: The number_misscheduled of this V1DaemonSetStatus. # noqa: E501
:rtype: int
"""
return self._number_misscheduled
@number_misscheduled.setter
def number_misscheduled(self, number_misscheduled):
"""Sets the number_misscheduled of this V1DaemonSetStatus.
The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ # noqa: E501
:param number_misscheduled: The number_misscheduled of this V1DaemonSetStatus. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and number_misscheduled is None: # noqa: E501
raise ValueError("Invalid value for `number_misscheduled`, must not be `None`") # noqa: E501
self._number_misscheduled = number_misscheduled
@property
def number_ready(self):
"""Gets the number_ready of this V1DaemonSetStatus. # noqa: E501
numberReady is the number of nodes that should be running the daemon pod and have one or more of the daemon pod running with a Ready Condition. # noqa: E501
:return: The number_ready of this V1DaemonSetStatus. # noqa: E501
:rtype: int
"""
return self._number_ready
@number_ready.setter
def number_ready(self, number_ready):
"""Sets the number_ready of this V1DaemonSetStatus.
numberReady is the number of nodes that should be running the daemon pod and have one or more of the daemon pod running with a Ready Condition. # noqa: E501
:param number_ready: The number_ready of this V1DaemonSetStatus. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and number_ready is None: # noqa: E501
raise ValueError("Invalid value for `number_ready`, must not be `None`") # noqa: E501
self._number_ready = number_ready
@property
def number_unavailable(self):
"""Gets the number_unavailable of this V1DaemonSetStatus. # noqa: E501
The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds) # noqa: E501
:return: The number_unavailable of this V1DaemonSetStatus. # noqa: E501
:rtype: int
"""
return self._number_unavailable
@number_unavailable.setter
def number_unavailable(self, number_unavailable):
"""Sets the number_unavailable of this V1DaemonSetStatus.
The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds) # noqa: E501
:param number_unavailable: The number_unavailable of this V1DaemonSetStatus. # noqa: E501
:type: int
"""
self._number_unavailable = number_unavailable
@property
def observed_generation(self):
"""Gets the observed_generation of this V1DaemonSetStatus. # noqa: E501
The most recent generation observed by the daemon set controller. # noqa: E501
:return: The observed_generation of this V1DaemonSetStatus. # noqa: E501
:rtype: int
"""
return self._observed_generation
@observed_generation.setter
def observed_generation(self, observed_generation):
"""Sets the observed_generation of this V1DaemonSetStatus.
The most recent generation observed by the daemon set controller. # noqa: E501
:param observed_generation: The observed_generation of this V1DaemonSetStatus. # noqa: E501
:type: int
"""
self._observed_generation = observed_generation
@property
def updated_number_scheduled(self):
"""Gets the updated_number_scheduled of this V1DaemonSetStatus. # noqa: E501
The total number of nodes that are running updated daemon pod # noqa: E501
:return: The updated_number_scheduled of this V1DaemonSetStatus. # noqa: E501
:rtype: int
"""
return self._updated_number_scheduled
@updated_number_scheduled.setter
def updated_number_scheduled(self, updated_number_scheduled):
"""Sets the updated_number_scheduled of this V1DaemonSetStatus.
The total number of nodes that are running updated daemon pod # noqa: E501
:param updated_number_scheduled: The updated_number_scheduled of this V1DaemonSetStatus. # noqa: E501
:type: int
"""
self._updated_number_scheduled = updated_number_scheduled
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1DaemonSetStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1DaemonSetStatus):
return True
return self.to_dict() != other.to_dict()
| V1DaemonSetStatus |
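A brief usage sketch of the generated model above; per the setters, the scheduled/misscheduled/ready counts must be non-None when client-side validation is enabled:
status = V1DaemonSetStatus(
    current_number_scheduled=3,
    desired_number_scheduled=3,
    number_misscheduled=0,
    number_ready=3,
)
assert status.to_dict()["number_ready"] == 3  # to_dict keys use the python attribute names
# status.number_ready = None  # would raise ValueError under client-side validation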
python | numba__numba | numba/core/typing/setdecl.py | {
"start": 2669,
"end": 3209
} | class ____(AbstractTemplate):
def generic(self, args, kws):
if len(args) != 2:
return
a, b = args
if isinstance(a, types.Set) and isinstance(b, types.Set) and a == b:
return signature(types.boolean, *args)
for op_key in (operator.add, operator.invert):
@infer_global(op_key)
class ConcreteSetOperator(SetOperator):
key = op_key
for op_key in (operator.iadd,):
@infer_global(op_key)
class ConcreteInplaceSetOperator(SetOperator):
key = op_key
| SetComparison |
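The template above supplies only the typing for set-vs-set comparison; a sketch of the user-level behaviour it supports, assuming numba is installed and the matching lowering is registered elsewhere:
from numba import njit
@njit
def same_members(n):
    a = set()
    b = set()
    for i in range(n):
        a.add(i)
        b.add(i)
    return a == b  # typed as boolean by the comparison template
print(same_members(5))  # True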
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 309599,
"end": 310107
} | class ____(sgqlc.types.Input):
"""Ways in which team discussion comment connections can be ordered."""
__schema__ = github_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(sgqlc.types.non_null(TeamDiscussionCommentOrderField), graphql_name="field")
"""The field by which to order nodes."""
direction = sgqlc.types.Field(sgqlc.types.non_null(OrderDirection), graphql_name="direction")
"""The direction in which to order nodes."""
| TeamDiscussionCommentOrder |
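A sketch of populating this input with sgqlc; the enum values shown are assumptions about the schema, not taken from this file:
order = TeamDiscussionCommentOrder()
order.field = "NUMBER"     # a TeamDiscussionCommentOrderField value
order.direction = "DESC"   # an OrderDirection value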
python | spyder-ide__spyder | spyder/plugins/statusbar/plugin.py | {
"start": 794,
"end": 8390
} | class ____(SpyderPluginV2):
"""Status bar plugin."""
NAME = 'statusbar'
REQUIRES = [Plugins.Preferences]
CONTAINER_CLASS = StatusBarContainer
CONF_SECTION = NAME
CONF_FILE = False
CONF_WIDGET_CLASS = StatusBarConfigPage
STATUS_WIDGETS = {}
EXTERNAL_RIGHT_WIDGETS = {}
EXTERNAL_LEFT_WIDGETS = {}
INTERNAL_WIDGETS = {}
INTERNAL_WIDGETS_IDS = {
"clock_status",
"cpu_status",
"memory_status",
"read_write_status",
"eol_status",
"encoding_status",
"cursor_position_status",
"vcs_status",
"lsp_status",
"pythonenv_status",
"matplotlib_status",
"update_manager_status",
"inapp_appeal_status",
}
# ---- SpyderPluginV2 API
@staticmethod
def get_name():
return _('Status bar')
@classmethod
def get_icon(cls):
return cls.create_icon('statusbar')
@staticmethod
def get_description():
return _("Display the main window status bar.")
def on_initialize(self):
# --- Status widgets
self.add_status_widget(self.mem_status, StatusBarWidgetPosition.Right)
self.add_status_widget(self.cpu_status, StatusBarWidgetPosition.Right)
self.add_status_widget(
self.clock_status, StatusBarWidgetPosition.Right)
def on_close(self, _unused):
self._statusbar.setVisible(False)
@on_plugin_available(plugin=Plugins.Preferences)
def on_preferences_available(self):
preferences = self.get_plugin(Plugins.Preferences)
preferences.register_plugin_preferences(self)
@on_plugin_teardown(plugin=Plugins.Preferences)
def on_preferences_teardown(self):
preferences = self.get_plugin(Plugins.Preferences)
preferences.deregister_plugin_preferences(self)
def after_container_creation(self):
container = self.get_container()
container.sig_show_status_bar_requested.connect(
self.show_status_bar
)
# ---- Public API
def add_status_widget(self, widget, position=StatusBarWidgetPosition.Left):
"""
Add status widget to main application status bar.
Parameters
----------
widget: StatusBarWidget
Widget to be added to the status bar.
position: int
Position where the widget will be added given the members of the
StatusBarWidgetPosition enum.
"""
# Check widget class
if not isinstance(widget, StatusBarWidget):
raise SpyderAPIError(
'Any status widget must subclass StatusBarWidget!'
)
# Check ID
id_ = widget.ID
if id_ is None:
raise SpyderAPIError(
f"Status widget `{repr(widget)}` doesn't have an identifier!"
)
# Check it was not added before
if id_ in self.STATUS_WIDGETS and not running_under_pytest():
raise SpyderAPIError(f'Status widget `{id_}` already added!')
if id_ in self.INTERNAL_WIDGETS_IDS:
self.INTERNAL_WIDGETS[id_] = widget
elif position == StatusBarWidgetPosition.Right:
self.EXTERNAL_RIGHT_WIDGETS[id_] = widget
else:
self.EXTERNAL_LEFT_WIDGETS[id_] = widget
self.STATUS_WIDGETS[id_] = widget
self._statusbar.setStyleSheet('QStatusBar::item {border: None;}')
if position == StatusBarWidgetPosition.Right:
self._statusbar.addPermanentWidget(widget)
else:
self._statusbar.insertPermanentWidget(
StatusBarWidgetPosition.Left, widget)
self._statusbar.layout().setContentsMargins(0, 0, 0, 0)
self._statusbar.layout().setSpacing(0)
def remove_status_widget(self, id_):
"""
Remove widget from status bar.
Parameters
----------
id_: str
String identifier for the widget.
"""
try:
widget = self.get_status_widget(id_)
self.STATUS_WIDGETS.pop(id_)
self._statusbar.removeWidget(widget)
except RuntimeError:
# This can happen if the widget was already removed (tests fail
# without this).
pass
def get_status_widget(self, id_):
"""
Return an application status widget by name.
Parameters
----------
id_: str
String identifier for the widget.
"""
if id_ in self.STATUS_WIDGETS:
return self.STATUS_WIDGETS[id_]
else:
raise SpyderAPIError(f'Status widget "{id_}" not found!')
def get_status_widgets(self):
"""Return all status widgets."""
return list(self.STATUS_WIDGETS.keys())
def remove_status_widgets(self):
"""Remove all status widgets."""
for w in self.get_status_widgets():
self.remove_status_widget(w)
@Slot(bool)
def show_status_bar(self, value):
"""
Show/hide status bar.
Parameters
----------
value: bool
Decide whether to show or hide the status bar.
"""
self._statusbar.setVisible(value)
# ---- Default status widgets
@property
def mem_status(self):
return self.get_container().mem_status
@property
def cpu_status(self):
return self.get_container().cpu_status
@property
def clock_status(self):
return self.get_container().clock_status
# ---- Private API
@property
def _statusbar(self):
"""Reference to main window status bar."""
return self._main.statusBar()
def _organize_status_widgets(self):
"""
Organize the status bar widgets once the application is loaded.
"""
# Desired organization
internal_layout = [
"clock_status",
"cpu_status",
"memory_status",
"read_write_status",
"eol_status",
"encoding_status",
"cursor_position_status",
"vcs_status",
"lsp_status",
"pythonenv_status",
"matplotlib_status",
"update_manager_status",
"inapp_appeal_status",
]
external_left = list(self.EXTERNAL_LEFT_WIDGETS.keys())
# Remove all widgets from the statusbar, except the external right
for id_ in self.INTERNAL_WIDGETS:
self._statusbar.removeWidget(self.INTERNAL_WIDGETS[id_])
for id_ in self.EXTERNAL_LEFT_WIDGETS:
self._statusbar.removeWidget(self.EXTERNAL_LEFT_WIDGETS[id_])
# Add the internal widgets in the desired layout
for id_ in internal_layout:
# This is needed in the case kite is installed but not enabled
if id_ in self.INTERNAL_WIDGETS:
self._statusbar.insertPermanentWidget(
StatusBarWidgetPosition.Left, self.INTERNAL_WIDGETS[id_]
)
self.INTERNAL_WIDGETS[id_].setVisible(True)
# Add the external left widgets
for id_ in external_left:
self._statusbar.insertPermanentWidget(
StatusBarWidgetPosition.Left, self.EXTERNAL_LEFT_WIDGETS[id_]
)
self.EXTERNAL_LEFT_WIDGETS[id_].setVisible(True)
def before_mainwindow_visible(self):
"""Perform actions before the mainwindow is visible"""
# Organize widgets in the expected order
self._statusbar.setVisible(False)
self._organize_status_widgets()
| StatusBar |
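A sketch of registering a custom widget with the plugin above; MyStatus and plugin are hypothetical names for illustration:
class MyStatus(StatusBarWidget):
    ID = "my_status"  # required: add_status_widget raises SpyderAPIError without an ID
# plugin.add_status_widget(MyStatus(parent=None), StatusBarWidgetPosition.Left)
# plugin.remove_status_widget("my_status")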
python | dask__dask | dask/tests/test_task_spec.py | {
"start": 5878,
"end": 11933
} | class ____:
deserialized = False
serialized = False
def __getstate__(self):
if SerializeOnlyOnce.serialized:
raise RuntimeError()
SerializeOnlyOnce.serialized = True
return {}
def __setstate__(self, state):
if SerializeOnlyOnce.deserialized:
raise RuntimeError()
SerializeOnlyOnce.deserialized = True
def __call__(self, a, b):
return a + b
def test_pickle():
def assert_slots_equal(a, b):
def get_all_slots(obj):
slots = set()
for cls in obj.__class__.mro():
slots.update(getattr(cls, "__slots__", ()))
return slots
all_slots = get_all_slots(a) | get_all_slots(b)
assert all_slots == get_all_slots(a) == get_all_slots(b)
assert all(getattr(a, slot) == getattr(b, slot) for slot in all_slots)
assert not hasattr(a, "__dict__")
assert not hasattr(b, "__dict__")
t1 = Task("key-1", func, "a", "b")
t2 = Task("key-2", func, "c", "d")
rtt1 = pickle.loads(pickle.dumps(t1))
assert repr(rtt1) == repr(t1)
rtt2 = pickle.loads(pickle.dumps(t2))
assert_slots_equal(t1, rtt1)
assert_slots_equal(t2, rtt2)
assert t1 == rtt1
assert t1.func == rtt1.func
assert t1.func is rtt1.func
assert t1.func is rtt2.func
l = Tuple(t1, t2)
rtl = pickle.loads(pickle.dumps(l))
assert l == rtl
assert l() == rtl()
d = Dict(key=t1)
rtd = pickle.loads(pickle.dumps(d))
assert d == rtd
assert d() == rtd()
def test_pickle_size():
# We will serialize many of these objects which drives both memory usage and
# serialization runtime performance.
# Reducing pickle size is beneficial but the numbers below are determined
# empirically
# Analyzing the output with pickletools.dis is useful to debug memoization
# and serialization by value
a = Alias("a", "b")
# We cannot shrink it to nothing
assert len(pickle.dumps(a)) < 55
b = Alias("b", "c")
# But most of it should be overhead that is memoized
assert len(pickle.dumps((a, b))) <= 70
# Pickle should be able to memoize this. On py3.10 that's 2 additional bytes
assert len(pickle.dumps((a, b, b))) <= len(pickle.dumps((a, b))) + 10
t1 = Task("key-1", func, "a", "b")
assert len(pickle.dumps(t1)) < 120
t2 = Task("key-2", func, TaskRef("key-1"), "c")
assert len(pickle.dumps(t2)) < 140
assert len(pickle.dumps((t1, t2))) < 170
l = List(t1, t2)
assert len(pickle.dumps(l)) <= 272
sizes = []
growth = []
inner = List(t1, t2)
for depth in range(20):
inner = List(inner, t1)
size = len(pickle.dumps(inner))
if len(sizes) > 0:
growth.append(size - sizes[-1][1])
sizes.append((depth, size))
growth = set(growth)
# If this breaks, something cannot be memoized. That's very concerning
assert len(growth) == 1
# If this goes up, that's not great but not a disaster
assert growth.pop() <= 32
def test_tokenize():
t = Task("key-1", func, "a", "b")
assert tokenize(t) == tokenize(t)
t2 = Task("key-1", func, "a", "b")
assert tokenize(t) == tokenize(t2)
tokenize(t)
# Literals are often generated with random/anonymous names but that should not
# impact hashing. Otherwise identical submits would end up with different
# tokens
l = DataNode("key-1", "a")
l2 = DataNode("key-2", "a")
assert tokenize(l) == tokenize(l2)
async def afunc(a, b):
return a + b
def test_async_func():
pytest.importorskip("distributed")
from distributed.utils_test import gen_test
@gen_test()
async def _():
t = Task("key-1", afunc, "a", "b")
assert t.is_coro
assert await t() == "ab"
assert await pickle.loads(pickle.dumps(t))() == "ab"
_()
def test_parse_curry():
def curry(func, *args, **kwargs):
return func(*args, **kwargs) + "c"
dsk = {
"key-1": (curry, func, "a", "b"),
}
converted = convert_and_verify_keys(dsk)
t = Task("key-1", curry, func, "a", "b")
assert converted["key-1"]() == t()
assert t() == "a-bc"
def test_curry():
def curry(func, *args, **kwargs):
return func(*args, **kwargs) + "c"
t = Task("key-1", curry, func, "a", "b")
assert t() == "a-bc"
def test_avoid_cycles():
pytest.importorskip("distributed")
from dask._task_spec import TaskRef
dsk = {
"key": TaskRef("key"), # e.g. a persisted key
}
new_dsk = convert_and_verify_keys(dsk)
assert not new_dsk
def test_runnable_as_kwarg():
def func_kwarg(a, b, c=""):
return a + b + str(c)
t = Task(
"key-1",
func_kwarg,
"a",
"b",
c=Task("key-2", sum, [1, 2]),
)
assert t() == "ab3"
def test_dependency_as_kwarg():
def func_kwarg(a, b, c=""):
return a + b + str(c)
t1 = Task("key-1", sum, [1, 2])
t2 = Task(
"key-2",
func_kwarg,
"a",
"b",
c=t1.ref(),
)
with pytest.raises(RuntimeError, match="missing"):
t2()
# It isn't sufficient to raise. We also rely on the attribute being set
# properly since distributed will use this to infer actual dependencies. The
# exception may be raised recursively.
assert t2.dependencies
assert t2({"key-1": t1()}) == "ab3"
def test_array_as_argument():
np = pytest.importorskip("numpy")
t = Task("key-1", func, np.array([1, 2]), "b")
assert t() == "[1 2]-b"
# This will **not** work since we do not want to recurse into an array!
t2 = Task("key-2", func, np.array([1, t.ref()]), "b")
assert t2({"key-1": "foo"}) != "[1 foo]-b"
assert not _get_dependencies(np.array([1, t.ref()]))
@pytest.mark.parametrize(
"inst",
[
Task("key-1", func, "a", "b"),
Alias("key-1"),
DataNode("key-1", 1),
],
)
def test_ensure_slots(inst):
assert not hasattr(inst, "__dict__")
| SerializeOnlyOnce |
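The size assertions in test_pickle_size lean on pickle's memo table; a standalone illustration of that memoization:
import pickle
payload = ("some-shared-payload",)
one = len(pickle.dumps((payload,)))
two = len(pickle.dumps((payload, payload)))  # second reference hits the memo
assert two - one < len(pickle.dumps(payload))
print(one, two)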
python | huggingface__transformers | src/transformers/models/longformer/modeling_longformer.py | {
"start": 9855,
"end": 12713
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x +
attention_window + 1)`, where `x` is the number of tokens with global attention mask.
Local attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token in the sequence to every token with
global attention (first `x` values) and to every token in the attention window (remaining `attention_window
+ 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the
remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a
token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding
(succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens.
If the attention window contains a token with global attention, the attention weight at the corresponding
index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global
attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be
accessed from `global_attentions`.
global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`,
where `x` is the number of tokens with global attention mask.
Global attentions weights after the attention softmax, used to compute the weighted average in the
self-attention heads. Those are the attention weights from every token with global attention to every token
in the sequence.
"""
loss: Optional[torch.FloatTensor] = None
start_logits: Optional[torch.FloatTensor] = None
end_logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
global_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for outputs of sentence classification models.
"""
)
| LongformerQuestionAnsweringModelOutput |
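A small worked check of the local-attention index layout described in the docstring above (the window size and global-token count are illustrative): each token's row holds `x` global slots followed by `attention_window + 1` windowed slots, with the self-attention weight at index `x + attention_window / 2`.

# Worked arithmetic for the attention index layout (illustrative values).
attention_window, x = 4, 1            # window of 4 neighbors, 1 global token
row_len = x + attention_window + 1    # slots per token in `attentions`
self_idx = x + attention_window // 2  # a token's attention weight to itself
assert (row_len, self_idx) == (6, 3)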
python | doocs__leetcode | solution/1300-1399/1331.Rank Transform of an Array/Solution.py | {
"start": 0,
"end": 157
} | class ____:
def arrayRankTransform(self, arr: List[int]) -> List[int]:
t = sorted(set(arr))
return [bisect_right(t, x) for x in arr]
| Solution |
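A quick sanity check of the rank-transform idea above, rewritten as a standalone function (the input arrays are illustrative): each rank is the 1-based position of a value among the sorted unique values, which bisect_right yields directly.

from bisect import bisect_right
from typing import List

def array_rank_transform(arr: List[int]) -> List[int]:
    t = sorted(set(arr))                      # sorted unique values
    return [bisect_right(t, x) for x in arr]  # 1-based rank of each value

assert array_rank_transform([40, 10, 20, 30]) == [4, 1, 2, 3]
assert array_rank_transform([100, 100, 100]) == [1, 1, 1]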
python | matplotlib__matplotlib | lib/matplotlib/patches.py | {
"start": 41622,
"end": 44411
} | class ____(Patch):
"""Wedge shaped patch."""
def __str__(self):
pars = (self.center[0], self.center[1], self.r,
self.theta1, self.theta2, self.width)
fmt = "Wedge(center=(%g, %g), r=%g, theta1=%g, theta2=%g, width=%s)"
return fmt % pars
@_docstring.interpd
def __init__(self, center, r, theta1, theta2, *, width=None, **kwargs):
"""
A wedge centered at *x*, *y* center with radius *r* that
sweeps *theta1* to *theta2* (in degrees). If *width* is given,
then a partial wedge is drawn from inner radius *r* - *width*
to outer radius *r*.
Valid keyword arguments are:
%(Patch:kwdoc)s
"""
super().__init__(**kwargs)
self.center = center
self.r, self.width = r, width
self.theta1, self.theta2 = theta1, theta2
self._patch_transform = transforms.IdentityTransform()
self._recompute_path()
def _recompute_path(self):
# Inner and outer rings are connected unless the annulus is complete
if abs((self.theta2 - self.theta1) - 360) <= 1e-12:
theta1, theta2 = 0, 360
connector = Path.MOVETO
else:
theta1, theta2 = self.theta1, self.theta2
connector = Path.LINETO
# Form the outer ring
arc = Path.arc(theta1, theta2)
if self.width is not None:
# Partial annulus needs to draw the outer ring
# followed by a reversed and scaled inner ring
v1 = arc.vertices
v2 = arc.vertices[::-1] * (self.r - self.width) / self.r
v = np.concatenate([v1, v2, [(0, 0)]])
c = [*arc.codes, connector, *arc.codes[1:], Path.CLOSEPOLY]
else:
# Wedge doesn't need an inner ring
v = np.concatenate([arc.vertices, [(0, 0), (0, 0)]])
c = [*arc.codes, connector, Path.CLOSEPOLY]
# Shift and scale the wedge to the final location.
self._path = Path(v * self.r + self.center, c)
def set_center(self, center):
self._path = None
self.center = center
self.stale = True
def set_radius(self, radius):
self._path = None
self.r = radius
self.stale = True
def set_theta1(self, theta1):
self._path = None
self.theta1 = theta1
self.stale = True
def set_theta2(self, theta2):
self._path = None
self.theta2 = theta2
self.stale = True
def set_width(self, width):
self._path = None
self.width = width
self.stale = True
def get_path(self):
if self._path is None:
self._recompute_path()
return self._path
# COVERAGE NOTE: Not used internally or from examples
| Wedge |
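A minimal rendering sketch for the Wedge patch above (assumes matplotlib is installed; centers, radii, and angles are illustrative). Passing width exercises the partial-annulus branch of _recompute_path; omitting it draws a plain wedge closed at the center.

import matplotlib.pyplot as plt
from matplotlib.patches import Wedge

fig, ax = plt.subplots()
ax.add_patch(Wedge((0.3, 0.5), 0.2, 0, 90))                # plain wedge
ax.add_patch(Wedge((0.7, 0.5), 0.2, 45, 315, width=0.05))  # partial annulus
ax.set_aspect("equal")
plt.show()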
python | fastapi__sqlmodel | docs_src/tutorial/relationship_attributes/cascade_delete_relationships/tutorial001_py39.py | {
"start": 114,
"end": 353
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
headquarters: str
heroes: list["Hero"] = Relationship(back_populates="team", cascade_delete=True)
| Team |
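A hedged sketch of the cascade behavior configured above (Team being the masked class, per the target column). The Hero model here is an assumption modeled on the SQLModel tutorial (foreign key plus back_populates); the in-memory SQLite URL and names are illustrative.

from typing import Optional
from sqlmodel import Field, Relationship, Session, SQLModel, create_engine, select

class Hero(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    name: str
    team_id: Optional[int] = Field(default=None, foreign_key="team.id")
    team: Optional[Team] = Relationship(back_populates="heroes")

engine = create_engine("sqlite://")
SQLModel.metadata.create_all(engine)

with Session(engine) as session:
    team = Team(name="Z-Force", headquarters="Sister Margaret's Bar")
    session.add(Hero(name="Rusty-Man", team=team))
    session.commit()
    session.delete(team)   # cascade_delete=True removes the team's heroes too
    session.commit()
    assert session.exec(select(Hero)).all() == []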
python | fluentpython__example-code-2e | 24-class-metaprog/tinyenums/microenum.py | {
"start": 728,
"end": 1084
} | class ____(dict):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__next_value = 0
def __missing__(self, key):
if key.startswith('__') and key.endswith('__'):
raise KeyError(key)
self[key] = value = self.__next_value
self.__next_value += 1
return value
| WilyDict |
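A short demonstration of the auto-enumerating __missing__ hook above: every new non-dunder key is assigned the next integer on first lookup, while dunder lookups still raise KeyError so class-machinery probes don't pollute the mapping.

wd = WilyDict()
assert wd['RED'] == 0      # first miss -> 0
assert wd['GREEN'] == 1    # second miss -> 1
assert wd['RED'] == 0      # already present; no new value assigned
assert dict(wd) == {'RED': 0, 'GREEN': 1}
try:
    wd['__missing_dunder__']   # dunder keys are refused
except KeyError:
    pass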
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_M.py | {
"start": 218,
"end": 1197
} | class ____(Benchmark):
r"""
Matyas objective function.
This class defines the Matyas [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Matyas}}(x) = 0.26(x_1^2 + x_2^2) - 0.48 x_1 x_2
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [0, 0]`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return 0.26 * (x[0] ** 2 + x[1] ** 2) - 0.48 * x[0] * x[1]
| Matyas |
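A hand-checked evaluation of the Matyas formula above, independent of the Benchmark base class (the sample points are illustrative): the optimum at the origin gives 0, and (1, 1) gives 0.26 * 2 - 0.48 = 0.04.

def matyas(x1, x2):
    return 0.26 * (x1 ** 2 + x2 ** 2) - 0.48 * x1 * x2

assert matyas(0.0, 0.0) == 0.0                # global optimum
assert abs(matyas(1.0, 1.0) - 0.04) < 1e-12   # 0.52 - 0.48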
python | kubernetes-client__python | kubernetes/client/models/v1beta1_ip_address.py | {
"start": 383,
"end": 6626
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1beta1IPAddressSpec'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
"""V1beta1IPAddress - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
@property
def api_version(self):
"""Gets the api_version of this V1beta1IPAddress. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1beta1IPAddress. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1beta1IPAddress.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1beta1IPAddress. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1beta1IPAddress. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1beta1IPAddress. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1beta1IPAddress.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1beta1IPAddress. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1beta1IPAddress. # noqa: E501
:return: The metadata of this V1beta1IPAddress. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1beta1IPAddress.
:param metadata: The metadata of this V1beta1IPAddress. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1beta1IPAddress. # noqa: E501
:return: The spec of this V1beta1IPAddress. # noqa: E501
:rtype: V1beta1IPAddressSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1beta1IPAddress.
:param spec: The spec of this V1beta1IPAddress. # noqa: E501
:type: V1beta1IPAddressSpec
"""
self._spec = spec
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1IPAddress):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1IPAddress):
return True
return self.to_dict() != other.to_dict()
| V1beta1IPAddress |
python | fastai__fastai | fastai/callback/schedule.py | {
"start": 3661,
"end": 7825
} | class ____(Callback):
"Schedule hyper-parameters according to `scheds`"
order,run_valid = 60,False
def __init__(self, scheds): self.scheds = scheds
def before_fit(self): self.hps = {p:[] for p in self.scheds.keys()}
def before_batch(self): self._update_val(self.pct_train)
def _update_val(self, pct):
for n,f in self.scheds.items(): self.opt.set_hyper(n, f(pct))
def after_batch(self):
for p in self.scheds.keys(): self.hps[p].append(self.opt.hypers[-1][p])
def after_fit(self):
if hasattr(self.learn, 'recorder') and hasattr(self, 'hps'): self.recorder.hps = self.hps
_docs = {"before_fit": "Initialize container for hyper-parameters",
"before_batch": "Set the proper hyper-parameters in the optimizer",
"after_batch": "Record hyper-parameters of this batch",
"after_fit": "Save the hyper-parameters in the recorder if there is one"}
# %% ../../nbs/14_callback.schedule.ipynb 46
@patch
def fit_one_cycle(self:Learner, n_epoch, lr_max=None, div=25., div_final=1e5, pct_start=0.25, wd=None,
moms=None, cbs=None, reset_opt=False, start_epoch=0):
"Fit `self.model` for `n_epoch` using the 1cycle policy."
if self.opt is None: self.create_opt()
self.opt.set_hyper('lr', self.lr if lr_max is None else lr_max)
lr_max = np.array([h['lr'] for h in self.opt.hypers])
scheds = {'lr': combined_cos(pct_start, lr_max/div, lr_max, lr_max/div_final),
'mom': combined_cos(pct_start, *(self.moms if moms is None else moms))}
self.fit(n_epoch, cbs=ParamScheduler(scheds)+L(cbs), reset_opt=reset_opt, wd=wd, start_epoch=start_epoch)
# %% ../../nbs/14_callback.schedule.ipynb 50
@patch
def plot_sched(self:Recorder, keys=None, figsize=None):
keys = self.hps.keys() if keys is None else L(keys)
rows,cols = (len(keys)+1)//2, min(2, len(keys))
figsize = figsize or (6*cols,4*rows)
_, axs = plt.subplots(rows, cols, figsize=figsize)
axs = axs.flatten() if len(keys) > 1 else L(axs)
for p,ax in zip(keys, axs):
ax.plot(self.hps[p])
ax.set_ylabel(p)
# %% ../../nbs/14_callback.schedule.ipynb 54
@patch
def fit_flat_cos(self:Learner, n_epoch, lr=None, div_final=1e5, pct_start=0.75, wd=None,
cbs=None, reset_opt=False, start_epoch=0):
"Fit `self.model` for `n_epoch` at flat `lr` before a cosine annealing."
if self.opt is None: self.create_opt()
self.opt.set_hyper('lr', self.lr if lr is None else lr)
lr = np.array([h['lr'] for h in self.opt.hypers])
scheds = {'lr': combined_cos(pct_start, lr, lr, lr/div_final)}
    self.fit(n_epoch, cbs=ParamScheduler(scheds)+L(cbs), reset_opt=reset_opt, wd=wd, start_epoch=start_epoch)
# %% ../../nbs/14_callback.schedule.ipynb 57
@patch
def fit_sgdr(self:Learner, n_cycles, cycle_len, lr_max=None, cycle_mult=2, cbs=None, reset_opt=False, wd=None,
start_epoch=0):
"Fit `self.model` for `n_cycles` of `cycle_len` using SGDR."
if self.opt is None: self.create_opt()
self.opt.set_hyper('lr', self.lr if lr_max is None else lr_max)
lr_max = np.array([h['lr'] for h in self.opt.hypers])
n_epoch = cycle_len * (cycle_mult**n_cycles-1)//(cycle_mult-1)
pcts = [cycle_len * cycle_mult**i / n_epoch for i in range(n_cycles)]
scheds = [SchedCos(lr_max, 0) for _ in range(n_cycles)]
scheds = {'lr': combine_scheds(pcts, scheds)}
self.fit(n_epoch, cbs=ParamScheduler(scheds)+L(cbs), reset_opt=reset_opt, wd=wd, start_epoch=start_epoch)
# %% ../../nbs/14_callback.schedule.ipynb 60
@patch
@delegates(Learner.fit_one_cycle)
def fine_tune(self:Learner, epochs, base_lr=2e-3, freeze_epochs=1, lr_mult=100,
pct_start=0.3, div=5.0, **kwargs):
"Fine tune with `Learner.freeze` for `freeze_epochs`, then with `Learner.unfreeze` for `epochs`, using discriminative LR."
self.freeze()
self.fit_one_cycle(freeze_epochs, slice(base_lr), pct_start=0.99, **kwargs)
base_lr /= 2
self.unfreeze()
self.fit_one_cycle(epochs, slice(base_lr/lr_mult, base_lr), pct_start=pct_start, div=div, **kwargs)
# %% ../../nbs/14_callback.schedule.ipynb 67
@docs
| ParamScheduler |
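A hedged sketch of the schedule shape that fit_one_cycle hands to ParamScheduler above: combined_cos joins two cosine segments at pct_start, warming up to the peak and then annealing down (the learning-rate values are illustrative).

from fastai.callback.schedule import combined_cos

sched = combined_cos(0.25, 1e-4, 1e-2, 1e-6)  # pct_start, start, peak, end
assert abs(sched(0.0) - 1e-4) < 1e-9    # warmup begins at lr_max/div
assert abs(sched(0.25) - 1e-2) < 1e-9   # peak reached at pct_start
assert abs(sched(1.0) - 1e-6) < 1e-9    # annealed to lr_max/div_final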
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 37441,
"end": 38187
} | class ____(BaseModel):
"""
Request body for bulk update, and delete task instances.
"""
model_config = ConfigDict(
extra="forbid",
)
new_state: TaskInstanceState | None = None
note: Annotated[Note | None, Field(title="Note")] = None
include_upstream: Annotated[bool | None, Field(title="Include Upstream")] = False
include_downstream: Annotated[bool | None, Field(title="Include Downstream")] = False
include_future: Annotated[bool | None, Field(title="Include Future")] = False
include_past: Annotated[bool | None, Field(title="Include Past")] = False
task_id: Annotated[str, Field(title="Task Id")]
map_index: Annotated[int | None, Field(title="Map Index")] = None
| BulkTaskInstanceBody |
python | allegroai__clearml | clearml/backend_api/services/v2_13/workers.py | {
"start": 25100,
"end": 28049
} | class ____(NonStrictDataModel):
"""
:param id: ID
:type id: str
:param name: Name
:type name: str
:param running_time: Task running time
:type running_time: int
:param last_iteration: Last task iteration
:type last_iteration: int
"""
_schema = {
"properties": {
"id": {"description": "ID", "type": ["string", "null"]},
"last_iteration": {
"description": "Last task iteration",
"type": ["integer", "null"],
},
"name": {"description": "Name", "type": ["string", "null"]},
"running_time": {
"description": "Task running time",
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(
self,
id: Optional[str] = None,
name: Optional[str] = None,
running_time: Optional[int] = None,
last_iteration: Optional[int] = None,
**kwargs: Any
) -> None:
super(CurrentTaskEntry, self).__init__(**kwargs)
self.id = id
self.name = name
self.running_time = running_time
self.last_iteration = last_iteration
@schema_property("id")
def id(self) -> Optional[str]:
return self._property_id
@id.setter
def id(self, value: Optional[str]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("name")
def name(self) -> Optional[str]:
return self._property_name
@name.setter
def name(self, value: Optional[str]) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("running_time")
def running_time(self) -> Optional[int]:
return self._property_running_time
@running_time.setter
def running_time(self, value: Optional[int]) -> None:
if value is None:
self._property_running_time = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "running_time", six.integer_types)
self._property_running_time = value
@schema_property("last_iteration")
def last_iteration(self) -> Optional[int]:
return self._property_last_iteration
@last_iteration.setter
def last_iteration(self, value: Optional[int]) -> None:
if value is None:
self._property_last_iteration = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "last_iteration", six.integer_types)
self._property_last_iteration = value
| CurrentTaskEntry |
python | pola-rs__polars | py-polars/src/polars/datatypes/classes.py | {
"start": 32079,
"end": 35530
} | class ____(NestedType):
"""
Fixed length list type.
Parameters
----------
inner
The `DataType` of the values within each array.
shape
The shape of the arrays.
width
The length of the arrays.
.. deprecated:: 0.20.31
The `width` parameter for `Array` is deprecated. Use `shape` instead.
Examples
--------
>>> s = pl.Series("a", [[1, 2], [4, 3]], dtype=pl.Array(pl.Int64, 2))
>>> s
shape: (2,)
Series: 'a' [array[i64, 2]]
[
[1, 2]
[4, 3]
]
"""
inner: PolarsDataType
size: int
shape: tuple[int, ...]
def __init__(
self,
inner: PolarsDataType | PythonDataType,
shape: int | tuple[int, ...] | None = None,
*,
width: int | None = None,
) -> None:
if width is not None:
from polars._utils.deprecation import issue_deprecation_warning
issue_deprecation_warning(
"the `width` parameter for `Array` is deprecated. Use `shape` instead.",
version="0.20.31",
)
shape = width
elif shape is None:
msg = "Array constructor is missing the required argument `shape`"
raise TypeError(msg)
inner_parsed = polars.datatypes.parse_into_dtype(inner)
inner_shape = inner_parsed.shape if isinstance(inner_parsed, Array) else ()
if isinstance(shape, int):
self.inner = inner_parsed
self.size = shape
self.shape = (shape,) + inner_shape
elif isinstance(shape, tuple) and isinstance(shape[0], int): # type: ignore[redundant-expr]
if len(shape) > 1:
inner_parsed = Array(inner_parsed, shape[1:])
self.inner = inner_parsed
self.size = shape[0]
self.shape = shape + inner_shape
else:
msg = f"invalid input for shape: {shape!r}"
raise TypeError(msg)
def __eq__(self, other: PolarsDataType) -> bool: # type: ignore[override]
# This equality check allows comparison of type classes and type instances.
# If a parent type is not specific about its inner type, we infer it as equal:
# > array[i64] == array[i64] -> True
# > array[i64] == array[f32] -> False
# > array[i64] == array -> True
# allow comparing object instances to class
if type(other) is DataTypeClass and issubclass(other, Array):
return True
elif isinstance(other, Array):
if self.shape != other.shape:
return False
else:
return self.inner == other.inner
else:
return False
def __hash__(self) -> int:
return hash((self.__class__, self.inner, self.size))
def __repr__(self) -> str:
# Get leaf type
dtype = self.inner
while isinstance(dtype, Array):
dtype = dtype.inner
class_name = self.__class__.__name__
return f"{class_name}({dtype!r}, shape={self.shape})"
@property
def width(self) -> int:
"""The size of the Array."""
from polars._utils.deprecation import issue_deprecation_warning
issue_deprecation_warning(
"the `width` attribute for `Array` is deprecated. Use `size` instead.",
version="0.20.31",
)
return self.size
| Array |
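A small check of the shape normalization implemented above (assumes polars is installed): a multi-dimensional shape is peeled into nested Array types, size holds the outermost length, and comparing an instance against the bare Array class is allowed by __eq__.

import polars as pl

dt = pl.Array(pl.Int64, (2, 3))  # 2 outer elements, each an Array of 3
assert dt.size == 2
assert dt.shape == (2, 3)
assert isinstance(dt.inner, pl.Array) and dt.inner.size == 3
assert dt == pl.Array            # instance-to-class comparison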
python | run-llama__llama_index | llama-index-experimental/llama_index/experimental/param_tuner/base.py | {
"start": 3789,
"end": 6374
} | class ____(BaseParamTuner):
"""
Async Parameter tuner.
Args:
param_dict(Dict): A dictionary of parameters to iterate over.
Example param_dict:
{
"num_epochs": [10, 20],
"batch_size": [8, 16, 32],
}
fixed_param_dict(Dict): A dictionary of fixed parameters passed to each job.
aparam_fn (Callable): An async function to run with parameters.
num_workers (int): Number of workers to use.
"""
aparam_fn: Callable[[Dict[str, Any]], Awaitable[RunResult]] = Field(
..., description="Async function to run with parameters."
)
num_workers: int = Field(2, description="Number of workers to use.")
_semaphore: asyncio.Semaphore = PrivateAttr()
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
self._semaphore = asyncio.Semaphore(self.num_workers)
async def atune(self) -> TunedResult:
"""Run tuning."""
# each key in param_dict is a parameter to tune, each val
# is a list of values to try
# generate combinations of parameters from the param_dict
param_combinations = generate_param_combinations(self.param_dict)
# for each combination, run the job with the arguments
# in args_dict
async def aparam_fn_worker(
semaphore: asyncio.Semaphore,
full_param_dict: Dict[str, Any],
) -> RunResult:
"""Async param fn worker."""
async with semaphore:
return await self.aparam_fn(full_param_dict)
all_run_results = []
run_jobs = []
for param_combination in param_combinations:
full_param_dict = {
**self.fixed_param_dict,
**param_combination,
}
run_jobs.append(aparam_fn_worker(self._semaphore, full_param_dict))
# run_jobs.append(self.aparam_fn(full_param_dict))
if self.show_progress:
from tqdm.asyncio import tqdm_asyncio
all_run_results = await tqdm_asyncio.gather(*run_jobs)
else:
all_run_results = await asyncio.gather(*run_jobs)
# sort the results by score
sorted_run_results = sorted(
all_run_results, key=lambda x: x.score, reverse=True
)
return TunedResult(run_results=sorted_run_results, best_idx=0)
def tune(self) -> TunedResult:
"""Run tuning."""
return asyncio.run(self.atune())
| AsyncParamTuner |
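A hedged usage sketch for the async tuner above. The import path follows this file's location; the RunResult field names (score, params) and the trial function are assumptions for illustration only.

import asyncio
from llama_index.experimental.param_tuner.base import AsyncParamTuner, RunResult

async def run_trial(params):            # stand-in for a real training job
    await asyncio.sleep(0)
    score = -abs(params["lr"] - 1e-3)   # best when lr is closest to 1e-3
    return RunResult(score=score, params=params)

tuner = AsyncParamTuner(
    param_dict={"lr": [1e-2, 1e-3, 1e-4]},
    fixed_param_dict={"epochs": 1},
    aparam_fn=run_trial,
    num_workers=2,
)
result = tuner.tune()                        # wraps atune() in asyncio.run
best = result.run_results[result.best_idx]   # results sorted by score
assert best.params["lr"] == 1e-3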
python | sympy__sympy | sympy/codegen/ast.py | {
"start": 36897,
"end": 40787
} | class ____(FloatBaseType):
""" Represents a floating point type with fixed bit width.
Base 2 & one sign bit is assumed.
Parameters
==========
name : str
Name of the type.
nbits : integer
Number of bits used (storage).
nmant : integer
Number of bits used to represent the mantissa.
nexp : integer
Number of bits used to represent the exponent.
Examples
========
>>> from sympy import S
>>> from sympy.codegen.ast import FloatType
>>> half_precision = FloatType('f16', nbits=16, nmant=10, nexp=5)
>>> half_precision.max
65504
>>> half_precision.tiny == S(2)**-14
True
>>> half_precision.eps == S(2)**-10
True
>>> half_precision.dig == 3
True
>>> half_precision.decimal_dig == 5
True
>>> half_precision.cast_check(1.0)
1.0
>>> half_precision.cast_check(1e5) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Maximum value for data type smaller than new value.
"""
__slots__ = ('nbits', 'nmant', 'nexp',)
_fields = Type._fields + __slots__
_construct_nbits = _construct_nmant = _construct_nexp = Integer
@property
def max_exponent(self):
""" The largest positive number n, such that 2**(n - 1) is a representable finite value. """
# cf. C++'s ``std::numeric_limits::max_exponent``
return two**(self.nexp - 1)
@property
def min_exponent(self):
""" The lowest negative number n, such that 2**(n - 1) is a valid normalized number. """
# cf. C++'s ``std::numeric_limits::min_exponent``
return 3 - self.max_exponent
@property
def max(self):
""" Maximum value representable. """
return (1 - two**-(self.nmant+1))*two**self.max_exponent
@property
def tiny(self):
""" The minimum positive normalized value. """
# See C macros: FLT_MIN, DBL_MIN, LDBL_MIN
# or C++'s ``std::numeric_limits::min``
# or numpy.finfo(dtype).tiny
return two**(self.min_exponent - 1)
@property
def eps(self):
""" Difference between 1.0 and the next representable value. """
return two**(-self.nmant)
@property
def dig(self):
""" Number of decimal digits that are guaranteed to be preserved in text.
When converting text -> float -> text, you are guaranteed that at least ``dig``
number of digits are preserved with respect to rounding or overflow.
"""
from sympy.functions import floor, log
return floor(self.nmant * log(2)/log(10))
@property
def decimal_dig(self):
""" Number of digits needed to store & load without loss.
Explanation
===========
Number of decimal digits needed to guarantee that two consecutive conversions
(float -> text -> float) to be idempotent. This is useful when one do not want
to loose precision due to rounding errors when storing a floating point value
as text.
"""
from sympy.functions import ceiling, log
return ceiling((self.nmant + 1) * log(2)/log(10) + 1)
def cast_nocheck(self, value):
""" Casts without checking if out of bounds or subnormal. """
if value == oo: # float(oo) or oo
return float(oo)
elif value == -oo: # float(-oo) or -oo
return float(-oo)
return Float(str(sympify(value).evalf(self.decimal_dig)), self.decimal_dig)
def _check(self, value):
if value < -self.max:
raise ValueError("Value is too small: %d < %d" % (value, -self.max))
if value > self.max:
raise ValueError("Value is too big: %d > %d" % (value, self.max))
if abs(value) < self.tiny:
raise ValueError("Smallest (absolute) value for data type bigger than new value.")
| FloatType |
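A worked check of the bit-layout formulas above for IEEE single precision (nbits=32, nmant=23, nexp=8), mirroring the doctest style: max_exponent is 2**(nexp - 1) and max reduces to the familiar FLT_MAX.

from sympy import S
from sympy.codegen.ast import FloatType

f32 = FloatType('f32', nbits=32, nmant=23, nexp=8)
assert f32.max_exponent == 128                      # 2**(8 - 1)
assert f32.eps == S(2)**-23
assert float(f32.max) == 3.4028234663852886e+38    # == C's FLT_MAX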
python | allegroai__clearml | clearml/backend_api/services/v2_23/dataviews.py | {
"start": 148373,
"end": 173123
} | class ____(Request):
"""
Get dataview information
:param dataview: Datatview ID
:type dataview: str
:param name: Dataview name
:type name: str
:param description: Dataview description
:type description: str
:param project: Project ID of the project to which this task is assigned
:type project: str
:param filters: List of FilterRule ('OR' connection)
:type filters: Sequence[FilterRule]
:param output_rois: 'all_in_frame' - all rois for a frame are returned
'only_filtered' - only rois which led this frame to be selected 'frame_per_roi'
- single roi per frame. Frame can be returned multiple times with a different
roi each time. Note: this should be used for Training tasks only Note:
frame_per_roi implies that only filtered rois will be returned
:type output_rois: OutputRoisEnum
:param versions: List of dataview entries. All tasks must have at least one
dataview.
:type versions: Sequence[DataviewEntry]
:param iteration: Iteration parameters. Not applicable for register (import)
tasks.
:type iteration: Iteration
:param augmentation: Augmentation parameters. Only for training and testing
tasks.
:type augmentation: Augmentation
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
:param mapping: Mapping parameters
:type mapping: Mapping
:param labels_enumeration: Labels enumerations, specifies numbers to be
assigned to ROI labels when getting frames
:type labels_enumeration: dict
:param status: Dataview status
:type status: str
:param force: Allow update of the published dataview
:type force: bool
"""
_service = "dataviews"
_action = "update"
_version = "2.23"
_schema = {
"definitions": {
"augmentation": {
"properties": {
"crop_around_rois": {
"description": "Crop image data around all frame ROIs",
"type": ["boolean", "null"],
},
"sets": {
"description": "List of augmentation sets",
"items": {"$ref": "#/definitions/augmentation_set"},
"type": ["array", "null"],
},
},
"type": "object",
},
"augmentation_set": {
"properties": {
"arguments": {
"additionalProperties": {
"additionalProperties": True,
"type": "object",
},
"description": "Arguments dictionary per custom augmentation type.",
"type": ["object", "null"],
},
"cls": {
"description": "Augmentation class",
"type": ["string", "null"],
},
"strength": {
"description": "Augmentation strength. Range [0,).",
"minimum": 0,
"type": ["number", "null"],
},
"types": {
"description": "Augmentation type",
"items": {"type": "string"},
"type": ["array", "null"],
},
},
"type": "object",
},
"dataview_entry": {
"properties": {
"dataset": {
"description": "Existing Dataset id",
"type": "string",
},
"merge_with": {
"description": "Version ID to merge with",
"type": "string",
},
"version": {
"description": "Version id of a version belonging to the dataset",
"type": "string",
},
},
"required": ["dataset", "version"],
"type": "object",
},
"filter_by_roi_enum": {
"default": "label_rules",
"enum": ["disabled", "no_rois", "label_rules"],
"type": "string",
},
"filter_label_rule": {
"properties": {
"conf_range": {
"description": (
"Range of ROI confidence level in the frame (min, max). -1 for not applicable\n "
" Both min and max can be either -1 or positive.\n 2nd number (max) must be"
" either -1 or larger than or equal to the 1st number (min)"
),
"items": {"type": "number"},
"maxItems": 2,
"minItems": 1,
"type": "array",
},
"count_range": {
"description": (
"Range of times ROI appears in the frame (min, max). -1 for not applicable.\n "
" Both integers must be larger than or equal to -1.\n 2nd integer (max) must be"
" either -1 or larger than or equal to the 1st integer (min)"
),
"items": {"type": "integer"},
"maxItems": 2,
"minItems": 1,
"type": "array",
},
"label": {
"description": (
"Lucene format query (see lucene query syntax).\nDefault search field is label.keyword and"
" default operator is AND, so searching for:\n\n'Bus Stop' Blue\n\nis equivalent"
" to:\n\nLabel.keyword:'Bus Stop' AND label.keyword:'Blue'"
),
"type": "string",
},
"must_not": {
"default": False,
"description": (
"If set then the label must not exist or lucene query must not be true.\n The"
" default value is false"
),
"type": "boolean",
},
},
"required": ["label"],
"type": "object",
},
"filter_rule": {
"properties": {
"dataset": {
"description": (
"Dataset ID. Must be a dataset which is in the task's view. If set to '*' all datasets in"
" View are used."
),
"type": "string",
},
"filter_by_roi": {
"description": "Type of filter. Optional, the default value is 'label_rules'",
"oneOf": [
{"$ref": "#/definitions/filter_by_roi_enum"},
{"type": "null"},
],
},
"frame_query": {
"description": "Frame filter, in Lucene query syntax",
"type": ["string", "null"],
},
"label_rules": {
"description": (
"List of FilterLabelRule ('AND' connection)\n\ndisabled - No filtering by ROIs. Select all"
" frames, even if they don't have ROIs (all frames)\n\nno_rois - Select only frames without"
" ROIs (empty frames)\n\nlabel_rules - Select frames according to label rules"
),
"items": {"$ref": "#/definitions/filter_label_rule"},
"type": ["array", "null"],
},
"sources_query": {
"description": "Sources filter, in Lucene query syntax. Filters sources in each frame.",
"type": ["string", "null"],
},
"version": {
"description": (
"Dataset version to apply rule to. Must belong to the dataset and be in the task's view. If"
" set to '*' all version of the datasets in View are used."
),
"type": "string",
},
"weight": {
"description": "Rule weight. Default is 1",
"type": "number",
},
},
"required": ["dataset"],
"type": "object",
},
"iteration": {
"description": "Sequential Iteration API configuration",
"properties": {
"infinite": {
"description": "Infinite iteration",
"type": ["boolean", "null"],
},
"jump": {
"description": "Jump entry",
"oneOf": [{"$ref": "#/definitions/jump"}, {"type": "null"}],
},
"limit": {
"description": (
"Maximum frames per task. If not passed, frames will end when no more matching frames are"
" found, unless infinite is True."
),
"type": ["integer", "null"],
},
"min_sequence": {
"description": (
"Length (in ms) of video clips to return. This is used in random order, and in sequential"
" order only if jumping is provided and only for video frames"
),
"type": ["integer", "null"],
},
"order": {
"description": (
"\n Input frames order. Values: 'sequential', 'random'\n In"
" Sequential mode frames will be returned according to the order in which the frames were"
" added to the dataset."
),
"oneOf": [
{"$ref": "#/definitions/iteration_order_enum"},
{"type": "null"},
],
},
"random_seed": {
"description": "Random seed used when iterating over the dataview",
"type": ["integer", "null"],
},
},
"type": "object",
},
"iteration_order_enum": {
"enum": ["sequential", "random"],
"type": "string",
},
"jump": {
"properties": {
"time": {
"description": "Max time in milliseconds between frames",
"type": ["integer", "null"],
}
},
"type": "object",
},
"label_source": {
"properties": {
"dataset": {
"description": "Source dataset id. '*' for all datasets in view",
"type": ["string", "null"],
},
"labels": {
"description": (
"List of source labels (AND connection). '*' indicates any label. Labels must exist in at"
" least one of the dataset versions in the task's view"
),
"items": {"type": "string"},
"type": ["array", "null"],
},
"version": {
"description": (
"Source dataset version id. Default is '*' (for all versions in dataset in the view)"
" Version must belong to the selected dataset, and must be in the task's view[i]"
),
"type": ["string", "null"],
},
},
"type": "object",
},
"mapping": {
"properties": {
"rules": {
"description": "Rules list",
"items": {"$ref": "#/definitions/mapping_rule"},
"type": ["array", "null"],
}
},
"type": "object",
},
"mapping_rule": {
"properties": {
"source": {
"description": "Source label info",
"oneOf": [
{"$ref": "#/definitions/label_source"},
{"type": "null"},
],
},
"target": {
"description": "Target label name",
"type": ["string", "null"],
},
},
"type": "object",
},
"output_rois_enum": {
"enum": ["all_in_frame", "only_filtered", "frame_per_roi"],
"type": "string",
},
},
"properties": {
"augmentation": {
"$ref": "#/definitions/augmentation",
"description": "Augmentation parameters. Only for training and testing tasks.",
},
"dataview": {"description": "Datatview ID", "type": "string"},
"description": {"description": "Dataview description", "type": "string"},
"filters": {
"description": "List of FilterRule ('OR' connection)",
"items": {"$ref": "#/definitions/filter_rule"},
"type": "array",
},
"force": {
"default": False,
"description": "Allow update of the published dataview",
"type": "boolean",
},
"iteration": {
"$ref": "#/definitions/iteration",
"description": "Iteration parameters. Not applicable for register (import) tasks.",
},
"labels_enumeration": {
"additionalProperties": {"type": "integer"},
"description": (
"Labels enumerations, specifies numbers to be assigned to ROI labels when getting frames"
),
"type": "object",
},
"mapping": {
"$ref": "#/definitions/mapping",
"description": "Mapping parameters",
},
"name": {"description": "Dataview name", "type": "string"},
"output_rois": {
"$ref": "#/definitions/output_rois_enum",
"description": (
"'all_in_frame' - all rois for a frame are returned\n 'only_filtered' - only"
" rois which led this frame to be selected\n 'frame_per_roi' - single roi per"
" frame. Frame can be returned multiple times with a different roi each time.\n "
" Note: this should be used for Training tasks only\n Note: frame_per_roi"
" implies that only filtered rois will be returned\n "
),
},
"project": {
"description": "Project ID of the project to which this task is assigned",
"type": "string",
},
"status": {
"description": "Dataview status",
"enum": ["draft", "published"],
"type": "string",
},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": "array",
},
"versions": {
"description": "List of dataview entries. All tasks must have at least one dataview.",
"items": {"$ref": "#/definitions/dataview_entry"},
"type": "array",
},
},
"required": ["dataview"],
"type": "object",
}
def __init__(
self,
dataview,
name=None,
description=None,
project=None,
filters=None,
output_rois=None,
versions=None,
iteration=None,
augmentation=None,
tags=None,
system_tags=None,
mapping=None,
labels_enumeration=None,
status=None,
force=False,
**kwargs
):
super(UpdateRequest, self).__init__(**kwargs)
self.dataview = dataview
self.name = name
self.description = description
self.project = project
self.filters = filters
self.output_rois = output_rois
self.versions = versions
self.iteration = iteration
self.augmentation = augmentation
self.tags = tags
self.system_tags = system_tags
self.mapping = mapping
self.labels_enumeration = labels_enumeration
self.status = status
self.force = force
@schema_property("dataview")
def dataview(self):
return self._property_dataview
@dataview.setter
def dataview(self, value):
if value is None:
self._property_dataview = None
return
self.assert_isinstance(value, "dataview", six.string_types)
self._property_dataview = value
@schema_property("name")
def name(self):
return self._property_name
@name.setter
def name(self, value):
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("description")
def description(self):
return self._property_description
@description.setter
def description(self, value):
if value is None:
self._property_description = None
return
self.assert_isinstance(value, "description", six.string_types)
self._property_description = value
@schema_property("project")
def project(self):
return self._property_project
@project.setter
def project(self, value):
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
@schema_property("filters")
def filters(self):
return self._property_filters
@filters.setter
def filters(self, value):
if value is None:
self._property_filters = None
return
self.assert_isinstance(value, "filters", (list, tuple))
if any(isinstance(v, dict) for v in value):
value = [
FilterRule.from_dict(v) if isinstance(v, dict) else v for v in value
]
else:
self.assert_isinstance(value, "filters", FilterRule, is_array=True)
self._property_filters = value
@schema_property("output_rois")
def output_rois(self):
return self._property_output_rois
@output_rois.setter
def output_rois(self, value):
if value is None:
self._property_output_rois = None
return
if isinstance(value, six.string_types):
try:
value = OutputRoisEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "output_rois", enum.Enum)
self._property_output_rois = value
@schema_property("versions")
def versions(self):
return self._property_versions
@versions.setter
def versions(self, value):
if value is None:
self._property_versions = None
return
self.assert_isinstance(value, "versions", (list, tuple))
if any(isinstance(v, dict) for v in value):
value = [
DataviewEntry.from_dict(v) if isinstance(v, dict) else v for v in value
]
else:
self.assert_isinstance(value, "versions", DataviewEntry, is_array=True)
self._property_versions = value
@schema_property("iteration")
def iteration(self):
return self._property_iteration
@iteration.setter
def iteration(self, value):
if value is None:
self._property_iteration = None
return
if isinstance(value, dict):
value = Iteration.from_dict(value)
else:
self.assert_isinstance(value, "iteration", Iteration)
self._property_iteration = value
@schema_property("augmentation")
def augmentation(self):
return self._property_augmentation
@augmentation.setter
def augmentation(self, value):
if value is None:
self._property_augmentation = None
return
if isinstance(value, dict):
value = Augmentation.from_dict(value)
else:
self.assert_isinstance(value, "augmentation", Augmentation)
self._property_augmentation = value
@schema_property("tags")
def tags(self):
return self._property_tags
@tags.setter
def tags(self, value):
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self):
return self._property_system_tags
@system_tags.setter
def system_tags(self, value):
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("mapping")
def mapping(self):
return self._property_mapping
@mapping.setter
def mapping(self, value):
if value is None:
self._property_mapping = None
return
if isinstance(value, dict):
value = Mapping.from_dict(value)
else:
self.assert_isinstance(value, "mapping", Mapping)
self._property_mapping = value
@schema_property("labels_enumeration")
def labels_enumeration(self):
return self._property_labels_enumeration
@labels_enumeration.setter
def labels_enumeration(self, value):
if value is None:
self._property_labels_enumeration = None
return
self.assert_isinstance(value, "labels_enumeration", (dict,))
self._property_labels_enumeration = value
@schema_property("status")
def status(self):
return self._property_status
@status.setter
def status(self, value):
if value is None:
self._property_status = None
return
self.assert_isinstance(value, "status", six.string_types)
self._property_status = value
@schema_property("force")
def force(self):
return self._property_force
@force.setter
def force(self, value):
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
| UpdateRequest |
python | PyCQA__pylint | tests/functional/m/membership_protocol.py | {
"start": 1590,
"end": 1833
} | class ____:
valid_values = None
def validate(self, value):
if self.valid_values is None:
return True
else:
# error should not be emitted here
return value in self.valid_values
| BaseThing |
python | urllib3__urllib3 | src/urllib3/exceptions.py | {
"start": 2161,
"end": 2910
} | class ____(RequestError):
"""Raised when the maximum number of retries is exceeded.
:param pool: The connection pool
:type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
:param str url: The requested Url
:param reason: The underlying error
:type reason: :class:`Exception`
"""
def __init__(
self, pool: ConnectionPool, url: str | None, reason: Exception | None = None
) -> None:
self.reason = reason
message = f"Max retries exceeded with url: {url} (Caused by {reason!r})"
super().__init__(pool, url, message)
def __reduce__(self) -> _TYPE_REDUCE_RESULT:
# For pickling purposes.
return self.__class__, (None, self.url, self.reason)
| MaxRetryError |
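A short sketch of why __reduce__ above drops the pool: connection pools hold sockets and locks and cannot be pickled, so a pickling round-trip keeps only the url and reason (the example error values are illustrative).

import pickle
from urllib3.exceptions import MaxRetryError

err = MaxRetryError(None, "http://example.com/", reason=TimeoutError("read"))
restored = pickle.loads(pickle.dumps(err))
assert restored.url == "http://example.com/"
assert isinstance(restored.reason, TimeoutError)  # pool dropped, reason kept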
python | openai__openai-python | src/openai/resources/images.py | {
"start": 47429,
"end": 93923
} | class ____(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncImagesWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncImagesWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncImagesWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncImagesWithStreamingResponse(self)
async def create_variation(
self,
*,
image: FileTypes,
model: Union[str, ImageModel, None] | Omit = omit,
n: Optional[int] | Omit = omit,
response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
size: Optional[Literal["256x256", "512x512", "1024x1024"]] | Omit = omit,
user: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ImagesResponse:
"""Creates a variation of a given image.
This endpoint only supports `dall-e-2`.
Args:
image: The image to use as the basis for the variation(s). Must be a valid PNG file,
less than 4MB, and square.
model: The model to use for image generation. Only `dall-e-2` is supported at this
time.
n: The number of images to generate. Must be between 1 and 10.
response_format: The format in which the generated images are returned. Must be one of `url` or
`b64_json`. URLs are only valid for 60 minutes after the image has been
generated.
size: The size of the generated images. Must be one of `256x256`, `512x512`, or
`1024x1024`.
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
body = deepcopy_minimal(
{
"image": image,
"model": model,
"n": n,
"response_format": response_format,
"size": size,
"user": user,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["image"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return await self._post(
"/images/variations",
body=await async_maybe_transform(body, image_create_variation_params.ImageCreateVariationParams),
files=files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ImagesResponse,
)
@overload
async def edit(
self,
*,
image: Union[FileTypes, SequenceNotStr[FileTypes]],
prompt: str,
background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
mask: FileTypes | Omit = omit,
model: Union[str, ImageModel, None] | Omit = omit,
n: Optional[int] | Omit = omit,
output_compression: Optional[int] | Omit = omit,
output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
partial_images: Optional[int] | Omit = omit,
quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
stream: Optional[Literal[False]] | Omit = omit,
user: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ImagesResponse:
"""Creates an edited or extended image given one or more source images and a
prompt.
This endpoint only supports `gpt-image-1` and `dall-e-2`.
Args:
image: The image(s) to edit. Must be a supported image file or an array of images.
For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
50MB. You can provide up to 16 images.
For `dall-e-2`, you can only provide one image, and it should be a square `png`
file less than 4MB.
prompt: A text description of the desired image(s). The maximum length is 1000
characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
          background: Allows setting transparency for the background of the generated image(s). This
parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
`opaque` or `auto` (default value). When `auto` is used, the model will
automatically determine the best background for the image.
If `transparent`, the output format needs to support transparency, so it should
be set to either `png` (default value) or `webp`.
input_fidelity: Control how much effort the model will exert to match the style and features,
especially facial features, of input images. This parameter is only supported
for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
`low`. Defaults to `low`.
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
indicate where `image` should be edited. If there are multiple images provided,
the mask will be applied on the first image. Must be a valid PNG file, less than
4MB, and have the same dimensions as `image`.
model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
is used.
n: The number of images to generate. Must be between 1 and 10.
output_compression: The compression level (0-100%) for the generated images. This parameter is only
supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
defaults to 100.
output_format: The format in which the generated images are returned. This parameter is only
supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
default value is `png`.
partial_images: The number of partial images to generate. This parameter is used for streaming
responses that return partial images. Value must be between 0 and 3. When set to
0, the response will be a single image sent in one streaming event.
Note that the final image may be sent before the full number of partial images
are generated if the full image is generated more quickly.
quality: The quality of the image that will be generated. `high`, `medium` and `low` are
only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
Defaults to `auto`.
response_format: The format in which the generated images are returned. Must be one of `url` or
`b64_json`. URLs are only valid for 60 minutes after the image has been
generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
will always return base64-encoded images.
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
(landscape), `1024x1536` (portrait), or `auto` (default value) for
`gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
stream: Edit the image in streaming mode. Defaults to `false`. See the
[Image generation guide](https://platform.openai.com/docs/guides/image-generation)
for more information.
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@overload
async def edit(
self,
*,
image: Union[FileTypes, SequenceNotStr[FileTypes]],
prompt: str,
stream: Literal[True],
background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
mask: FileTypes | Omit = omit,
model: Union[str, ImageModel, None] | Omit = omit,
n: Optional[int] | Omit = omit,
output_compression: Optional[int] | Omit = omit,
output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
partial_images: Optional[int] | Omit = omit,
quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
user: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncStream[ImageEditStreamEvent]:
"""Creates an edited or extended image given one or more source images and a
prompt.
This endpoint only supports `gpt-image-1` and `dall-e-2`.
Args:
image: The image(s) to edit. Must be a supported image file or an array of images.
For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
50MB. You can provide up to 16 images.
For `dall-e-2`, you can only provide one image, and it should be a square `png`
file less than 4MB.
prompt: A text description of the desired image(s). The maximum length is 1000
characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
stream: Edit the image in streaming mode. Defaults to `false`. See the
[Image generation guide](https://platform.openai.com/docs/guides/image-generation)
for more information.
          background: Allows setting transparency for the background of the generated image(s). This
parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
`opaque` or `auto` (default value). When `auto` is used, the model will
automatically determine the best background for the image.
If `transparent`, the output format needs to support transparency, so it should
be set to either `png` (default value) or `webp`.
input_fidelity: Control how much effort the model will exert to match the style and features,
especially facial features, of input images. This parameter is only supported
for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
`low`. Defaults to `low`.
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
indicate where `image` should be edited. If there are multiple images provided,
the mask will be applied on the first image. Must be a valid PNG file, less than
4MB, and have the same dimensions as `image`.
model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
is used.
n: The number of images to generate. Must be between 1 and 10.
output_compression: The compression level (0-100%) for the generated images. This parameter is only
supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
defaults to 100.
output_format: The format in which the generated images are returned. This parameter is only
supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
default value is `png`.
partial_images: The number of partial images to generate. This parameter is used for streaming
responses that return partial images. Value must be between 0 and 3. When set to
0, the response will be a single image sent in one streaming event.
Note that the final image may be sent before the full number of partial images
are generated if the full image is generated more quickly.
quality: The quality of the image that will be generated. `high`, `medium` and `low` are
only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
Defaults to `auto`.
response_format: The format in which the generated images are returned. Must be one of `url` or
`b64_json`. URLs are only valid for 60 minutes after the image has been
generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
will always return base64-encoded images.
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
(landscape), `1024x1536` (portrait), or `auto` (default value) for
`gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@overload
async def edit(
self,
*,
image: Union[FileTypes, SequenceNotStr[FileTypes]],
prompt: str,
stream: bool,
background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
mask: FileTypes | Omit = omit,
model: Union[str, ImageModel, None] | Omit = omit,
n: Optional[int] | Omit = omit,
output_compression: Optional[int] | Omit = omit,
output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
partial_images: Optional[int] | Omit = omit,
quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
user: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ImagesResponse | AsyncStream[ImageEditStreamEvent]:
"""Creates an edited or extended image given one or more source images and a
prompt.
This endpoint only supports `gpt-image-1` and `dall-e-2`.
Args:
image: The image(s) to edit. Must be a supported image file or an array of images.
For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
50MB. You can provide up to 16 images.
For `dall-e-2`, you can only provide one image, and it should be a square `png`
file less than 4MB.
prompt: A text description of the desired image(s). The maximum length is 1000
characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
stream: Edit the image in streaming mode. Defaults to `false`. See the
[Image generation guide](https://platform.openai.com/docs/guides/image-generation)
for more information.
background: Allows setting transparency for the background of the generated image(s). This
parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
`opaque` or `auto` (default value). When `auto` is used, the model will
automatically determine the best background for the image.
If `transparent`, the output format needs to support transparency, so it should
be set to either `png` (default value) or `webp`.
input_fidelity: Control how much effort the model will exert to match the style and features,
especially facial features, of input images. This parameter is only supported
for `gpt-image-1`. Unsupported for `gpt-image-1-mini`. Supports `high` and
`low`. Defaults to `low`.
mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
indicate where `image` should be edited. If there are multiple images provided,
the mask will be applied on the first image. Must be a valid PNG file, less than
4MB, and have the same dimensions as `image`.
model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
is used.
n: The number of images to generate. Must be between 1 and 10.
output_compression: The compression level (0-100%) for the generated images. This parameter is only
supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
defaults to 100.
output_format: The format in which the generated images are returned. This parameter is only
supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
default value is `png`.
partial_images: The number of partial images to generate. This parameter is used for streaming
responses that return partial images. Value must be between 0 and 3. When set to
0, the response will be a single image sent in one streaming event.
Note that the final image may be sent before all of the requested partial
images are generated if the full image is generated more quickly.
quality: The quality of the image that will be generated. `high`, `medium` and `low` are
only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
Defaults to `auto`.
response_format: The format in which the generated images are returned. Must be one of `url` or
`b64_json`. URLs are only valid for 60 minutes after the image has been
generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
will always return base64-encoded images.
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
(landscape), `1024x1536` (portrait), or `auto` (default value) for
`gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@required_args(["image", "prompt"], ["image", "prompt", "stream"])
async def edit(
self,
*,
image: Union[FileTypes, SequenceNotStr[FileTypes]],
prompt: str,
background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
input_fidelity: Optional[Literal["high", "low"]] | Omit = omit,
mask: FileTypes | Omit = omit,
model: Union[str, ImageModel, None] | Omit = omit,
n: Optional[int] | Omit = omit,
output_compression: Optional[int] | Omit = omit,
output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
partial_images: Optional[int] | Omit = omit,
quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | Omit = omit,
response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
size: Optional[Literal["256x256", "512x512", "1024x1024", "1536x1024", "1024x1536", "auto"]] | Omit = omit,
stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
user: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ImagesResponse | AsyncStream[ImageEditStreamEvent]:
body = deepcopy_minimal(
{
"image": image,
"prompt": prompt,
"background": background,
"input_fidelity": input_fidelity,
"mask": mask,
"model": model,
"n": n,
"output_compression": output_compression,
"output_format": output_format,
"partial_images": partial_images,
"quality": quality,
"response_format": response_format,
"size": size,
"stream": stream,
"user": user,
}
)
files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["image", "<array>"], ["mask"]])
# It should be noted that the actual Content-Type header that will be
# sent to the server will contain a `boundary` parameter, e.g.
# multipart/form-data; boundary=---abc--
extra_headers = {"Content-Type": "multipart/form-data", **(extra_headers or {})}
return await self._post(
"/images/edits",
body=await async_maybe_transform(
body,
image_edit_params.ImageEditParamsStreaming if stream else image_edit_params.ImageEditParamsNonStreaming,
),
files=files,
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ImagesResponse,
stream=stream or False,
stream_cls=AsyncStream[ImageEditStreamEvent],
)
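# A minimal usage sketch for `edit` (editor's addition, not part of the SDK
# source). It assumes an `AsyncOpenAI` client configured with a valid API key
# and a local file `photo.png`; the prompt and filenames are illustrative.
#
#   import asyncio
#   from openai import AsyncOpenAI
#
#   async def main() -> None:
#       client = AsyncOpenAI()
#       # Non-streaming: returns an ImagesResponse with the edited image(s).
#       with open("photo.png", "rb") as f:
#           result = await client.images.edit(
#               image=f,
#               prompt="Add a red scarf to the subject",
#               model="gpt-image-1",
#           )
#       # Streaming: `stream=True` yields ImageEditStreamEvent objects instead.
#       with open("photo.png", "rb") as f:
#           events = await client.images.edit(
#               image=f,
#               prompt="Add a red scarf to the subject",
#               stream=True,
#               partial_images=2,
#           )
#           async for event in events:
#               print(event.type)
#
#   asyncio.run(main())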
@overload
async def generate(
self,
*,
prompt: str,
background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
model: Union[str, ImageModel, None] | Omit = omit,
moderation: Optional[Literal["low", "auto"]] | Omit = omit,
n: Optional[int] | Omit = omit,
output_compression: Optional[int] | Omit = omit,
output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
partial_images: Optional[int] | Omit = omit,
quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
size: Optional[
Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
]
| Omit = omit,
stream: Optional[Literal[False]] | Omit = omit,
style: Optional[Literal["vivid", "natural"]] | Omit = omit,
user: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ImagesResponse:
"""
Creates an image given a prompt.
[Learn more](https://platform.openai.com/docs/guides/images).
Args:
prompt: A text description of the desired image(s). The maximum length is 32000
characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
for `dall-e-3`.
background: Allows setting transparency for the background of the generated image(s). This
parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
`opaque` or `auto` (default value). When `auto` is used, the model will
automatically determine the best background for the image.
If `transparent`, the output format needs to support transparency, so it should
be set to either `png` (default value) or `webp`.
model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
`gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
`gpt-image-1` is used.
moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
be either `low` for less restrictive filtering or `auto` (default value).
n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
`n=1` is supported.
output_compression: The compression level (0-100%) for the generated images. This parameter is only
supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
defaults to 100.
output_format: The format in which the generated images are returned. This parameter is only
supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
partial_images: The number of partial images to generate. This parameter is used for streaming
responses that return partial images. Value must be between 0 and 3. When set to
0, the response will be a single image sent in one streaming event.
Note that the final image may be sent before all of the requested partial
images are generated if the full image is generated more quickly.
quality: The quality of the image that will be generated.
- `auto` (default value) will automatically select the best quality for the
given model.
- `high`, `medium` and `low` are supported for `gpt-image-1`.
- `hd` and `standard` are supported for `dall-e-3`.
- `standard` is the only option for `dall-e-2`.
response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
after the image has been generated. This parameter isn't supported for
`gpt-image-1`, which will always return base64-encoded images.
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
(landscape), `1024x1536` (portrait), or `auto` (default value) for
`gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
stream: Generate the image in streaming mode. Defaults to `false`. See the
[Image generation guide](https://platform.openai.com/docs/guides/image-generation)
for more information. This parameter is only supported for `gpt-image-1`.
style: The style of the generated images. This parameter is only supported for
`dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
towards generating hyper-real and dramatic images. Natural causes the model to
produce more natural, less hyper-real looking images.
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@overload
async def generate(
self,
*,
prompt: str,
stream: Literal[True],
background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
model: Union[str, ImageModel, None] | Omit = omit,
moderation: Optional[Literal["low", "auto"]] | Omit = omit,
n: Optional[int] | Omit = omit,
output_compression: Optional[int] | Omit = omit,
output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
partial_images: Optional[int] | Omit = omit,
quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
size: Optional[
Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
]
| Omit = omit,
style: Optional[Literal["vivid", "natural"]] | Omit = omit,
user: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncStream[ImageGenStreamEvent]:
"""
Creates an image given a prompt.
[Learn more](https://platform.openai.com/docs/guides/images).
Args:
prompt: A text description of the desired image(s). The maximum length is 32000
characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
for `dall-e-3`.
stream: Generate the image in streaming mode. Defaults to `false`. See the
[Image generation guide](https://platform.openai.com/docs/guides/image-generation)
for more information. This parameter is only supported for `gpt-image-1`.
background: Allows setting transparency for the background of the generated image(s). This
parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
`opaque` or `auto` (default value). When `auto` is used, the model will
automatically determine the best background for the image.
If `transparent`, the output format needs to support transparency, so it should
be set to either `png` (default value) or `webp`.
model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
`gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
`gpt-image-1` is used.
moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
be either `low` for less restrictive filtering or `auto` (default value).
n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
`n=1` is supported.
output_compression: The compression level (0-100%) for the generated images. This parameter is only
supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
defaults to 100.
output_format: The format in which the generated images are returned. This parameter is only
supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
partial_images: The number of partial images to generate. This parameter is used for streaming
responses that return partial images. Value must be between 0 and 3. When set to
0, the response will be a single image sent in one streaming event.
Note that the final image may be sent before all of the requested partial
images are generated if the full image is generated more quickly.
quality: The quality of the image that will be generated.
- `auto` (default value) will automatically select the best quality for the
given model.
- `high`, `medium` and `low` are supported for `gpt-image-1`.
- `hd` and `standard` are supported for `dall-e-3`.
- `standard` is the only option for `dall-e-2`.
response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
after the image has been generated. This parameter isn't supported for
`gpt-image-1`, which will always return base64-encoded images.
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
(landscape), `1024x1536` (portrait), or `auto` (default value) for
`gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
style: The style of the generated images. This parameter is only supported for
`dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
towards generating hyper-real and dramatic images. Natural causes the model to
produce more natural, less hyper-real looking images.
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@overload
async def generate(
self,
*,
prompt: str,
stream: bool,
background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
model: Union[str, ImageModel, None] | Omit = omit,
moderation: Optional[Literal["low", "auto"]] | Omit = omit,
n: Optional[int] | Omit = omit,
output_compression: Optional[int] | Omit = omit,
output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
partial_images: Optional[int] | Omit = omit,
quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
size: Optional[
Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
]
| Omit = omit,
style: Optional[Literal["vivid", "natural"]] | Omit = omit,
user: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ImagesResponse | AsyncStream[ImageGenStreamEvent]:
"""
Creates an image given a prompt.
[Learn more](https://platform.openai.com/docs/guides/images).
Args:
prompt: A text description of the desired image(s). The maximum length is 32000
characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
for `dall-e-3`.
stream: Generate the image in streaming mode. Defaults to `false`. See the
[Image generation guide](https://platform.openai.com/docs/guides/image-generation)
for more information. This parameter is only supported for `gpt-image-1`.
background: Allows setting transparency for the background of the generated image(s). This
parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
`opaque` or `auto` (default value). When `auto` is used, the model will
automatically determine the best background for the image.
If `transparent`, the output format needs to support transparency, so it should
be set to either `png` (default value) or `webp`.
model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
`gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
`gpt-image-1` is used.
moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
be either `low` for less restrictive filtering or `auto` (default value).
n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
`n=1` is supported.
output_compression: The compression level (0-100%) for the generated images. This parameter is only
supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
defaults to 100.
output_format: The format in which the generated images are returned. This parameter is only
supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
partial_images: The number of partial images to generate. This parameter is used for streaming
responses that return partial images. Value must be between 0 and 3. When set to
0, the response will be a single image sent in one streaming event.
Note that the final image may be sent before all of the requested partial
images are generated if the full image is generated more quickly.
quality: The quality of the image that will be generated.
- `auto` (default value) will automatically select the best quality for the
given model.
- `high`, `medium` and `low` are supported for `gpt-image-1`.
- `hd` and `standard` are supported for `dall-e-3`.
- `standard` is the only option for `dall-e-2`.
response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
after the image has been generated. This parameter isn't supported for
`gpt-image-1`, which will always return base64-encoded images.
size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
(landscape), `1024x1536` (portrait), or `auto` (default value) for
`gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
style: The style of the generated images. This parameter is only supported for
`dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
towards generating hyper-real and dramatic images. Natural causes the model to
produce more natural, less hyper-real looking images.
user: A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
@required_args(["prompt"], ["prompt", "stream"])
async def generate(
self,
*,
prompt: str,
background: Optional[Literal["transparent", "opaque", "auto"]] | Omit = omit,
model: Union[str, ImageModel, None] | Omit = omit,
moderation: Optional[Literal["low", "auto"]] | Omit = omit,
n: Optional[int] | Omit = omit,
output_compression: Optional[int] | Omit = omit,
output_format: Optional[Literal["png", "jpeg", "webp"]] | Omit = omit,
partial_images: Optional[int] | Omit = omit,
quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | Omit = omit,
response_format: Optional[Literal["url", "b64_json"]] | Omit = omit,
size: Optional[
Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
]
| Omit = omit,
stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
style: Optional[Literal["vivid", "natural"]] | Omit = omit,
user: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ImagesResponse | AsyncStream[ImageGenStreamEvent]:
return await self._post(
"/images/generations",
body=await async_maybe_transform(
{
"prompt": prompt,
"background": background,
"model": model,
"moderation": moderation,
"n": n,
"output_compression": output_compression,
"output_format": output_format,
"partial_images": partial_images,
"quality": quality,
"response_format": response_format,
"size": size,
"stream": stream,
"style": style,
"user": user,
},
image_generate_params.ImageGenerateParamsStreaming
if stream
else image_generate_params.ImageGenerateParamsNonStreaming,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ImagesResponse,
stream=stream or False,
stream_cls=AsyncStream[ImageGenStreamEvent],
)
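# A minimal usage sketch for `generate` (editor's addition, not part of the SDK
# source). Assumes an `AsyncOpenAI` client with a valid API key; the model,
# prompt, and size values are illustrative.
#
#   import asyncio
#   from openai import AsyncOpenAI
#
#   async def main() -> None:
#       client = AsyncOpenAI()
#       # Non-streaming: one base64-encoded image back from gpt-image-1.
#       result = await client.images.generate(
#           prompt="A watercolor lighthouse at dusk",
#           model="gpt-image-1",
#           size="1024x1024",
#       )
#       # Streaming: partial frames arrive as ImageGenStreamEvent objects.
#       events = await client.images.generate(
#           prompt="A watercolor lighthouse at dusk",
#           model="gpt-image-1",
#           stream=True,
#           partial_images=3,
#       )
#       async for event in events:
#           print(event.type)
#
#   asyncio.run(main())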
| AsyncImages |
python | pytorch__pytorch | torch/fx/passes/backends/cudagraphs.py | {
"start": 354,
"end": 2079
} | class ____(OperatorSupport):
# TODO: why is submodules passed here
def is_node_supported(self, submodules, node: torch.fx.Node) -> bool:
if node.op not in CALLABLE_NODE_OPS:
return False
if node.target is torch.ops.aten.embedding_dense_backward.default:
return False
if node.target is operator.getitem:
return True
found_not_cuda = False
def meta_fk(meta):
return meta["val"] if "val" in meta else meta["fake_result"]
def find_not_cuda(t):
nonlocal found_not_cuda
if isinstance(t, torch.Tensor) and t.device.type != "cuda":
found_not_cuda = True
for n in node.all_input_nodes:
pytree.tree_map_(find_not_cuda, meta_fk(n.meta))
pytree.tree_map_(find_not_cuda, meta_fk(node.meta))
# NB: factory function is accounted for because the result would be
# cpu or cuda
return not found_not_cuda
def partition_cudagraphs(gm, inputs):
"""
Partition an FX graph into sub-GraphModules that can be validly run under
CUDA graphs. For a subgraph to be runnable under CUDA, all of the operations
must involve CUDA tensors only/
"""
FakeTensorProp(gm).propagate(*inputs)
supported_ops = CudaGraphsSupport()
# TODO: single node partition may be wrong due to the pessimization
# from copying in and out the data. Check in benchmarks, perhaps
partitioner = CapabilityBasedPartitioner(
gm, supported_ops, allows_single_node_partition=True
)
partitions = partitioner.propose_partitions()
fused_graph = partitioner.fuse_partitions(partitions)
return fused_graph
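# A minimal usage sketch for `partition_cudagraphs` (editor's addition, not
# part of the PyTorch source). It assumes a CUDA-capable build; the traced
# function and tensor shapes are illustrative.
#
#   import torch
#   import torch.fx
#
#   def f(x, y):
#       return torch.relu(x @ y) + 1.0
#
#   gm = torch.fx.symbolic_trace(f)
#   inputs = [torch.randn(4, 4, device="cuda"), torch.randn(4, 4, device="cuda")]
#   fused = partition_cudagraphs(gm, inputs)
#   # CUDA-only regions are fused into submodules suitable for CUDA graph
#   # capture; unsupported nodes (e.g. ones touching CPU tensors) remain in
#   # the outer module.
#   print(fused.graph)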
| CudaGraphsSupport |