language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | PyCQA__pylint | pylint/extensions/_check_docs_utils.py | {
"start": 25288,
"end": 29676
} | class ____(GoogleDocstring):
_re_section_template = r"""
^([ ]*) {0} \s*?$ # Numpy parameters header
\s* [-=]+ \s*?$ # underline
( .* ) # section
"""
re_param_section = re.compile(
_re_section_template.format(r"(?:Args|Arguments|Parameters)"),
re.X | re.S | re.M,
)
re_default_value = r"""((['"]\w+\s*['"])|(\d+)|(True)|(False)|(None))"""
re_param_line = re.compile(
rf"""
\s* (?P<param_name>\*{{0,2}}\w+)(\s?(:|\n)) # identifier with potential asterisks
\s*
(?P<param_type>
(
({GoogleDocstring.re_multiple_type}) # default type declaration
(,\s+optional)? # optional 'optional' indication
)?
(
{{({re_default_value},?\s*)+}} # set of default values
)?
(?:$|\n)
)?
(
\s* (?P<param_desc>.*) # optional description
)?
""",
re.X | re.S,
)
re_raise_section = re.compile(
_re_section_template.format(r"Raises"), re.X | re.S | re.M
)
re_raise_line = re.compile(
rf"""
\s* ({GoogleDocstring.re_type})$ # type declaration
\s* (.*) # optional description
""",
re.X | re.S | re.M,
)
re_returns_section = re.compile(
_re_section_template.format(r"Returns?"), re.X | re.S | re.M
)
re_returns_line = re.compile(
rf"""
\s* (?:\w+\s+:\s+)? # optional name
({GoogleDocstring.re_multiple_type})$ # type declaration
\s* (.*) # optional description
""",
re.X | re.S | re.M,
)
re_yields_section = re.compile(
_re_section_template.format(r"Yields?"), re.X | re.S | re.M
)
re_yields_line = re_returns_line
supports_yields = True
def match_param_docs(self) -> tuple[set[str], set[str]]:
"""Matches parameter documentation section to parameter documentation rules."""
params_with_doc = set()
params_with_type = set()
entries = self._parse_section(self.re_param_section)
entries.extend(self._parse_section(self.re_keyword_param_section))
for entry in entries:
match = self.re_param_line.match(entry)
if not match:
continue
# check if parameter has description only
re_only_desc = re.match(r"\s*(\*{0,2}\w+)\s*:?\n\s*\w*$", entry)
if re_only_desc:
param_name = match.group("param_name")
param_desc = match.group("param_type")
param_type = None
else:
param_name = match.group("param_name")
param_type = match.group("param_type")
param_desc = match.group("param_desc")
# The re_param_line pattern needs to match multi-line which removes the ability
# to match a single line description like 'arg : a number type.'
# We are not trying to determine whether 'a number type' is correct typing
# but we do accept it as typing as it is in the place where typing
# should be
if param_type is None and re.match(r"\s*(\*{0,2}\w+)\s*:.+$", entry):
param_type = param_desc
# If the description is "" but we have a type description
# we consider the description to be the type
if not param_desc and param_type:
param_desc = param_type
if param_type:
params_with_type.add(param_name)
if param_desc:
params_with_doc.add(param_name)
return params_with_doc, params_with_type
@staticmethod
def min_section_indent(section_match: re.Match[str]) -> int:
return len(section_match.group(1))
@staticmethod
def _is_section_header(line: str) -> bool:
return bool(re.match(r"\s*-+$", line))
DOCSTRING_TYPES = {
"sphinx": SphinxDocstring,
"epytext": EpytextDocstring,
"google": GoogleDocstring,
"numpy": NumpyDocstring,
"default": Docstring,
}
"""A map of the name of the docstring type to its class.
:type: dict(str, type)
"""
| NumpyDocstring |
python | justquick__django-activity-stream | runtests/testapp/models.py | {
"start": 464,
"end": 892
} | class ____(BaseUserManager):
def create_user(self, username, password=None):
user = self.model(username=username)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, username, password):
user = self.create_user(username, password=password)
user.is_superuser = True
user.save(using=self._db)
return user
| MyUserManager |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-amazon-sqs/destination_amazon_sqs/destination.py | {
"start": 376,
"end": 8052
} | class ____(Destination):
def queue_is_fifo(self, url: str) -> bool:
return url.endswith(".fifo")
def parse_queue_name(self, url: str) -> str:
return url.rsplit("/", 1)[-1]
def send_single_message(self, queue, message) -> dict:
return queue.send_message(**message)
def build_sqs_message(self, record, message_body_key=None):
data = None
if message_body_key:
data = record.data.get(message_body_key)
if data is None:
raise Exception("Message had no attribute of the configured Message Body Key: " + message_body_key)
else:
data = json.dumps(record.data)
message = {"MessageBody": data}
return message
def add_attributes_to_message(self, record, message):
attributes = {"airbyte_emitted_at": {"StringValue": str(record.emitted_at), "DataType": "String"}}
message["MessageAttributes"] = attributes
return message
def set_message_delay(self, message, message_delay):
message["DelaySeconds"] = message_delay
return message
# MessageGroupID and MessageDeduplicationID are required properties for FIFO queues
# https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessage.html
def set_message_fifo_properties(self, message, message_group_id, use_content_dedupe=False):
# https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagegroupid-property.html
if not message_group_id:
raise Exception("Failed to build message - Message Group ID is required for FIFO queues")
else:
message["MessageGroupId"] = message_group_id
# https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/using-messagededuplicationid-property.html
if not use_content_dedupe:
message["MessageDeduplicationId"] = str(uuid4())
# TODO: Support getting MessageDeduplicationId from a key in the record
# if message_dedupe_id:
# message['MessageDeduplicationId'] = message_dedupe_id
return message
# TODO: Support batch send
# def send_batch_messages(messages, queue):
# entry = {
# 'Id': "1",
# 'MessageBody': str(record.data),
# }
# response = queue.send_messages(Entries=messages)
# if 'Successful' in response:
# for status in response['Successful']:
# print("Message sent: " + status['MessageId'])
# if 'Failed' in response:
# for status in response['Failed']:
# print("Message sent: " + status['MessageId'])
# https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessage.html
def write(
self, config: Mapping[str, Any], configured_catalog: ConfiguredAirbyteCatalog, input_messages: Iterable[AirbyteMessage]
) -> Iterable[AirbyteMessage]:
# Required propeties
queue_url = config["queue_url"]
queue_region = config["region"]
# TODO: Implement optional params for batch
# Optional Properties
# max_batch_size = config.get("max_batch_size", 10)
# send_as_batch = config.get("send_as_batch", False)
message_delay = config.get("message_delay")
message_body_key = config.get("message_body_key")
# FIFO Properties
message_group_id = config.get("message_group_id")
# Senstive Properties
access_key = config["access_key"]
secret_key = config["secret_key"]
session = boto3.Session(aws_access_key_id=access_key, aws_secret_access_key=secret_key, region_name=queue_region)
sqs = session.resource("sqs")
queue = sqs.Queue(url=queue_url)
# TODO: Make access/secret key optional, support public access & profiles
# TODO: Support adding/setting attributes in the UI
# TODO: Support extract a specific path as message attributes
for message in input_messages:
if message.type == Type.RECORD:
sqs_message = self.build_sqs_message(message.record, message_body_key)
if message_delay:
sqs_message = self.set_message_delay(sqs_message, message_delay)
sqs_message = self.add_attributes_to_message(message.record, sqs_message)
if self.queue_is_fifo(queue_url):
use_content_dedupe = False if queue.attributes.get("ContentBasedDeduplication") == "false" else "true"
self.set_message_fifo_properties(sqs_message, message_group_id, use_content_dedupe)
self.send_single_message(queue, sqs_message)
if message.type == Type.STATE:
yield message
def check(self, logger: logging.Logger, config: Mapping[str, Any]) -> AirbyteConnectionStatus:
try:
# Required propeties
queue_url = config["queue_url"]
logger.debug("Amazon SQS Destination Config Check - queue_url: " + queue_url)
queue_region = config["region"]
logger.debug("Amazon SQS Destination Config Check - region: " + queue_region)
# Senstive Properties
access_key = config["access_key"]
logger.debug("Amazon SQS Destination Config Check - access_key (ends with): " + access_key[-1])
secret_key = config["secret_key"]
logger.debug("Amazon SQS Destination Config Check - secret_key (ends with): " + secret_key[-1])
logger.debug("Amazon SQS Destination Config Check - Starting connection test ---")
session = boto3.Session(aws_access_key_id=access_key, aws_secret_access_key=secret_key, region_name=queue_region)
sqs = session.resource("sqs")
queue = sqs.Queue(url=queue_url)
if hasattr(queue, "attributes"):
logger.debug("Amazon SQS Destination Config Check - Connection test successful ---")
if self.queue_is_fifo(queue_url):
fifo = queue.attributes.get("FifoQueue", False)
if not fifo:
raise Exception("FIFO Queue URL set but Queue is not FIFO")
message_group_id = config.get("message_group_id")
if message_group_id is None:
raise Exception("Message Group ID is not set, but is required for FIFO Queues.")
# TODO: Support referencing an ID inside the Record to use as de-dupe ID
# message_dedupe_key = config.get("message_dedupe_key")
# content_dedupe = queue.attributes.get('ContentBasedDeduplication')
# if content_dedupe == "false":
# if message_dedupe_id is None:
# raise Exception("You must provide a Message Deduplication ID when ContentBasedDeduplication is not used.")
return AirbyteConnectionStatus(status=Status.SUCCEEDED)
else:
return AirbyteConnectionStatus(
status=Status.FAILED, message="Amazon SQS Destination Config Check - Could not connect to queue"
)
except ClientError as e:
return AirbyteConnectionStatus(
status=Status.FAILED, message=f"Amazon SQS Destination Config Check - Error in AWS Client: {str(e)}"
)
except Exception as e:
return AirbyteConnectionStatus(
status=Status.FAILED, message=f"Amazon SQS Destination Config Check - An exception occurred: {str(e)}"
)
| DestinationAmazonSqs |
python | huggingface__transformers | src/transformers/models/levit/modeling_levit.py | {
"start": 5278,
"end": 5757
} | class ____(nn.Module):
def __init__(self, stride, resolution):
super().__init__()
self.stride = stride
self.resolution = resolution
def forward(self, hidden_state):
batch_size, _, channels = hidden_state.shape
hidden_state = hidden_state.view(batch_size, self.resolution, self.resolution, channels)[
:, :: self.stride, :: self.stride
].reshape(batch_size, -1, channels)
return hidden_state
| LevitSubsample |
python | huggingface__transformers | src/transformers/modeling_outputs.py | {
"start": 87192,
"end": 88913
} | class ____(ModelOutput):
"""
Output type of [`Wav2Vec2ForXVector`].
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.xvector_output_dim)`):
Classification hidden states before AMSoftmax.
embeddings (`torch.FloatTensor` of shape `(batch_size, config.xvector_output_dim)`):
Utterance embeddings used for vector similarity-based retrieval.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
embeddings: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
| XVectorOutput |
python | pandas-dev__pandas | asv_bench/benchmarks/series_methods.py | {
"start": 5168,
"end": 5502
} | class ____:
params = [[10**3, 10**4, 10**5], [True, False]]
param_names = ["N", "dropna"]
def setup(self, N, dropna):
self.s = Series(np.random.randint(0, N, size=10 * N), dtype="Int64")
self.s.loc[1] = NA
def time_value_counts(self, N, dropna):
self.s.value_counts(dropna=dropna)
| ValueCountsEA |
python | cherrypy__cherrypy | cherrypy/test/logtest.py | {
"start": 790,
"end": 8574
} | class ____(object):
"""unittest.TestCase mixin for testing log messages.
logfile: a filename for the desired log. Yes, I know modes are evil,
but it makes the test functions so much cleaner to set this once.
lastmarker: the last marker in the log. This can be used to search for
messages since the last marker.
markerPrefix: a string with which to prefix log markers. This should be
unique enough from normal log output to use for marker identification.
"""
interactive = False
logfile = None
lastmarker = None
markerPrefix = b'test suite marker: '
def _handleLogError(self, msg, data, marker, pattern):
print('')
print(' ERROR: %s' % msg)
if not self.interactive:
raise pytest.fail(msg)
p = (
' Show: '
'[L]og [M]arker [P]attern; '
'[I]gnore, [R]aise, or sys.e[X]it >> '
)
sys.stdout.write(p + ' ')
# ARGH
sys.stdout.flush()
while True:
i = getchar().upper()
if i not in 'MPLIRX':
continue
print(i.upper()) # Also prints new line
if i == 'L':
for x, line in enumerate(data):
if (x + 1) % self.console_height == 0:
# The \r and comma should make the next line overwrite
sys.stdout.write('<-- More -->\r ')
m = getchar().lower()
# Erase our "More" prompt
sys.stdout.write(' \r ')
if m == 'q':
break
print(line.rstrip())
elif i == 'M':
print(repr(marker or self.lastmarker))
elif i == 'P':
print(repr(pattern))
elif i == 'I':
# return without raising the normal exception
return
elif i == 'R':
raise pytest.fail(msg)
elif i == 'X':
self.exit()
sys.stdout.write(p + ' ')
def exit(self):
"""Terminate the program."""
sys.exit()
def emptyLog(self):
"""Overwrite self.logfile with 0 bytes."""
with open(self.logfile, 'wb') as f:
f.write('')
def markLog(self, key=None):
"""Insert a marker line into the log and set self.lastmarker."""
if key is None:
key = str(time.time())
self.lastmarker = key
with open(self.logfile, 'ab+') as f:
f.write(b'%s%s\n' % (self.markerPrefix, key.encode('utf-8')))
def _read_marked_region(self, marker=None):
"""Return lines from self.logfile in the marked region.
If marker is None, self.lastmarker is used. If the log hasn't
been marked (using self.markLog), the entire log will be
returned.
"""
# Give the logger time to finish writing?
# time.sleep(0.5)
logfile = self.logfile
marker = marker or self.lastmarker
if marker is None:
with open(logfile, 'rb') as f:
return f.readlines()
if isinstance(marker, str):
marker = marker.encode('utf-8')
data = []
in_region = False
with open(logfile, 'rb') as f:
for line in f:
if in_region:
if (
line.startswith(self.markerPrefix)
and marker not in line
):
break
else:
data.append(line)
elif marker in line:
in_region = True
return data
def assertInLog(self, line, marker=None):
"""Fail if the given (partial) line is not in the log.
The log will be searched from the given marker to the next
marker. If marker is None, self.lastmarker is used. If the log
hasn't been marked (using self.markLog), the entire log will be
searched.
"""
data = self._read_marked_region(marker)
for logline in data:
if line in logline:
return
msg = '%r not found in log' % line
self._handleLogError(msg, data, marker, line)
def assertNotInLog(self, line, marker=None):
"""Fail if the given (partial) line is in the log.
The log will be searched from the given marker to the next
marker. If marker is None, self.lastmarker is used. If the log
hasn't been marked (using self.markLog), the entire log will be
searched.
"""
data = self._read_marked_region(marker)
for logline in data:
if line in logline:
msg = '%r found in log' % line
self._handleLogError(msg, data, marker, line)
def assertValidUUIDv4(self, marker=None):
"""Fail if the given UUIDv4 is not valid.
The log will be searched from the given marker to the next
marker. If marker is None, self.lastmarker is used. If the log
hasn't been marked (using self.markLog), the entire log will be
searched.
"""
data = self._read_marked_region(marker)
data = [
chunk.decode('utf-8').rstrip('\n').rstrip('\r') for chunk in data
]
for log_chunk in data:
try:
uuid_log = data[-1]
uuid_obj = UUID(uuid_log, version=4)
except (TypeError, ValueError):
pass # it might be in other chunk
else:
if str(uuid_obj) == uuid_log:
return
msg = '%r is not a valid UUIDv4' % uuid_log
self._handleLogError(msg, data, marker, log_chunk)
msg = 'UUIDv4 not found in log'
self._handleLogError(msg, data, marker, log_chunk)
def assertLog(self, sliceargs, lines, marker=None):
"""Fail if log.readlines()[sliceargs] is not contained in 'lines'.
The log will be searched from the given marker to the next
marker. If marker is None, self.lastmarker is used. If the log
hasn't been marked (using self.markLog), the entire log will be
searched.
"""
data = self._read_marked_region(marker)
if isinstance(sliceargs, int):
# Single arg. Use __getitem__ and allow lines to be str or list.
if isinstance(lines, (tuple, list)):
lines = lines[0]
if isinstance(lines, str):
lines = lines.encode('utf-8')
if lines not in data[sliceargs]:
msg = '%r not found on log line %r' % (lines, sliceargs)
self._handleLogError(
msg,
[data[sliceargs], '--EXTRA CONTEXT--']
+ data[sliceargs + 1 : sliceargs + 6],
marker,
lines,
)
else:
# Multiple args. Use __getslice__ and require lines to be list.
if isinstance(lines, tuple):
lines = list(lines)
elif isinstance(lines, text_or_bytes):
raise TypeError(
"The 'lines' arg must be a list when "
"'sliceargs' is a tuple.",
)
start, stop = sliceargs
for line, logline in zip(lines, data[start:stop]):
if isinstance(line, str):
line = line.encode('utf-8')
if line not in logline:
msg = '%r not found in log' % line
self._handleLogError(msg, data[start:stop], marker, line)
| LogCase |
python | pennersr__django-allauth | allauth/socialaccount/providers/eventbrite/views.py | {
"start": 217,
"end": 1052
} | class ____(OAuth2Adapter):
"""OAuth2Adapter for Eventbrite API v3."""
provider_id = "eventbrite"
authorize_url = "https://www.eventbrite.com/oauth/authorize"
access_token_url = "https://www.eventbrite.com/oauth/token" # nosec
profile_url = "https://www.eventbriteapi.com/v3/users/me/"
def complete_login(self, request, app, token, **kwargs):
"""Complete login."""
resp = (
get_adapter()
.get_requests_session()
.get(self.profile_url, params={"token": token.token})
)
extra_data = resp.json()
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(EventbriteOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(EventbriteOAuth2Adapter)
| EventbriteOAuth2Adapter |
python | scipy__scipy | scipy/signal/tests/test_max_len_seq.py | {
"start": 202,
"end": 3318
} | class ____:
def test_mls_inputs(self):
# can't all be zero state
assert_raises(ValueError, max_len_seq,
10, state=np.zeros(10))
# wrong size state
assert_raises(ValueError, max_len_seq, 10,
state=np.ones(3))
# wrong length
assert_raises(ValueError, max_len_seq, 10, length=-1)
xp_assert_equal(max_len_seq(10, length=0)[0],
np.asarray([], dtype=np.int8)
)
# unknown taps
assert_raises(ValueError, max_len_seq, 64)
# bad taps
assert_raises(ValueError, max_len_seq, 10, taps=[-1, 1])
def test_mls_output(self):
# define some alternate working taps
alt_taps = {2: [1], 3: [2], 4: [3], 5: [4, 3, 2], 6: [5, 4, 1], 7: [4],
8: [7, 5, 3]}
# assume the other bit levels work, too slow to test higher orders...
for nbits in range(2, 8):
for state in [None, np.round(np.random.rand(nbits))]:
for taps in [None, alt_taps[nbits]]:
if state is not None and np.all(state == 0):
state[0] = 1 # they can't all be zero
orig_m = max_len_seq(nbits, state=state,
taps=taps)[0]
m = 2. * orig_m - 1. # convert to +/- 1 representation
# First, make sure we got all 1's or -1
err_msg = "mls had non binary terms"
xp_assert_equal(np.abs(m), np.ones_like(m),
err_msg=err_msg)
# Test via circular cross-correlation, which is just mult.
# in the frequency domain with one signal conjugated
tester = np.real(ifft(fft(m) * np.conj(fft(m))))
out_len = 2**nbits - 1
# impulse amplitude == test_len
err_msg = "mls impulse has incorrect value"
xp_assert_close(tester[0],
float(out_len),
err_msg=err_msg
)
# steady-state is -1
err_msg = "mls steady-state has incorrect value"
xp_assert_close(tester[1:],
np.full(out_len - 1, -1, dtype=tester.dtype),
err_msg=err_msg)
# let's do the split thing using a couple options
for n in (1, 2**(nbits - 1)):
m1, s1 = max_len_seq(nbits, state=state, taps=taps,
length=n)
m2, s2 = max_len_seq(nbits, state=s1, taps=taps,
length=1)
m3, s3 = max_len_seq(nbits, state=s2, taps=taps,
length=out_len - n - 1)
new_m = np.concatenate((m1, m2, m3))
xp_assert_equal(orig_m, new_m)
| TestMLS |
python | numba__numba | numba/tests/test_ssa.py | {
"start": 15636,
"end": 18974
} | class ____(MemoryLeakMixin, TestCase):
# This tests issues related to the SROA optimization done in lowering, which
# reduces time spent in the LLVM SROA pass. The optimization is related to
# SSA and tries to reduce the number of alloca statements for variables with
# only a single assignment.
def test_issue7258_multiple_assignment_post_SSA(self):
# This test adds a pass that will duplicate assignment statements to
# variables named "foobar".
# In the reported issue, the bug will cause a memory leak.
cloned = []
@register_pass(analysis_only=False, mutates_CFG=True)
class CloneFoobarAssignments(FunctionPass):
# A pass that clones variable assignments into "foobar"
_name = "clone_foobar_assignments_pass"
def __init__(self):
FunctionPass.__init__(self)
def run_pass(self, state):
mutated = False
for blk in state.func_ir.blocks.values():
to_clone = []
# find assignments to "foobar"
for assign in blk.find_insts(ir.Assign):
if assign.target.name == "foobar":
to_clone.append(assign)
# clone
for assign in to_clone:
clone = copy.deepcopy(assign)
blk.insert_after(clone, assign)
mutated = True
# keep track of cloned statements
cloned.append(clone)
return mutated
class CustomCompiler(CompilerBase):
def define_pipelines(self):
pm = DefaultPassBuilder.define_nopython_pipeline(
self.state, "custom_pipeline",
)
pm._finalized = False
# Insert the cloning pass after SSA
pm.add_pass_after(CloneFoobarAssignments, ReconstructSSA)
# Capture IR post lowering
pm.add_pass_after(PreserveIR, NativeLowering)
pm.finalize()
return [pm]
@njit(pipeline_class=CustomCompiler)
def udt(arr):
foobar = arr + 1 # this assignment will be cloned
return foobar
arr = np.arange(10)
# Verify that the function works as expected
self.assertPreciseEqual(udt(arr), arr + 1)
# Verify that the expected statement is cloned
self.assertEqual(len(cloned), 1)
self.assertEqual(cloned[0].target.name, "foobar")
# Verify in the Numba IR that the expected statement is cloned
nir = udt.overloads[udt.signatures[0]].metadata['preserved_ir']
self.assertEqual(len(nir.blocks), 1,
"only one block")
[blk] = nir.blocks.values()
assigns = blk.find_insts(ir.Assign)
foobar_assigns = [stmt for stmt in assigns
if stmt.target.name == "foobar"]
self.assertEqual(
len(foobar_assigns), 2,
"expected two assignment statements into 'foobar'",
)
self.assertEqual(
foobar_assigns[0], foobar_assigns[1],
"expected the two assignment statements to be the same",
)
| TestSROAIssues |
python | joke2k__faker | faker/providers/phone_number/fr_CH/__init__.py | {
"start": 49,
"end": 1065
} | class ____(PhoneNumberProvider):
formats = (
# source: https://de.wikipedia.org/wiki/Telefonnummer_(Schweiz)#Schreibweisen
"+41 2# ### ## ##",
"+41 3# ### ## ##",
"+41 4# ### ## ##",
"+41 5# ### ## ##",
"+41 6# ### ## ##",
"+41 7# ### ## ##",
"+41 8# ### ## ##",
"+41 9# ### ## ##",
"+41 (0)2# ### ## ##",
"+41 (0)3% ### ## ##",
"+41 (0)4% ### ## ##",
"+41 (0)5# ### ## ##",
"+41 (0)6# ### ## ##",
"+41 (0)7% ### ## ##",
"+41 (0)8# ### ## ##",
"+41 (0)9# ### ## ##",
"02# ### ## ##",
"03% ### ## ##",
"04% ### ## ##",
"05# ### ## ##",
"06# ### ## ##",
"07% ### ## ##",
"08# ### ## ##",
"09# ### ## ##",
# see: http://www.bakom.admin.ch/themen/telekom/00479/00607/index.html
"084# ### ###",
"0878 ### ###",
"0900 ### ###",
"0901 ### ###",
"0906 ### ###",
)
| Provider |
python | mlflow__mlflow | tests/models/test_model_input_examples.py | {
"start": 10908,
"end": 16510
} | class ____(BaseEstimator, ClassifierMixin):
def __init__(self, output_shape=(1,)):
self.output_shape = output_shape
def fit(self, X, y=None):
return self
def predict(self, X):
n_samples = X.shape[0]
full_output_shape = (n_samples,) + self.output_shape
return np.zeros(full_output_shape, dtype=np.dtype("int64"))
@pytest.mark.parametrize(
("input_is_tabular", "output_shape", "expected_signature"),
[
# When the input example is column-based, output 1D numpy arrays are interpreted `ColSpec`s
(
True,
(),
ModelSignature(
inputs=Schema([ColSpec(name="feature", type=DataType.string)]),
outputs=Schema([ColSpec(type=DataType.long)]),
),
),
# But if the output numpy array has higher dimensions, fallback to interpreting the model
# output as `TensorSpec`s.
(
True,
(2,),
ModelSignature(
inputs=Schema([ColSpec(name="feature", type=DataType.string)]),
outputs=Schema([TensorSpec(np.dtype("int64"), (-1, 2))]),
),
),
# If the input example is tensor-based, interpret output numpy arrays as `TensorSpec`s
(
False,
(),
ModelSignature(
inputs=Schema([TensorSpec(np.dtype("int64"), (-1, 1))]),
outputs=Schema([TensorSpec(np.dtype("int64"), (-1,))]),
),
),
],
)
def test_infer_signature_with_input_example(input_is_tabular, output_shape, expected_signature):
model = DummySklearnModel(output_shape=output_shape)
artifact_path = "model"
example = pd.DataFrame({"feature": ["value"]}) if input_is_tabular else np.array([[1]])
with mlflow.start_run():
model_info = mlflow.sklearn.log_model(model, name=artifact_path, input_example=example)
mlflow_model = Model.load(model_info.model_uri)
assert mlflow_model.signature == expected_signature
def test_infer_signature_from_example_can_be_disabled():
artifact_path = "model"
with mlflow.start_run():
model_info = mlflow.sklearn.log_model(
DummySklearnModel(output_shape=()),
name=artifact_path,
input_example=np.array([[1]]),
signature=False,
)
mlflow_model = Model.load(model_info.model_uri)
assert mlflow_model.signature is None
def test_infer_signature_raises_if_predict_on_input_example_fails(monkeypatch):
monkeypatch.setenv("MLFLOW_TESTING", "false")
class ErrorModel(BaseEstimator, ClassifierMixin):
def fit(self, X, y=None):
return self
def predict(self, X):
raise Exception("oh no!")
with mock.patch("mlflow.models.model._logger.warning") as mock_warning:
with mlflow.start_run():
mlflow.sklearn.log_model(ErrorModel(), name="model", input_example=np.array([[1]]))
assert any(
"Failed to validate serving input example" in call[0][0]
for call in mock_warning.call_args_list
)
@pytest.fixture(scope="module")
def iris_model():
X, y = datasets.load_iris(return_X_y=True, as_frame=True)
return knn.KNeighborsClassifier().fit(X, y)
@pytest.mark.parametrize(
"input_example",
[
{
"sepal length (cm)": 5.1,
"sepal width (cm)": 3.5,
"petal length (cm)": 1.4,
"petal width (cm)": 0.2,
},
pd.DataFrame([[5.1, 3.5, 1.4, 0.2]]),
pd.DataFrame(
{
"sepal length (cm)": 5.1,
"sepal width (cm)": 3.5,
"petal length (cm)": 1.4,
"petal width (cm)": 0.2,
},
index=[0],
),
],
)
def test_infer_signature_on_multi_column_input_examples(input_example, iris_model):
artifact_path = "model"
with mlflow.start_run():
model_info = mlflow.sklearn.log_model(
iris_model, name=artifact_path, input_example=input_example
)
mlflow_model = Model.load(model_info.model_uri)
input_columns = mlflow_model.signature.inputs.inputs
assert len(input_columns) == 4
assert all(col.type == DataType.double for col in input_columns)
assert mlflow_model.signature.outputs == Schema([ColSpec(type=DataType.long)])
@pytest.mark.parametrize(
"input_example",
["some string", bytes([1, 2, 3])],
)
def test_infer_signature_on_scalar_input_examples(input_example):
class IdentitySklearnModel(BaseEstimator, ClassifierMixin):
def fit(self, X, y=None):
return self
def predict(self, X):
if isinstance(X, pd.DataFrame):
return X
raise Exception("Unsupported input type")
artifact_path = "model"
with mlflow.start_run():
model_info = mlflow.sklearn.log_model(
IdentitySklearnModel(), name=artifact_path, input_example=input_example
)
mlflow_model = Model.load(model_info.model_uri)
signature = mlflow_model.signature
assert isinstance(signature, ModelSignature)
assert signature.inputs.inputs[0].name is None
t = DataType.string if isinstance(input_example, str) else DataType.binary
assert signature == ModelSignature(
inputs=Schema([ColSpec(type=t)]),
outputs=Schema([ColSpec(name=0, type=t)]),
)
# test that a single string still passes pyfunc schema enforcement
mlflow.pyfunc.load_model(model_info.model_uri).predict(input_example)
| DummySklearnModel |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_twodim_base.py | {
"start": 5959,
"end": 10446
} | class ____(TestCase):
def test_simple(self):
x = array([0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891])
y = array([0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673])
xedges = np.linspace(0, 1, 10)
yedges = np.linspace(0, 1, 10)
H = histogram2d(x, y, (xedges, yedges))[0]
answer = array(
[
[0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
]
)
assert_array_equal(H.T, answer)
H = histogram2d(x, y, xedges)[0]
assert_array_equal(H.T, answer)
H, xedges, yedges = histogram2d(list(range(10)), list(range(10)))
assert_array_equal(H, eye(10, 10))
assert_array_equal(xedges, np.linspace(0, 9, 11))
assert_array_equal(yedges, np.linspace(0, 9, 11))
def test_asym(self):
x = array([1, 1, 2, 3, 4, 4, 4, 5])
y = array([1, 3, 2, 0, 1, 2, 3, 4])
H, xed, yed = histogram2d(x, y, (6, 5), range=[[0, 6], [0, 5]], density=True)
answer = array(
[
[0.0, 0, 0, 0, 0],
[0, 1, 0, 1, 0],
[0, 0, 1, 0, 0],
[1, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 1],
]
)
assert_array_almost_equal(H, answer / 8.0, 3)
assert_array_equal(xed, np.linspace(0, 6, 7))
assert_array_equal(yed, np.linspace(0, 5, 6))
def test_density(self):
x = array([1, 2, 3, 1, 2, 3, 1, 2, 3])
y = array([1, 1, 1, 2, 2, 2, 3, 3, 3])
H, xed, yed = histogram2d(x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], density=True)
answer = array([[1, 1, 0.5], [1, 1, 0.5], [0.5, 0.5, 0.25]]) / 9.0
assert_array_almost_equal(H, answer, 3)
def test_all_outliers(self):
r = np.random.rand(100) + 1.0 + 1e6 # histogramdd rounds by decimal=6
H, xed, yed = histogram2d(r, r, (4, 5), range=([0, 1], [0, 1]))
assert_array_equal(H, 0)
def test_empty(self):
a, edge1, edge2 = histogram2d([], [], bins=([0, 1], [0, 1]))
# assert_array_max_ulp(a, array([[0.]]))
assert_allclose(a, np.array([[0.0]]), atol=1e-15)
a, edge1, edge2 = histogram2d([], [], bins=4)
# assert_array_max_ulp(a, np.zeros((4, 4)))
assert_allclose(a, np.zeros((4, 4)), atol=1e-15)
@xpassIfTorchDynamo_np  # (reason="pytorch does not support bins = [int, array]")
def test_binparameter_combination(self):
    """Mixed bin specs: explicit edge array on one axis combined with an
    integer bin count on the other, in both orders."""
    x = array([0, 0.09207008, 0.64575234, 0.12875982, 0.47390599, 0.59944483, 1])
    y = array([0, 0.14344267, 0.48988575, 0.30558665, 0.44700682, 0.15886423, 1])
    edges = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)
    # explicit edges along x, 4 equal-width bins along y
    H, xe, ye = histogram2d(x, y, (edges, 4))
    answer = array(
        [
            [2.0, 0.0, 0.0, 0.0],
            [0.0, 1.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0],
            [0.0, 1.0, 0.0, 0.0],
            [1.0, 0.0, 0.0, 0.0],
            [0.0, 1.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 1.0],
        ]
    )
    assert_array_equal(H, answer)
    assert_array_equal(ye, array([0.0, 0.25, 0.5, 0.75, 1]))
    # 4 equal-width bins along x, explicit edges along y
    H, xe, ye = histogram2d(x, y, (4, edges))
    answer = array(
        [
            [1.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
        ]
    )
    assert_array_equal(H, answer)
    assert_array_equal(xe, array([0.0, 0.25, 0.5, 0.75, 1]))
@skip(reason="NP_VER: fails on CI with older NumPy")
@parametrize("x_len, y_len", [(10, 11), (20, 19)])
def test_bad_length(self, x_len, y_len):
    """Mismatched x/y lengths must raise ValueError with the documented message."""
    x, y = np.ones(x_len), np.ones(y_len)
    with pytest.raises(ValueError, match="x and y must have the same length."):
        histogram2d(x, y)
| TestHistogram2d |
python | getsentry__sentry | tests/sentry/notifications/notification_action/test_issue_alert_registry_handlers.py | {
"start": 14188,
"end": 15865
} | class ____(BaseWorkflowTest):
def setUp(self) -> None:
    """Create a Slack action wired to a fixed workspace/channel for the tests."""
    super().setUp()
    self.handler = SlackIssueAlertHandler()
    self.detector = self.create_detector(project=self.project)
    # integration_id doubles as the Slack "workspace" in the rendered blob
    self.action = self.create_action(
        type=Action.Type.SLACK,
        integration_id="1234567890",
        data={"tags": "environment,user", "notes": "Important alert"},
        config={
            "target_identifier": "channel789",
            "target_display": "#general",
            "target_type": ActionTarget.SPECIFIC,
        },
    )
def test_build_rule_action_blob(self) -> None:
    """Test that build_rule_action_blob creates correct Slack action data"""
    blob = self.handler.build_rule_action_blob(self.action, self.organization.id)
    # "workspace" carries the action's integration_id; channel fields come
    # from the action config; tags/notes come from the action data.
    assert blob == {
        "id": "sentry.integrations.slack.notify_action.SlackNotifyServiceAction",
        "workspace": "1234567890",
        "channel_id": "channel789",
        "channel": "#general",
        "tags": "environment,user",
        "notes": "Important alert",
    }
def test_build_rule_action_blob_no_data(self) -> None:
    """Test that build_rule_action_blob handles missing data"""
    self.action.data = {}
    blob = self.handler.build_rule_action_blob(self.action, self.organization.id)
    # missing data keys must degrade to empty strings, not be omitted
    assert blob == {
        "id": "sentry.integrations.slack.notify_action.SlackNotifyServiceAction",
        "workspace": "1234567890",
        "channel_id": "channel789",
        "channel": "#general",
        "tags": "",
        "notes": "",
    }
| TestSlackIssueAlertHandler |
python | arrow-py__arrow | arrow/locales.py | {
"start": 83660,
"end": 84997
} | class ____(Locale):
# Locale identifiers this locale is registered under.
names = ["eu", "eu-eu"]
# Templates wrapped around a humanized timeframe.
past = "duela {0}"
future = "{0}"  # I don't know what's the right phrase in Basque for the future.
# Relative-time fragments; {0} is the amount.
# NOTE(review): "{0} hilabet" may be missing a trailing "e" ("hilabete") —
# needs confirmation from a Basque speaker before changing a runtime string.
timeframes = {
    "now": "Orain",
    "second": "segundo bat",
    "seconds": "{0} segundu",
    "minute": "minutu bat",
    "minutes": "{0} minutu",
    "hour": "ordu bat",
    "hours": "{0} ordu",
    "day": "egun bat",
    "days": "{0} egun",
    "month": "hilabete bat",
    "months": "{0} hilabet",
    "year": "urte bat",
    "years": "{0} urte",
}
# Month names, 1-indexed (index 0 is a placeholder).
month_names = [
    "",
    "urtarrilak",
    "otsailak",
    "martxoak",
    "apirilak",
    "maiatzak",
    "ekainak",
    "uztailak",
    "abuztuak",
    "irailak",
    "urriak",
    "azaroak",
    "abenduak",
]
month_abbreviations = [
    "",
    "urt",
    "ots",
    "mar",
    "api",
    "mai",
    "eka",
    "uzt",
    "abu",
    "ira",
    "urr",
    "aza",
    "abe",
]
# Day names, 1-indexed with Monday first (index 0 is a placeholder).
day_names = [
    "",
    "astelehena",
    "asteartea",
    "asteazkena",
    "osteguna",
    "ostirala",
    "larunbata",
    "igandea",
]
day_abbreviations = ["", "al", "ar", "az", "og", "ol", "lr", "ig"]
| BasqueLocale |
python | ansible__ansible | lib/ansible/module_utils/_internal/_datatag/__init__.py | {
"start": 22783,
"end": 22982
} | class ____(TypeError):
def __init__(self, value):
    """Build the fixed error message around *value*, which cannot carry tags."""
    super(NotTaggableError, self).__init__('{} is not taggable'.format(value))
@dataclasses.dataclass(**_tag_dataclass_kwargs)
| NotTaggableError |
python | huggingface__transformers | src/transformers/models/idefics/image_processing_idefics.py | {
"start": 2366,
"end": 9894
} | class ____(BaseImageProcessor):
r"""
Constructs a Idefics image processor.
Args:
image_size (`int`, *optional*, defaults to 224):
Resize to image size
image_mean (`float` or `list[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can be
overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
Can be overridden by the `image_std` parameter in the `preprocess` method.
image_num_channels (`int`, *optional*, defaults to 3):
Number of image channels.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
"""
# Name(s) of the tensors this processor produces.
model_input_names = ["pixel_values"]
valid_kwargs = IdeficsImageProcessorKwargs
def __init__(
    self,
    image_size: int = 224,
    image_mean: Optional[Union[float, list[float]]] = None,
    image_std: Optional[Union[float, list[float]]] = None,
    image_num_channels: Optional[int] = 3,
    do_rescale: bool = True,
    rescale_factor: Union[int, float] = 1 / 255,
    **kwargs,
) -> None:
    super().__init__(**kwargs)
    self.image_size = image_size
    self.image_num_channels = image_num_channels
    # fall back to the IDEFICS training statistics when no override is given
    self.image_mean = image_mean if image_mean is not None else IDEFICS_STANDARD_MEAN
    self.image_std = image_std if image_std is not None else IDEFICS_STANDARD_STD
    self.do_rescale = do_rescale
    self.rescale_factor = rescale_factor
def preprocess(
    self,
    images: ImageInput,
    image_num_channels: Optional[int] = 3,
    image_size: Optional[dict[str, int]] = None,
    image_mean: Optional[Union[float, list[float]]] = None,
    image_std: Optional[Union[float, list[float]]] = None,
    transform: Optional[Callable] = None,
    do_rescale: Optional[bool] = None,
    rescale_factor: Optional[float] = None,
    return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
    **kwargs,
) -> TensorType:
    """
    Preprocess a batch of images.

    Args:
        images (`ImageInput`):
            A list of images to preprocess.
        image_size (`int`, *optional*, defaults to `self.image_size`):
            Resize to image size
        image_num_channels (`int`, *optional*, defaults to `self.image_num_channels`):
            Number of image channels.
        image_mean (`float` or `list[float]`, *optional*, defaults to `IDEFICS_STANDARD_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. Can
            be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `list[float]`, *optional*, defaults to `IDEFICS_STANDARD_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess`
            method. Can be overridden by the `image_std` parameter in the `preprocess` method.
        transform (`Callable`, *optional*, defaults to `None`):
            A custom transform function that accepts a single image can be passed for training. For example,
            `torchvision.Compose` can be used to compose multiple transforms. If `None` - an inference mode is
            assumed - and then a preset of inference-specific transforms will be applied to the images
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
            the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
            method.

    Returns:
        a PyTorch tensor of the processed images
    """
    # per-call overrides win over the values stored on the processor
    image_size = image_size if image_size is not None else self.image_size
    image_num_channels = image_num_channels if image_num_channels is not None else self.image_num_channels
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    size = (image_size, image_size)  # model expects square images
    # nothing to do for an empty batch
    if isinstance(images, list) and len(images) == 0:
        return []
    # NOTE(review): fetch_images presumably resolves URL/path inputs into
    # images — defined on a mixin outside this view; confirm.
    images = self.fetch_images(images)
    images = make_flat_list_of_images(images)
    if not valid_images(images):
        raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
    # For training a user needs to pass their own set of transforms as a Callable.
    # For reference this is what was used in the original IDEFICS training:
    # transform = transforms.Compose([
    #     convert_to_rgb,
    #     transforms.RandomResizedCrop((size, size), scale=(0.9, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),
    #     transforms.ToTensor(),
    #     transforms.Normalize(mean=image_mean, std=image_std),
    # ])
    if transform is not None:
        if not is_torch_available():
            raise ImportError("To pass in `transform` torch must be installed")
        import torch
        images = [transform(x) for x in images]
        return torch.stack(images)
    # for inference we do the exact transforms that were used to train IDEFICS
    images = [convert_to_rgb(x) for x in images]
    # further transforms expect numpy arrays
    images = [to_numpy_array(x) for x in images]
    images = [resize(x, size, resample=PILImageResampling.BICUBIC) for x in images]
    images = [self.rescale(image=image, scale=rescale_factor) for image in images]
    images = [self.normalize(x, mean=image_mean, std=image_std) for x in images]
    images = [to_channel_dimension_format(x, ChannelDimension.FIRST) for x in images]
    images = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)["pixel_values"]
    return images
__all__ = ["IdeficsImageProcessor"]
| IdeficsImageProcessor |
python | ray-project__ray | rllib/examples/envs/classes/windy_maze_env.py | {
"start": 2699,
"end": 5762
} | class ____(MultiAgentEnv):
def __init__(self, env_config):
    """Wrap a flat WindyMazeEnv and expose it as a two-level multi-agent env."""
    super().__init__()
    self.flat_env = WindyMazeEnv(env_config)
def reset(self, *, seed=None, options=None):
    """Reset the flat env and hand the first observation to the high-level agent."""
    self.cur_obs, infos = self.flat_env.reset()
    self.current_goal = None
    self.steps_remaining_at_level = None
    self.num_high_level_steps = 0
    # current low level agent id. This must be unique for each high level
    # step since agent ids cannot be reused.
    self.low_level_agent_id = "low_level_{}".format(self.num_high_level_steps)
    return {
        "high_level_agent": self.cur_obs,
    }, {"high_level_agent": infos}
def step(self, action_dict):
    """Route the single pending agent's action to the matching handler.

    Exactly one agent acts per step: the high-level agent (goal setting)
    or the currently active low-level agent (maze moves).
    """
    assert len(action_dict) == 1, action_dict
    if "high_level_agent" in action_dict:
        return self._high_level_step(action_dict["high_level_agent"])
    return self._low_level_step(next(iter(action_dict.values())))
def _high_level_step(self, action):
    """Consume a high-level action (the goal) and spawn a fresh low-level
    agent with a 25-step budget."""
    logger.debug("High level agent sets goal")
    self.current_goal = action
    self.steps_remaining_at_level = 25
    self.num_high_level_steps += 1
    # fresh id per high-level step: agent ids cannot be reused in an episode
    self.low_level_agent_id = "low_level_{}".format(self.num_high_level_steps)
    # low-level observation is (flat obs, goal); reward starts at 0
    obs = {self.low_level_agent_id: [self.cur_obs, self.current_goal]}
    rew = {self.low_level_agent_id: 0}
    # NOTE(review): done and truncated alias the same dict object here —
    # harmless as returned read-only, but worth confirming.
    done = truncated = {"__all__": False}
    return obs, rew, done, truncated, {}
def _low_level_step(self, action):
    """Advance the flat env one move and reward the low-level agent +1/-1/0
    for reaching / missing / not leaving the goal cell; hand control back to
    the high-level agent on episode end or budget exhaustion."""
    logger.debug("Low level agent step {}".format(action))
    self.steps_remaining_at_level -= 1
    cur_pos = tuple(self.cur_obs[0])
    goal_pos = self.flat_env._get_new_pos(cur_pos, self.current_goal)
    # Step in the actual env
    f_obs, f_rew, f_terminated, f_truncated, info = self.flat_env.step(action)
    new_pos = tuple(f_obs[0])
    self.cur_obs = f_obs
    # Calculate low-level agent observation and reward
    obs = {self.low_level_agent_id: [f_obs, self.current_goal]}
    if new_pos != cur_pos:
        if new_pos == goal_pos:
            rew = {self.low_level_agent_id: 1}
        else:
            rew = {self.low_level_agent_id: -1}
    else:
        # agent did not move (e.g. bumped a wall): neutral reward
        rew = {self.low_level_agent_id: 0}
    # Handle env termination & transitions back to higher level.
    terminated = {"__all__": False}
    truncated = {"__all__": False}
    if f_terminated or f_truncated:
        terminated["__all__"] = f_terminated
        truncated["__all__"] = f_truncated
        logger.debug("high level final reward {}".format(f_rew))
        rew["high_level_agent"] = f_rew
        obs["high_level_agent"] = f_obs
    elif self.steps_remaining_at_level == 0:
        # budget spent: end only this low-level agent, ask for a new goal
        terminated[self.low_level_agent_id] = True
        truncated[self.low_level_agent_id] = False
        rew["high_level_agent"] = 0
        obs["high_level_agent"] = f_obs
    return obs, rew, terminated, truncated, {self.low_level_agent_id: info}
| HierarchicalWindyMazeEnv |
python | lxml__lxml | src/lxml/html/__init__.py | {
"start": 25160,
"end": 25218
} | class ____(HtmlMixin, etree.EntityBase):
pass
| HtmlEntity |
python | numba__numba | numba/_version.py | {
"start": 1645,
"end": 23620
} | class ____(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# Registry of per-VCS handler callables, filled in by the decorator below.
# LONG_VERSION_PY is kept for versioneer compatibility.
LONG_VERSION_PY: Dict[str, str] = {}
HANDLERS: Dict[str, Dict[str, Callable]] = {}


def register_vcs_handler(vcs, method):  # decorator
    """Create decorator to mark a method as the handler of a VCS."""

    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f

    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    Each name in *commands* is tried in turn until one can be spawned; the
    extra names act as fallbacks (e.g. "git.cmd" then "git.exe" on Windows).

    Returns a ``(stdout, returncode)`` tuple: ``(text, 0)`` on success,
    ``(None, returncode)`` when the command ran but exited non-zero, and
    ``(None, None)`` when no command could be launched at all.
    """
    assert isinstance(commands, list)
    process = None
    popen_kwargs = {}
    if sys.platform == "win32":
        # This hides the console window if pythonw.exe is used
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        popen_kwargs["startupinfo"] = startupinfo
    for command in commands:
        try:
            dispcmd = str([command] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen([command] + args, cwd=cwd, env=env,
                                       stdout=subprocess.PIPE,
                                       stderr=(subprocess.PIPE if hide_stderr
                                               else None), **popen_kwargs)
            break
        except OSError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # executable not found: fall through to the next candidate
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # the for-loop fell through: none of the candidates could be spawned
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip().decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    level = root
    for _ in range(3):
        leaf = os.path.basename(level)
        if leaf.startswith(parentdir_prefix):
            # everything after the prefix is the version string
            return {"version": leaf[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(level)
        level = os.path.dirname(level)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Scans *versionfile_abs* line by line for the ``git_refnames`` /
    ``git_full`` / ``git_date`` assignments and returns whichever were found
    under the keys "refnames", "full" and "date". An unreadable file yields
    an empty dict.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        with open(versionfile_abs, "r") as fobj:
            for line in fobj:
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
                if line.strip().startswith("git_date ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["date"] = mo.group(1)
    except OSError:
        # missing/unreadable file: the caller treats absent keywords as
        # "not this method"
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    *keywords* is the dict produced by ``git_get_keywords``. Raises
    NotThisMethod when the keywords are missing or still unexpanded
    ("$Format..."), i.e. we are not inside a git-archive tarball.
    """
    if "refnames" not in keywords:
        raise NotThisMethod("Short version file found")
    date = keywords.get("date")
    if date is not None:
        # Use only the last line.  Previous lines may contain GPG signature
        # information.
        date = date.splitlines()[-1]
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {r.strip() for r in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = {r[len(TAG):] for r in refs if r.startswith(TAG)}
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = {r for r in refs if re.search(r'\d', r)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            # Filter out refs that exactly match prefix or that don't start
            # with a number once the prefix is stripped (mostly a concern
            # when prefix is '')
            if not re.match(r'\d', r):
                continue
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, branch, dirty,
    closest-tag, distance, date, error. Raises NotThisMethod when git or the
    repository is unavailable.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    # GIT_DIR can interfere with correct operation of Versioneer.
    # It may be intended to be passed to the Versioneer-versioned project,
    # but that should not change where we get our version from.
    env = os.environ.copy()
    env.pop("GIT_DIR", None)
    runner = functools.partial(runner, env=env)
    _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root,
                   hide_stderr=not verbose)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = runner(GITS, [
        "describe", "--tags", "--dirty", "--always", "--long",
        "--match", f"{tag_prefix}[[:digit:]]*"
    ], cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
                             cwd=root)
    # --abbrev-ref was added in git-1.6.3
    if rc != 0 or branch_name is None:
        raise NotThisMethod("'git rev-parse --abbrev-ref' returned error")
    branch_name = branch_name.strip()
    if branch_name == "HEAD":
        # If we aren't exactly on a branch, pick a branch which represents
        # the current commit. If all else fails, we are on a branchless
        # commit.
        branches, rc = runner(GITS, ["branch", "--contains"], cwd=root)
        # --contains was added in git-1.5.4
        if rc != 0 or branches is None:
            raise NotThisMethod("'git branch --contains' returned error")
        branches = branches.split("\n")
        # Remove the first line if we're running detached
        if "(" in branches[0]:
            branches.pop(0)
        # Strip off the leading "* " from the list of branches.
        branches = [branch[2:] for branch in branches]
        if "master" in branches:
            branch_name = "master"
        elif not branches:
            branch_name = None
        else:
            # Pick the first branch that is returned. Good or bad.
            branch_name = branches[0]
    pieces["branch"] = branch_name
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparsable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root)
        pieces["distance"] = len(out.split())  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip()
    # Use only the last line.  Previous lines may contain GPG signature
    # information.
    date = date.splitlines()[-1]
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return the separator for the next local-version segment: "." when the
    closest tag already carries a "+", otherwise "+"."""
    return "." if "+" in pieces.get("closest-tag", "") else "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if tag:
        parts = [tag]
        if pieces["distance"] or pieces["dirty"]:
            parts.append(plus_or_dot(pieces))
            parts.append("%d.g%s" % (pieces["distance"], pieces["short"]))
            if pieces["dirty"]:
                parts.append(".dirty")
    else:
        # exception #1: no tag at all
        parts = ["0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])]
        if pieces["dirty"]:
            parts.append(".dirty")
    return "".join(parts)
def render_pep440_branch(pieces):
    """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] .

    The ".dev0" means not master branch. Note that .dev0 sorts backwards
    (a feature branch will appear "older" than the master branch).

    Exceptions:
    1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty]
    """
    on_master = pieces["branch"] == "master"
    dirty_suffix = ".dirty" if pieces["dirty"] else ""
    tag = pieces["closest-tag"]
    if tag:
        if not (pieces["distance"] or pieces["dirty"]):
            return tag  # sitting exactly on a clean tag
        rendered = tag
        if not on_master:
            rendered += ".dev0"
        rendered += plus_or_dot(pieces)
        rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
        return rendered + dirty_suffix
    # exception #1: no tag at all
    rendered = "0" if on_master else "0.dev0"
    rendered += "+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
    return rendered + dirty_suffix
def pep440_split_post(ver):
    """Split pep440 version string at the post-release segment.

    Returns the release segments before the post-release and the
    post-release version number (or -1 if no post-release segment is present).
    """
    parts = ver.split(".post")
    if len(parts) != 2:
        # no (or ambiguous) post-release marker: report only the head
        return parts[0], None
    # ".post" with no number counts as post-release 0
    return parts[0], int(parts[1]) if parts[1] else 0
def render_pep440_pre(pieces):
    """TAG[.postN.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post0.devDISTANCE
    """
    tag = pieces["closest-tag"]
    distance = pieces["distance"]
    if not tag:
        # exception #1: nothing tagged yet
        return "0.post0.dev%d" % distance
    if not distance:
        # sitting exactly on the tag: the tag is the version
        return tag
    # bump (or start) the post-release segment and mark the distance as .devN
    base, post = pep440_split_post(tag)
    next_post = 0 if post is None else post + 1
    return "%s.post%d.dev%d" % (base, next_post, distance)
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    dev = ".dev0" if pieces["dirty"] else ""
    tag = pieces["closest-tag"]
    if tag:
        if not (pieces["distance"] or pieces["dirty"]):
            return tag  # clean build exactly on the tag
        return "%s.post%d%s%sg%s" % (
            tag, pieces["distance"], dev, plus_or_dot(pieces), pieces["short"])
    # exception #1: no tag at all
    return "0.post%d%s+g%s" % (pieces["distance"], dev, pieces["short"])
def render_pep440_post_branch(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] .

    The ".dev0" means not master branch.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty]
    """
    dev = "" if pieces["branch"] == "master" else ".dev0"
    dirty_suffix = ".dirty" if pieces["dirty"] else ""
    tag = pieces["closest-tag"]
    if tag:
        if not (pieces["distance"] or pieces["dirty"]):
            return tag  # clean build exactly on the tag
        return "%s.post%d%s%sg%s%s" % (
            tag, pieces["distance"], dev, plus_or_dot(pieces),
            pieces["short"], dirty_suffix)
    # exception #1: no tag at all
    return "0.post%d%s+g%s%s" % (
        pieces["distance"], dev, pieces["short"], dirty_suffix)
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    dev = ".dev0" if pieces["dirty"] else ""
    tag = pieces["closest-tag"]
    if tag and not (pieces["distance"] or pieces["dirty"]):
        return tag  # clean build exactly on the tag
    base = tag if tag else "0"  # exception #1 uses "0" as the base
    return "%s.post%d%s" % (base, pieces["distance"], dev)
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
    """
    if pieces["closest-tag"]:
        parts = [pieces["closest-tag"]]
        if pieces["distance"]:
            parts.append("-%d-g%s" % (pieces["distance"], pieces["short"]))
    else:
        # exception #1: bare commit hash
        parts = [pieces["short"]]
    if pieces["dirty"]:
        parts.append("-dirty")
    return "".join(parts)
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty]  (note: no 'g' prefix)
    """
    if pieces["closest-tag"]:
        base = "%s-%d-g%s" % (pieces["closest-tag"], pieces["distance"],
                              pieces["short"])
    else:
        # exception #1: bare commit hash
        base = pieces["short"]
    suffix = "-dirty" if pieces["dirty"] else ""
    return base + suffix
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # broken pieces short-circuit: report the error verbatim
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-branch": render_pep440_branch,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-post-branch": render_pep440_post_branch,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    return {"version": renderers[style](pieces),
            "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"],
            "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    # Strategy 1: expanded $Format$ keywords (works from a git-archive tarball).
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    # Locate the repository root by stripping one path component per segment
    # of cfg.versionfile_source from this file's real path.
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for _ in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    # Strategy 2: ask git directly (works from a checkout).
    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass
    # Strategy 3: parse the version out of the unpacked tarball's dirname.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    # all strategies failed: report an explicit placeholder version
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
| NotThisMethod |
python | kubernetes-client__python | kubernetes/client/models/v1alpha2_lease_candidate.py | {
"start": 383,
"end": 6764
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1alpha2LeaseCandidateSpec'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
"""V1alpha2LeaseCandidate - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
@property
def api_version(self):
"""Gets the api_version of this V1alpha2LeaseCandidate. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1alpha2LeaseCandidate. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1alpha2LeaseCandidate.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1alpha2LeaseCandidate. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1alpha2LeaseCandidate. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1alpha2LeaseCandidate. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1alpha2LeaseCandidate.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1alpha2LeaseCandidate. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1alpha2LeaseCandidate. # noqa: E501
:return: The metadata of this V1alpha2LeaseCandidate. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1alpha2LeaseCandidate.
:param metadata: The metadata of this V1alpha2LeaseCandidate. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1alpha2LeaseCandidate. # noqa: E501
:return: The spec of this V1alpha2LeaseCandidate. # noqa: E501
:rtype: V1alpha2LeaseCandidateSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1alpha2LeaseCandidate.
:param spec: The spec of this V1alpha2LeaseCandidate. # noqa: E501
:type: V1alpha2LeaseCandidateSpec
"""
self._spec = spec
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha2LeaseCandidate):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha2LeaseCandidate):
return True
return self.to_dict() != other.to_dict()
| V1alpha2LeaseCandidate |
python | chroma-core__chroma | chromadb/test/data_loader/test_data_loader.py | {
"start": 461,
"end": 3708
} | class ____(DataLoader[List[Optional[Image]]]):
def __call__(self, uris: Sequence[Optional[URI]]) -> List[Optional[Image]]:
# Convert each URI to a numpy array
return [None if uri is None else encode_data(uri) for uri in uris]
def record_set_with_uris(n: int = 3) -> Dict[str, Union[IDs, Documents, URIs]]:
return {
"ids": [f"{i}" for i in range(n)],
"documents": [f"document_{i}" for i in range(n)],
"uris": [f"uri_{i}" for i in range(n)],
}
@pytest.fixture()
def collection_with_data_loader(
client: ClientAPI,
) -> Generator[chromadb.Collection, None, None]:
reset(client)
collection = client.create_collection(
name="collection_with_data_loader",
data_loader=DefaultDataLoader(),
embedding_function=hashing_multimodal_ef(),
)
yield collection
client.delete_collection(collection.name)
@pytest.fixture
def collection_without_data_loader(
client: ClientAPI,
) -> Generator[chromadb.Collection, None, None]:
reset(client)
collection = client.create_collection(
name="collection_without_data_loader",
embedding_function=hashing_multimodal_ef(),
)
yield collection
client.delete_collection(collection.name)
def test_without_data_loader(
collection_without_data_loader: chromadb.Collection,
n_examples: int = 3,
) -> None:
record_set = record_set_with_uris(n=n_examples)
# Can't embed data in URIs without a data loader
with pytest.raises(ValueError):
collection_without_data_loader.add(
ids=record_set["ids"],
uris=record_set["uris"],
)
# Can't get data from URIs without a data loader
with pytest.raises(ValueError):
collection_without_data_loader.get(include=["data"])
def test_without_uris(
collection_with_data_loader: chromadb.Collection, n_examples: int = 3
) -> None:
record_set = record_set_with_uris(n=n_examples)
collection_with_data_loader.add(
ids=record_set["ids"],
documents=record_set["documents"],
)
get_result = collection_with_data_loader.get(include=["data"])
assert get_result["data"] is not None
for data in get_result["data"]:
assert data is None
def test_data_loader(
collection_with_data_loader: chromadb.Collection, n_examples: int = 3
) -> None:
record_set = record_set_with_uris(n=n_examples)
collection_with_data_loader.add(
ids=record_set["ids"],
uris=record_set["uris"],
)
# Get with "data"
get_result = collection_with_data_loader.get(include=["data"])
assert get_result["data"] is not None
for i, data in enumerate(get_result["data"]):
assert data is not None
assert data == encode_data(record_set["uris"][i])
# Query by URI
query_result = collection_with_data_loader.query(
query_uris=record_set["uris"],
n_results=len(record_set["uris"][0]),
include=["data", "uris"],
)
assert query_result["data"] is not None
for i, data in enumerate(query_result["data"][0]):
assert data is not None
assert query_result["uris"] is not None
assert data == encode_data(query_result["uris"][0][i])
| DefaultDataLoader |
python | kamyu104__LeetCode-Solutions | Python/shortest-uncommon-substring-in-an-array.py | {
"start": 42,
"end": 1636
} | class ____(object):
def shortestSubstrings(self, arr):
"""
:type arr: List[str]
:rtype: List[str]
"""
class Trie(object):
def __init__(self):
self.__nodes = []
self.__cnts = []
self.__new_node()
def __new_node(self):
self.__nodes.append([-1]*26)
self.__cnts.append(0)
return len(self.__nodes)-1
def add(self, s, d):
for i in xrange(len(s)):
curr = 0
for j in xrange(i, len(s)):
x = ord(s[j])-ord('a')
if self.__nodes[curr][x] == -1:
self.__nodes[curr][x] = self.__new_node()
curr = self.__nodes[curr][x]
self.__cnts[curr] += d
def query(self, s):
result = (float("inf"), "")
for i in xrange(len(s)):
curr = 0
for j in xrange(i, len(s)):
curr = self.__nodes[curr][ord(s[j])-ord('a')]
if self.__cnts[curr] == 0:
result = min(result, (j-i+1, s[i:j+1]))
break
return result[1]
trie = Trie()
for x in arr:
trie.add(x, +1)
result = []
for x in arr:
trie.add(x, -1)
result.append(trie.query(x))
trie.add(x, +1)
return result
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 475855,
"end": 476791
} | class ____(sgqlc.types.Type):
"""Represents a range of information from a Git blame."""
__schema__ = github_schema
__field_names__ = ("age", "commit", "ending_line", "starting_line")
age = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="age")
"""Identifies the recency of the change, from 1 (new) to 10 (old).
This is calculated as a 2-quantile and determines the length of
distance between the median age of all the changes in the file and
the recency of the current range's change.
"""
commit = sgqlc.types.Field(sgqlc.types.non_null("Commit"), graphql_name="commit")
"""Identifies the line author"""
ending_line = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="endingLine")
"""The ending line for the range"""
starting_line = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="startingLine")
"""The starting line for the range"""
| BlameRange |
python | pytorch__pytorch | torch/distributed/elastic/events/api.py | {
"start": 488,
"end": 626
} | class ____(str, Enum):
"""Known identifiers of the event producers."""
AGENT = "AGENT"
WORKER = "WORKER"
@dataclass
| EventSource |
python | facebookresearch__faiss | tests/test_build_blocks.py | {
"start": 2160,
"end": 3515
} | class ____(unittest.TestCase):
def test_recons_orthonormal(self):
lt = faiss.LinearTransform(20, 10, True)
rs = np.random.RandomState(10)
A, _ = np.linalg.qr(rs.randn(20, 20))
A = A[:10].astype('float32')
faiss.copy_array_to_vector(A.ravel(), lt.A)
faiss.copy_array_to_vector(rs.randn(10).astype('float32'), lt.b)
lt.set_is_orthonormal()
lt.is_trained = True
assert lt.is_orthonormal
x = rs.rand(30, 20).astype('float32')
xt = lt.apply_py(x)
xtt = lt.reverse_transform(xt)
xttt = lt.apply_py(xtt)
err = ((xt - xttt)**2).sum()
self.assertGreater(1e-5, err)
def test_recons_orthogonal_impossible(self):
lt = faiss.LinearTransform(20, 10, True)
rs = np.random.RandomState(10)
A = rs.randn(10 * 20).astype('float32')
faiss.copy_array_to_vector(A.ravel(), lt.A)
faiss.copy_array_to_vector(rs.randn(10).astype('float32'), lt.b)
lt.is_trained = True
lt.set_is_orthonormal()
assert not lt.is_orthonormal
x = rs.rand(30, 20).astype('float32')
xt = lt.apply_py(x)
try:
lt.reverse_transform(xt)
except Exception:
pass
else:
self.assertFalse('should do an exception')
| TestOrthogonalReconstruct |
python | readthedocs__readthedocs.org | readthedocs/proxito/exceptions.py | {
"start": 2964,
"end": 3548
} | class ____(ContextualizedHttp404):
"""Raised if a subproject was not found."""
template_name = "errors/proxito/404/no_project.html"
not_found_subject = pgettext_lazy("Names an object not found in a 404 error", "subproject")
def __init__(self, project, **kwargs):
"""
Raised if a subproject was not found.
:param project: The project in which the subproject could not be found
:param kwargs: Context dictionary of the rendered template
"""
kwargs["project"] = project
super().__init__(**kwargs)
| SubprojectHttp404 |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 37186,
"end": 37369
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("GENERAL", "MALWARE")
| SecurityAdvisoryClassification |
python | python-excel__xlwt | xlwt/BIFFRecords.py | {
"start": 68087,
"end": 68676
} | class ____(BiffRecord):
"""
This record is part of the Calculation Settings Block.
It stores which method is used to show cell addresses in formulas.
The “RC” mode uses numeric indexes for rows and columns,
i.e. “R(1)C(-1)”, or “R1C1:R2C2”.
The “A1” mode uses characters for columns and numbers for rows,
i.e. “B1”, or “$A$1:$B$2”.
Record REFMODE, BIFF2-BIFF8:
Offset Size Contents
0 2 0 = RC mode; 1 = A1 mode
"""
_REC_ID = 0x00F
def __init__(self, ref_mode):
self._rec_data = pack('<H', ref_mode)
| RefModeRecord |
python | joke2k__faker | tests/providers/test_person.py | {
"start": 22370,
"end": 22665
} | class ____(unittest.TestCase):
"""Tests person in the es locale."""
def setUp(self):
self.fake = Faker("es")
Faker.seed(0)
def test_language_name(self):
language_name = self.fake.language_name()
assert language_name in EsProvider.language_names
| TestEs |
python | django__django | django/contrib/admin/widgets.py | {
"start": 1651,
"end": 1933
} | class ____:
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
context["widget"]["attrs"][
"aria-describedby"
] = f"id_{name}_timezone_warning_helptext"
return context
| DateTimeWidgetContextMixin |
python | eth-brownie__brownie | brownie/utils/docopt.py | {
"start": 14360,
"end": 25129
} | class ____(list):
def __init__(
self,
source: list[str] | str,
error: Type[DocoptExit] | Type[DocoptLanguageError] = DocoptExit,
) -> None:
self += source if isinstance(source, list) else source.split()
self.error = error
@staticmethod
def from_pattern(source: str) -> _Tokens:
source = regex_sub(r"([\[\]\(\)\|]|\.\.\.)", r" \1 ", source)
fragments = [s for s in re.split(r"\s+|(\S*<.*?>)", source) if s]
return _Tokens(fragments, error=DocoptLanguageError)
def move(self) -> str | None:
return self.pop(0) if len(self) else None
def current(self) -> str | None:
return self[0] if len(self) else None
def _parse_longer(
tokens: _Tokens,
options: list[_Option],
argv: bool = False,
more_magic: bool = False,
) -> list[_Pattern]:
"""longer ::= '--' chars [ ( ' ' | '=' ) chars ] ;"""
current_token = tokens.move()
if current_token is None or not current_token.startswith("--"):
raise ValueError(f"parse_longer got what appears to be an invalid token: {current_token}")
longer, maybe_eq, maybe_value = current_token.partition("=")
value = None if maybe_eq == maybe_value == "" else maybe_value
similar = [o for o in options if o.longer and longer == o.longer]
start_collision = (
len([o for o in options if o.longer and longer in o.longer and o.longer.startswith(longer)])
> 1
)
if argv and not len(similar) and not start_collision:
similar = [
o for o in options if o.longer and longer in o.longer and o.longer.startswith(longer)
]
# try advanced matching
if more_magic and not similar:
corrected = [
(longer, o) for o in options if o.longer and levenshtein_norm(longer, o.longer) < 0.25
]
if corrected:
print(f"NB: Corrected {corrected[0][0]} to {corrected[0][1].longer}")
similar = [correct for (original, correct) in corrected]
if len(similar) > 1:
raise DocoptLanguageError(f"{longer} is not a unique prefix: {similar}?")
elif not similar:
argcount = 1 if maybe_eq == "=" else 0
o = _Option(None, longer, argcount)
options.append(o)
if tokens.error is DocoptExit:
o = _Option(None, longer, argcount, value if argcount else True)
else:
o = _Option(similar[0].short, similar[0].longer, similar[0].argcount, similar[0].value)
if o.argcount == 0:
if value is not None:
raise tokens.error(f"{o.longer} must not have an argument")
elif value is None:
if tokens.current() in [None, "--"]:
raise tokens.error(f"{o.longer} requires argument")
value = tokens.move()
if tokens.error is DocoptExit:
o.value = value if value is not None else True
return [o]
def _parse_shorts(
tokens: _Tokens, options: list[_Option], more_magic: bool = False
) -> list[_Pattern]:
"""shorts ::= '-' ( chars )* [ [ ' ' ] chars ] ;"""
token = tokens.move()
if token is None or not token.startswith("-") or token.startswith("--"):
raise ValueError(f"parse_shorts got what appears to be an invalid token: {token}")
left = token.lstrip("-")
parsed: list[_Pattern] = []
while left != "":
short, left = f"-{left[0]}", left[1:]
transformations: dict[str | None, Callable[[str], str]] = {None: lambda x: x}
if more_magic:
transformations["lowercase"] = lambda x: x.lower()
transformations["uppercase"] = lambda x: x.upper()
# try identity, lowercase, uppercase, iff such resolves uniquely
# (ie if upper and lowercase are not both defined)
similar: list[_Option] = []
de_abbreviated = False
for transform_name, transform in transformations.items():
shortened = list(filter(None, (o.short for o in options)))
transformed = list(set(map(transform, shortened)))
no_collisions = sum(
filter(lambda ct: ct == 1, map(transformed.count, map(transform, shortened)))
) # == len(transformed)
if no_collisions:
similar = [o for o in options if o.short and transform(o.short) == transform(short)]
if similar:
if transform_name:
print(
f"NB: Corrected {short} to {similar[0].short} " f"via {transform_name}"
)
break
# if transformations do not resolve, try abbreviations of 'longer' forms
# iff such resolves uniquely (ie if no two longer forms begin with the
# same letter)
if not similar and more_magic:
abbreviated = [
transform(o.longer[1:3]) for o in options if o.longer and not o.short
] + [transform(o.short) for o in options if o.short and not o.longer]
nonredundantly_abbreviated_options = [
o for o in options if o.longer and abbreviated.count(short) == 1
]
no_collisions = len(nonredundantly_abbreviated_options) == len(abbreviated)
if no_collisions:
for o in options:
if (
not o.short
and o.longer
and transform(short) == transform(o.longer[1:3])
):
similar = [o]
print(
f"NB: Corrected {short} to {similar[0].longer} "
f"via abbreviation (case change: {transform_name})"
)
break
if len(similar):
de_abbreviated = True
break
if len(similar) > 1:
raise DocoptLanguageError(f"{short} is specified ambiguously {len(similar)} times")
elif len(similar) < 1:
o = _Option(short, None, 0)
options.append(o)
if tokens.error is DocoptExit:
o = _Option(short, None, 0, True)
else:
option_short_value = None if de_abbreviated else transform(short)
o = _Option(
option_short_value,
similar[0].longer,
similar[0].argcount,
similar[0].value,
)
value = None
current_token = tokens.current()
if o.argcount != 0:
if left == "":
if current_token is None or current_token == "--":
raise tokens.error(f"{short} requires argument")
else:
value = tokens.move()
else:
value = left
left = ""
if tokens.error is DocoptExit:
o.value = value if value is not None else True
parsed.append(o)
return parsed
def _parse_pattern(source: str, options: list[_Option]) -> _Required:
tokens = _Tokens.from_pattern(source)
result = _parse_expr(tokens, options)
if tokens.current() is not None:
raise tokens.error("unexpected ending: %r" % " ".join(tokens))
return _Required(*result)
def _parse_expr(tokens: _Tokens, options: list[_Option]) -> list[_Pattern]:
"""expr ::= seq ( '|' seq )* ;"""
result: list[_Pattern] = []
seq_0: list[_Pattern] = _parse_seq(tokens, options)
if tokens.current() != "|":
return seq_0
if len(seq_0) > 1:
result.append(_Required(*seq_0))
else:
result += seq_0
while tokens.current() == "|":
tokens.move()
seq_1 = _parse_seq(tokens, options)
result += [_Required(*seq_1)] if len(seq_1) > 1 else seq_1
return [_Either(*result)]
def _parse_seq(tokens: _Tokens, options: list[_Option]) -> list[_Pattern]:
"""seq ::= ( atom [ '...' ] )* ;"""
result: list[_Pattern] = []
while tokens.current() not in [None, "]", ")", "|"]:
atom = _parse_atom(tokens, options)
if tokens.current() == "...":
atom = [_OneOrMore(*atom)]
tokens.move()
result += atom
return result
def _parse_atom(tokens: _Tokens, options: list[_Option]) -> list[_Pattern]:
"""atom ::= '(' expr ')' | '[' expr ']' | 'options'
| longer | shorts | argument | command ;
"""
token = tokens.current()
if not token:
return [_Command(tokens.move())] # pragma: no cover
elif token in "([":
tokens.move()
matching = {"(": ")", "[": "]"}[token]
pattern = {"(": _Required, "[": _NotRequired}[token]
matched_pattern = pattern(*_parse_expr(tokens, options))
if tokens.move() != matching:
raise tokens.error(f"unmatched '{token}'")
return [matched_pattern]
elif token == "options":
tokens.move()
return [_OptionsShortcut()]
elif token.startswith("--") and token != "--":
return _parse_longer(tokens, options)
elif token.startswith("-") and token not in ("-", "--"):
return _parse_shorts(tokens, options)
elif token.startswith("<") and token.endswith(">") or token.isupper():
return [_Argument(tokens.move())]
else:
return [_Command(tokens.move())]
def _parse_argv(
tokens: _Tokens,
options: list[_Option],
options_first: bool = False,
more_magic: bool = False,
) -> list[_Pattern]:
"""Parse command-line argument vector.
If options_first:
argv ::= [ longer | shorts ]* [ argument ]* [ '--' [ argument ]* ] ;
else:
argv ::= [ longer | shorts | argument ]* [ '--' [ argument ]* ] ;
"""
def isanumber(x):
try:
float(x)
return True
except ValueError:
return False
parsed: list[_Pattern] = []
current_token = tokens.current()
while current_token is not None:
if current_token == "--":
return parsed + [_Argument(None, v) for v in tokens]
elif current_token.startswith("--"):
parsed += _parse_longer(tokens, options, argv=True, more_magic=more_magic)
elif (
current_token.startswith("-") and current_token != "-" and not isanumber(current_token)
):
parsed += _parse_shorts(tokens, options, more_magic=more_magic)
elif options_first:
return parsed + [_Argument(None, v) for v in tokens]
else:
parsed.append(_Argument(None, tokens.move()))
current_token = tokens.current()
return parsed
| _Tokens |
python | celery__celery | t/unit/app/test_amqp.py | {
"start": 5516,
"end": 6560
} | class ____:
@pytest.mark.parametrize('name,exchange,rkey', [
('default', 'foo', None),
('default', 'foo', 'routing_key'),
])
def test_setting_default_exchange(self, name, exchange, rkey):
q = Queue(name, routing_key=rkey)
self.app.conf.task_queues = {q}
self.app.conf.task_default_exchange = exchange
queues = dict(self.app.amqp.queues)
queue = queues[name]
assert queue.exchange.name == exchange
@pytest.mark.parametrize('name,extype,rkey', [
('default', 'direct', None),
('default', 'direct', 'routing_key'),
('default', 'topic', None),
('default', 'topic', 'routing_key'),
])
def test_setting_default_exchange_type(self, name, extype, rkey):
q = Queue(name, routing_key=rkey)
self.app.conf.task_queues = {q}
self.app.conf.task_default_exchange_type = extype
queues = dict(self.app.amqp.queues)
queue = queues[name]
assert queue.exchange.type == extype
| test_default_exchange |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/check_ops_test.py | {
"start": 35391,
"end": 37217
} | class ____(test.TestCase):
@test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1
def test_raises_when_negative(self):
freddie = constant_op.constant([-1, -2], name="freddie")
with self.assertRaisesOpError("fail"):
with ops.control_dependencies(
[check_ops.assert_positive(
freddie, message="fail")]):
out = array_ops.identity(freddie)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_positive(self):
remmy = constant_op.constant([1, 2], name="remmy")
with ops.control_dependencies([check_ops.assert_positive(remmy)]):
out = array_ops.identity(remmy)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1
def test_raises_when_zero(self):
meechum = constant_op.constant([0], name="meechum")
with self.assertRaisesOpError("x > 0 did not hold"):
with ops.control_dependencies([check_ops.assert_positive(meechum)]):
out = array_ops.identity(meechum)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_empty_tensor_doesnt_raise(self):
# A tensor is positive when it satisfies:
# For every element x_i in x, x_i > 0
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
empty = constant_op.constant([], name="empty")
with ops.control_dependencies([check_ops.assert_positive(empty)]):
out = array_ops.identity(empty)
self.evaluate(out)
def test_static_check_in_graph_mode(self):
with ops.Graph().as_default():
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Custom error message"):
check_ops.assert_positive(-1, message="Custom error message")
| AssertPositiveTest |
python | spyder-ide__spyder | spyder/utils/snippets/nodes.py | {
"start": 11018,
"end": 11845
} | class ____(VariableSnippetNode):
"""
Node that represents a variable placeholder snippet.
This node represents the expression ${var: placeholder}, where placeholder
can be a snippet or text.
"""
KIND = SnippetKind.VARIABLE_PLACEHOLDER
def __init__(self, variable, placeholder):
VariableSnippetNode.__init__(self, variable)
self._placeholder = placeholder
def update(self, placeholder):
self._placeholder = placeholder
def text(self):
if isinstance(self._placeholder, str):
return self._placeholder
elif isinstance(self._placeholder, ASTNode):
# FIXME: Implement placeholder composition once
# microsoft/language-server-protocol#801 is clarified
return self._placeholder.text()
| VariablePlaceholderNode |
python | PrefectHQ__prefect | src/prefect/server/events/actions.py | {
"start": 59939,
"end": 61294
} | class ____(AutomationAction, ExternalDataAction):
_action_description: ClassVar[str]
async def act(self, triggered_action: "TriggeredAction") -> None:
automation_id = await self.automation_id_to_use(triggered_action)
self._resulting_related_resources += [
RelatedResource.model_validate(
{
"prefect.resource.id": f"prefect.automation.{automation_id}",
"prefect.resource.role": "target",
}
)
]
logger.info(
self._action_description,
extra={
"automation_id": automation_id,
**self.logging_context(triggered_action),
},
)
async with await self.events_api_client(triggered_action) as events:
response = await self.command(events, automation_id, triggered_action)
self._result_details["status_code"] = response.status_code
if response.status_code >= 300:
raise ActionFailed(self.reason_from_response(response))
@abc.abstractmethod
async def command(
self,
events: PrefectServerEventsAPIClient,
automation_id: UUID,
triggered_action: "TriggeredAction",
) -> Response:
"""Issue the command to the Work Queue"""
| AutomationCommandAction |
python | huggingface__transformers | src/transformers/models/altclip/configuration_altclip.py | {
"start": 840,
"end": 6478
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`AltCLIPTextModel`]. It is used to instantiate a
AltCLIP text model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the AltCLIP
[BAAI/AltCLIP](https://huggingface.co/BAAI/AltCLIP) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 250002):
Vocabulary size of the AltCLIP model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`AltCLIPTextModel`].
hidden_size (`int`, *optional*, defaults to 1024):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 514):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 1):
The vocabulary size of the `token_type_ids` passed when calling [`AltCLIPTextModel`]
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 0.02):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
pad_token_id (`int`, *optional*, defaults to 1): The id of the *padding* token.
bos_token_id (`int`, *optional*, defaults to 0): The id of the *beginning-of-sequence* token.
eos_token_id (`Union[int, list[int]]`, *optional*, defaults to 2):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
project_dim (`int`, *optional*, defaults to 768):
The dimensions of the teacher model before the mapping layer.
Examples:
```python
>>> from transformers import AltCLIPTextModel, AltCLIPTextConfig
>>> # Initializing a AltCLIPTextConfig with BAAI/AltCLIP style configuration
>>> configuration = AltCLIPTextConfig()
>>> # Initializing a AltCLIPTextModel (with random weights) from the BAAI/AltCLIP style configuration
>>> model = AltCLIPTextModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "altclip_text_model"
def __init__(
self,
vocab_size=250002,
hidden_size=1024,
num_hidden_layers=24,
num_attention_heads=16,
intermediate_size=4096,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=514,
type_vocab_size=1,
initializer_range=0.02,
initializer_factor=0.02,
layer_norm_eps=1e-05,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
use_cache=True,
project_dim=768,
**kwargs,
):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.layer_norm_eps = layer_norm_eps
self.use_cache = use_cache
self.project_dim = project_dim
| AltCLIPTextConfig |
python | ethereum__web3.py | web3/middleware/gas_price_strategy.py | {
"start": 2355,
"end": 3784
} | class ____(Web3Middleware):
"""
- Uses a gas price strategy if one is set. This is only supported for
legacy transactions. It is recommended to send dynamic fee transactions
(EIP-1559) whenever possible.
- Validates transaction params against legacy and dynamic fee txn values.
"""
def request_processor(self, method: RPCEndpoint, params: Any) -> Any:
if method == "eth_sendTransaction":
transaction = params[0]
generated_gas_price = self._w3.eth.generate_gas_price(transaction)
w3 = cast("Web3", self._w3)
latest_block = w3.eth.get_block("latest")
transaction = validate_transaction_params(
transaction, latest_block, generated_gas_price
)
params = (transaction,)
return method, params
# -- async -- #
async def async_request_processor(self, method: RPCEndpoint, params: Any) -> Any:
if method == "eth_sendTransaction":
transaction = params[0]
w3 = cast("AsyncWeb3[Any]", self._w3)
generated_gas_price = w3.eth.generate_gas_price(transaction)
latest_block = await w3.eth.get_block("latest")
transaction = validate_transaction_params(
transaction, latest_block, generated_gas_price
)
params = (transaction,)
return method, params
| GasPriceStrategyMiddleware |
python | Pylons__pyramid | docs/tutorials/wiki/src/authorization/tutorial/security.py | {
"start": 661,
"end": 1989
} | class ____:
def __init__(self, secret):
self.authtkt = AuthTktCookieHelper(secret)
self.acl = ACLHelper()
def identity(self, request):
identity = self.authtkt.identify(request)
if identity is not None and identity['userid'] in USERS:
return identity
def authenticated_userid(self, request):
identity = self.identity(request)
if identity is not None:
return identity['userid']
def remember(self, request, userid, **kw):
return self.authtkt.remember(request, userid, **kw)
def forget(self, request, **kw):
return self.authtkt.forget(request, **kw)
def permits(self, request, context, permission):
principals = self.effective_principals(request)
return self.acl.permits(context, principals, permission)
def effective_principals(self, request):
principals = [Everyone]
identity = self.identity(request)
if identity is not None:
principals.append(Authenticated)
principals.append('u:' + identity['userid'])
principals.extend(GROUPS.get(identity['userid'], []))
return principals
def includeme(config):
settings = config.get_settings()
config.set_security_policy(MySecurityPolicy(settings['auth.secret']))
| MySecurityPolicy |
python | numba__numba | numba/cuda/tests/cudapy/test_alignment.py | {
"start": 133,
"end": 1218
} | class ____(CUDATestCase):
def test_record_alignment(self):
rec_dtype = np.dtype([('a', 'int32'), ('b', 'float64')], align=True)
rec = from_dtype(rec_dtype)
@cuda.jit((rec[:],))
def foo(a):
i = cuda.grid(1)
a[i].a = a[i].b
a_recarray = np.recarray(3, dtype=rec_dtype)
for i in range(a_recarray.size):
a_rec = a_recarray[i]
a_rec.a = 0
a_rec.b = (i + 1) * 123
foo[1, 3](a_recarray)
self.assertTrue(np.all(a_recarray.a == a_recarray.b))
@skip_on_cudasim('Simulator does not check alignment')
def test_record_alignment_error(self):
rec_dtype = np.dtype([('a', 'int32'), ('b', 'float64')])
rec = from_dtype(rec_dtype)
with self.assertRaises(Exception) as raises:
@cuda.jit((rec[:],))
def foo(a):
i = cuda.grid(1)
a[i].a = a[i].b
self.assertTrue('type float64 is not aligned' in str(raises.exception))
if __name__ == '__main__':
unittest.main()
| TestAlignment |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/sensors/dataflow.py | {
"start": 12083,
"end": 18079
} | class ____(BaseSensorOperator):
"""
Checks for job messages associated with a single job in Google Cloud Dataflow.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataflowJobMessagesSensor`
:param job_id: ID of the Dataflow job to be checked.
:param callback: a function that can accept a list of serialized job messages.
It can do whatever you want it to do. If the callback function is not provided,
then on successful completion the task will exit with True value.
For more info about the job message content see:
https://cloud.google.com/python/docs/reference/dataflow/latest/google.cloud.dataflow_v1beta3.types.JobMessage
:param fail_on_terminal_state: If set to True the sensor will raise an exception when the job reaches a terminal state.
No job messages will be returned.
:param project_id: Optional, the Google Cloud project ID in which to start a job.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param location: The location of the Dataflow job (for example europe-west1).
If set to None then the value of DEFAULT_DATAFLOW_LOCATION will be used.
See: https://cloud.google.com/dataflow/docs/concepts/regional-endpoints
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:param deferrable: If True, run the sensor in the deferrable mode.
:param poll_interval: Time (seconds) to wait between two consecutive calls to check the job.
"""
template_fields: Sequence[str] = ("job_id",)
def __init__(
self,
*,
job_id: str,
callback: Callable | None = None,
fail_on_terminal_state: bool = True,
project_id: str = PROVIDE_PROJECT_ID,
location: str = DEFAULT_DATAFLOW_LOCATION,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
poll_interval: int = 10,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.job_id = job_id
self.project_id = project_id
self.callback = callback
self.fail_on_terminal_state = fail_on_terminal_state
self.location = location
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
self.deferrable = deferrable
self.poll_interval = poll_interval
def poke(self, context: Context) -> PokeReturnValue | bool:
if self.fail_on_terminal_state:
job = self.hook.get_job(
job_id=self.job_id,
project_id=self.project_id,
location=self.location,
)
job_status = job["currentState"]
if job_status in DataflowJobStatus.TERMINAL_STATES:
message = f"Job with id '{self.job_id}' is already in terminal state: {job_status}"
raise AirflowException(message)
result = self.hook.fetch_job_messages_by_id(
job_id=self.job_id,
project_id=self.project_id,
location=self.location,
)
result = result if self.callback is None else self.callback(result)
if isinstance(result, PokeReturnValue):
return result
if bool(result):
return PokeReturnValue(
is_done=True,
xcom_value=result,
)
return False
def execute(self, context: Context) -> Any:
"""Airflow runs this method on the worker and defers using the trigger."""
if not self.deferrable:
super().execute(context)
else:
self.defer(
timeout=self.execution_timeout,
trigger=DataflowJobMessagesTrigger(
job_id=self.job_id,
project_id=self.project_id,
location=self.location,
gcp_conn_id=self.gcp_conn_id,
poll_sleep=self.poll_interval,
impersonation_chain=self.impersonation_chain,
fail_on_terminal_state=self.fail_on_terminal_state,
),
method_name="execute_complete",
)
def execute_complete(self, context: Context, event: dict[str, str | list]) -> Any:
"""
Execute this method when the task resumes its execution on the worker after deferral.
If the trigger returns an event with success status - passes the event result to the callback function.
Returns the event result if no callback function is provided.
If the trigger returns an event with error status - raises an exception.
"""
if event["status"] == "success":
self.log.info(event["message"])
return event["result"] if self.callback is None else self.callback(event["result"])
raise AirflowException(f"Sensor failed with the following message: {event['message']}")
@cached_property
def hook(self) -> DataflowHook:
return DataflowHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
| DataflowJobMessagesSensor |
python | pypa__pip | tests/unit/test_index.py | {
"start": 19030,
"end": 31549
} | class ____:
@pytest.mark.parametrize(
"allow_all_prereleases, prefer_binary",
[
(False, False),
(False, True),
(True, False),
(True, True),
],
)
def test_create__candidate_prefs(
self,
allow_all_prereleases: bool,
prefer_binary: bool,
) -> None:
"""
Test that the _candidate_prefs attribute is set correctly.
"""
link_collector = LinkCollector(
session=PipSession(),
search_scope=SearchScope([], [], False),
)
selection_prefs = SelectionPreferences(
allow_yanked=True,
allow_all_prereleases=allow_all_prereleases,
prefer_binary=prefer_binary,
)
finder = PackageFinder.create(
link_collector=link_collector,
selection_prefs=selection_prefs,
)
candidate_prefs = finder._candidate_prefs
assert candidate_prefs.allow_all_prereleases == allow_all_prereleases
assert candidate_prefs.prefer_binary == prefer_binary
def test_create__link_collector(self) -> None:
"""
Test that the _link_collector attribute is set correctly.
"""
link_collector = LinkCollector(
session=PipSession(),
search_scope=SearchScope([], [], False),
)
finder = PackageFinder.create(
link_collector=link_collector,
selection_prefs=SelectionPreferences(allow_yanked=True),
)
assert finder._link_collector is link_collector
def test_create__target_python(self) -> None:
"""
Test that the _target_python attribute is set correctly.
"""
link_collector = LinkCollector(
session=PipSession(),
search_scope=SearchScope([], [], False),
)
target_python = TargetPython(py_version_info=(3, 7, 3))
finder = PackageFinder.create(
link_collector=link_collector,
selection_prefs=SelectionPreferences(allow_yanked=True),
target_python=target_python,
)
actual_target_python = finder._target_python
# The target_python attribute should be set as is.
assert actual_target_python is target_python
# Check that the attributes weren't reset.
assert actual_target_python.py_version_info == (3, 7, 3)
def test_create__target_python_none(self) -> None:
"""
Test passing target_python=None.
"""
link_collector = LinkCollector(
session=PipSession(),
search_scope=SearchScope([], [], False),
)
finder = PackageFinder.create(
link_collector=link_collector,
selection_prefs=SelectionPreferences(allow_yanked=True),
target_python=None,
)
# Spot-check the default TargetPython object.
actual_target_python = finder._target_python
assert actual_target_python._given_py_version_info is None
assert actual_target_python.py_version_info == CURRENT_PY_VERSION_INFO
@pytest.mark.parametrize("allow_yanked", [False, True])
def test_create__allow_yanked(self, allow_yanked: bool) -> None:
"""
Test that the _allow_yanked attribute is set correctly.
"""
link_collector = LinkCollector(
session=PipSession(),
search_scope=SearchScope([], [], False),
)
selection_prefs = SelectionPreferences(allow_yanked=allow_yanked)
finder = PackageFinder.create(
link_collector=link_collector,
selection_prefs=selection_prefs,
)
assert finder._allow_yanked == allow_yanked
@pytest.mark.parametrize("ignore_requires_python", [False, True])
def test_create__ignore_requires_python(self, ignore_requires_python: bool) -> None:
"""
Test that the _ignore_requires_python attribute is set correctly.
"""
link_collector = LinkCollector(
session=PipSession(),
search_scope=SearchScope([], [], False),
)
selection_prefs = SelectionPreferences(
allow_yanked=True,
ignore_requires_python=ignore_requires_python,
)
finder = PackageFinder.create(
link_collector=link_collector,
selection_prefs=selection_prefs,
)
assert finder._ignore_requires_python == ignore_requires_python
def test_create__format_control(self) -> None:
"""
Test that the format_control attribute is set correctly.
"""
link_collector = LinkCollector(
session=PipSession(),
search_scope=SearchScope([], [], False),
)
format_control = FormatControl(set(), {":all:"})
selection_prefs = SelectionPreferences(
allow_yanked=True,
format_control=format_control,
)
finder = PackageFinder.create(
link_collector=link_collector,
selection_prefs=selection_prefs,
)
actual_format_control = finder.format_control
assert actual_format_control is format_control
# Check that the attributes weren't reset.
assert actual_format_control.only_binary == {":all:"}
@pytest.mark.parametrize(
"allow_yanked, ignore_requires_python, only_binary, expected_formats",
[
(False, False, {}, frozenset({"binary", "source"})),
# Test allow_yanked=True.
(True, False, {}, frozenset({"binary", "source"})),
# Test ignore_requires_python=True.
(False, True, {}, frozenset({"binary", "source"})),
# Test a non-trivial only_binary.
(False, False, {"twine"}, frozenset({"binary"})),
],
)
def test_make_link_evaluator(
self,
allow_yanked: bool,
ignore_requires_python: bool,
only_binary: set[str],
expected_formats: frozenset[str],
) -> None:
# Create a test TargetPython that we can check for.
target_python = TargetPython(py_version_info=(3, 7))
format_control = FormatControl(set(), only_binary)
link_collector = LinkCollector(
session=PipSession(),
search_scope=SearchScope([], [], False),
)
finder = PackageFinder(
link_collector=link_collector,
target_python=target_python,
allow_yanked=allow_yanked,
format_control=format_control,
ignore_requires_python=ignore_requires_python,
)
# Pass a project_name that will be different from canonical_name.
link_evaluator = finder.make_link_evaluator("Twine")
assert link_evaluator.project_name == "Twine"
assert link_evaluator._canonical_name == "twine"
assert link_evaluator._allow_yanked == allow_yanked
assert link_evaluator._ignore_requires_python == ignore_requires_python
assert link_evaluator._formats == expected_formats
# Test the _target_python attribute.
actual_target_python = link_evaluator._target_python
# The target_python attribute should be set as is.
assert actual_target_python is target_python
# For good measure, check that the attributes weren't reset.
assert actual_target_python._given_py_version_info == (3, 7)
assert actual_target_python.py_version_info == (3, 7, 0)
@pytest.mark.parametrize(
"allow_all_prereleases, prefer_binary",
[
(False, False),
(False, True),
(True, False),
(True, True),
],
)
def test_make_candidate_evaluator(
self,
allow_all_prereleases: bool,
prefer_binary: bool,
) -> None:
target_python = TargetPython()
target_python._valid_tags = [Tag("py36", "none", "any")]
candidate_prefs = CandidatePreferences(
prefer_binary=prefer_binary,
allow_all_prereleases=allow_all_prereleases,
)
link_collector = LinkCollector(
session=PipSession(),
search_scope=SearchScope([], [], False),
)
finder = PackageFinder(
link_collector=link_collector,
target_python=target_python,
allow_yanked=True,
candidate_prefs=candidate_prefs,
)
specifier = SpecifierSet()
# Pass hashes to check that _hashes is set.
hashes = Hashes({"sha256": [64 * "a"]})
evaluator = finder.make_candidate_evaluator(
"my-project",
specifier=specifier,
hashes=hashes,
)
assert evaluator._allow_all_prereleases == allow_all_prereleases
assert evaluator._hashes == hashes
assert evaluator._prefer_binary == prefer_binary
assert evaluator._project_name == "my-project"
assert evaluator._specifier is specifier
assert evaluator._supported_tags == [Tag("py36", "none", "any")]
@pytest.mark.parametrize(
"fragment, canonical_name, expected",
[
# Trivial.
("pip-18.0", "pip", 3),
("zope-interface-4.5.0", "zope-interface", 14),
# Canonicalized name match non-canonicalized egg info. (pypa/pip#5870)
("Jinja2-2.10", "jinja2", 6),
("zope.interface-4.5.0", "zope-interface", 14),
("zope_interface-4.5.0", "zope-interface", 14),
# Should be smart enough to parse ambiguous names from the provided
# package name.
("foo-2-2", "foo", 3),
("foo-2-2", "foo-2", 5),
# Should be able to detect collapsed characters in the egg info.
("foo--bar-1.0", "foo-bar", 8),
("foo-_bar-1.0", "foo-bar", 8),
# The package name must not ends with a dash (PEP 508), so the first
# dash would be the separator, not the second.
("zope.interface--4.5.0", "zope-interface", 14),
("zope.interface--", "zope-interface", 14),
# The version part is missing, but the split function does not care.
("zope.interface-", "zope-interface", 14),
],
)
def test_find_name_version_sep(
fragment: str, canonical_name: str, expected: int
) -> None:
index = _find_name_version_sep(fragment, canonical_name)
assert index == expected
@pytest.mark.parametrize(
"fragment, canonical_name",
[
# A dash must follow the package name.
("zope.interface4.5.0", "zope-interface"),
("zope.interface.4.5.0", "zope-interface"),
("zope.interface.-4.5.0", "zope-interface"),
("zope.interface", "zope-interface"),
],
)
def test_find_name_version_sep_failure(fragment: str, canonical_name: str) -> None:
with pytest.raises(ValueError) as ctx:
_find_name_version_sep(fragment, canonical_name)
message = f"{fragment} does not match {canonical_name}"
assert str(ctx.value) == message
@pytest.mark.parametrize(
"fragment, canonical_name, expected",
[
# Trivial.
("pip-18.0", "pip", "18.0"),
("zope-interface-4.5.0", "zope-interface", "4.5.0"),
# Canonicalized name match non-canonicalized egg info. (pypa/pip#5870)
("Jinja2-2.10", "jinja2", "2.10"),
("zope.interface-4.5.0", "zope-interface", "4.5.0"),
("zope_interface-4.5.0", "zope-interface", "4.5.0"),
# Should be smart enough to parse ambiguous names from the provided
# package name.
("foo-2-2", "foo", "2-2"),
("foo-2-2", "foo-2", "2"),
("zope.interface--4.5.0", "zope-interface", "-4.5.0"),
("zope.interface--", "zope-interface", "-"),
# Should be able to detect collapsed characters in the egg info.
("foo--bar-1.0", "foo-bar", "1.0"),
("foo-_bar-1.0", "foo-bar", "1.0"),
# Invalid.
("the-package-name-8.19", "does-not-match", None),
("zope.interface.-4.5.0", "zope.interface", None),
("zope.interface-", "zope-interface", None),
("zope.interface4.5.0", "zope-interface", None),
("zope.interface.4.5.0", "zope-interface", None),
("zope.interface.-4.5.0", "zope-interface", None),
("zope.interface", "zope-interface", None),
],
)
def test_extract_version_from_fragment(
fragment: str, canonical_name: str, expected: str | None
) -> None:
version = _extract_version_from_fragment(fragment, canonical_name)
assert version == expected
| TestPackageFinder |
python | jmcnamara__XlsxWriter | xlsxwriter/theme.py | {
"start": 16131,
"end": 17651
} | class ____:
"""
A class for writing the Excel XLSX Theme file.
"""
###########################################################################
#
# Public API.
#
###########################################################################
def __init__(self) -> None:
"""
Constructor.
"""
super().__init__()
self.fh = None
self.internal_fh = False
###########################################################################
#
# Private API.
#
###########################################################################
def _assemble_xml_file(self, theme: str) -> None:
# Assemble and write the XML file.
self._write_theme_file(theme)
if self.internal_fh:
self.fh.close()
def _set_xml_writer(self, filename) -> None:
# Set the XML writer filehandle for the object.
if isinstance(filename, StringIO):
self.internal_fh = False
self.fh = filename
else:
self.internal_fh = True
# pylint: disable=consider-using-with
self.fh = open(filename, mode="w", encoding="utf-8")
###########################################################################
#
# XML methods.
#
###########################################################################
def _write_theme_file(self, theme: str) -> None:
# Write a default theme.xml file.
self.fh.write(theme)
| Theme |
python | ray-project__ray | python/ray/dashboard/modules/reporter/tests/test_gpu_providers.py | {
"start": 3232,
"end": 3958
} | class ____(unittest.TestCase):
"""Test abstract GpuProvider class."""
def test_decode_bytes(self):
"""Test _decode method with bytes input."""
result = GpuProvider._decode(b"test string")
self.assertEqual(result, "test string")
def test_decode_string(self):
"""Test _decode method with string input."""
result = GpuProvider._decode("test string")
self.assertEqual(result, "test string")
def test_abstract_methods_not_implemented(self):
"""Test that abstract methods raise NotImplementedError."""
class IncompleteProvider(GpuProvider):
pass
with self.assertRaises(TypeError):
IncompleteProvider()
| TestGpuProvider |
python | huggingface__transformers | src/transformers/models/zoedepth/modeling_zoedepth.py | {
"start": 31590,
"end": 34662
} | class ____(nn.Module):
"""Equivalent implementation of nn.MultiheadAttention with `batch_first=True`."""
# Ignore copy
def __init__(self, hidden_size, num_attention_heads, dropout):
super().__init__()
if hidden_size % num_attention_heads != 0:
raise ValueError(
f"The hidden size ({hidden_size}) is not a multiple of the number of attention "
f"heads ({num_attention_heads})"
)
self.num_attention_heads = num_attention_heads
self.attention_head_size = int(hidden_size / num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(hidden_size, self.all_head_size)
self.key = nn.Linear(hidden_size, self.all_head_size)
self.value = nn.Linear(hidden_size, self.all_head_size)
self.out_proj = nn.Linear(hidden_size, hidden_size)
self.dropout = nn.Dropout(dropout)
def forward(
self,
queries: torch.Tensor,
keys: torch.Tensor,
values: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
batch_size, seq_length, _ = queries.shape
query_layer = (
self.query(queries)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
key_layer = (
self.key(keys).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
)
value_layer = (
self.value(values).view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in ZoeDepthModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
context_layer = self.out_proj(context_layer)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
| ZoeDepthMultiheadAttention |
python | pytorch__pytorch | test/dynamo/test_modules.py | {
"start": 15691,
"end": 16103
} | class ____(LazyModuleMixin, torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def initialize_parameters(self, input):
with torch.no_grad():
self._param = torch.nn.Parameter(torch.empty(input[0].shape).fill_(0.5))
def forward(self, input):
x = 0
for i in range(len(input)):
x = x + input[i]
return x
| LazyLayerWithListInput |
python | ray-project__ray | python/ray/_private/tensor_serialization_utils.py | {
"start": 122,
"end": 5527
} | class ____(UserWarning):
"""
Warning for unsafe or failed zero-copy tensor serialization/deserialization.
"""
pass
warnings.filterwarnings("once", category=ZeroCopyTensorsWarning)
def _zero_copy_tensors_deserializer(
np_array: "np.ndarray", dtype_str: str, shape: Tuple[int, ...], device_str: str
) -> "torch.Tensor":
"""
Reconstructs a torch.Tensor from a zero-copy NumPy byte array.
Args:
np_array: 1D uint8 NumPy array of the original tensor's raw bytes.
dtype_str: Full string representation of the original tensor's dtype (e.g., 'torch.float32').
shape: The original shape of the tensor before serialization.
device_str: String representation of the original device (e.g., 'cpu', 'cuda:0').
Returns:
Reconstructed torch.Tensor on the specified device if successful;
otherwise, returns the input np_array unchanged and issues a warning.
Raises:
ImportError/DeserializationError: If deserialization fails for any reason (e.g., missing PyTorch
dtype mismatch, shape inconsistency, device error, etc.).
"""
try:
import torch
except ImportError as e:
raise ImportError(
"Zero-copy tensor deserialization failed: PyTorch is not installed."
) from e
try:
# Step 1: Convert uint8 numpy array back to torch tensor
uint8_tensor = torch.from_numpy(np_array)
# Step 2: Restore original dtype
dtype_name = dtype_str.split(".")[-1]
if not hasattr(torch, dtype_name):
raise ValueError(f"Invalid or unsupported dtype string: {dtype_str}")
original_dtype = getattr(torch, dtype_name)
# Compute number of bytes per element
dtype_size = torch.tensor([], dtype=original_dtype).element_size()
if np_array.size % dtype_size != 0:
raise ValueError(
f"Byte array size ({np_array.size}) is not divisible by "
f"dtype size ({dtype_size}) for dtype {dtype_str}"
)
# Step 3: Reshape and reinterpret bytes as target dtype
restored_tensor = uint8_tensor.view(original_dtype).reshape(shape)
# Step 4: Move to target device
return restored_tensor.to(device=device_str)
except Exception as e:
from ray._private.serialization import DeserializationError
raise DeserializationError(
f"Failed to deserialize zero-copy tensor from byte array. "
f"Input dtype={dtype_str}, shape={shape}, device={device_str}. "
f"Underlying error: {type(e).__name__}: {e}"
) from e
def zero_copy_tensors_reducer(tensor: "torch.Tensor") -> Tuple[Any, Tuple[Any, ...]]:
"""Pickle serializer for zero-copy serialization of read-only torch.Tensor.
This serializer aims to avoid copying tensor data by using a NumPy uint8 view,
which enables pickle5's out-of-band buffer transmission. However, true zero-copy
is only possible when the input tensor is already:
- On CPU,
- Detached from the computation graph (no gradients),
- Contiguous in memory.
If the input tensor does **not** meet these conditions, this function will:
- Call `.detach()` to remove gradient information,
- Move the tensor to CPU (copying data if it's on GPU or another device),
- Make the tensor contiguous (copying data if it's non-contiguous).
These operations may incur one or two full copies of the tensor data,
negating zero-copy benefits. A warning is issued in such cases.
Args:
tensor: The input torch.Tensor to serialize. Can be on any device,
with or without gradients, contiguous or not — but zero-copy
is only achieved if it is already CPU, detached, and contiguous.
Returns:
A tuple (deserializer_callable, args_tuple) suitable for pickle.
"""
warnings.warn(
"Zero-copy tensor serialization is enabled, but it only works safely for read-only tensors "
"(detached, no gradients, contiguous). Modifiable or non-contiguous tensors may cause data corruption.",
ZeroCopyTensorsWarning,
stacklevel=3,
)
import torch
# Detach the tensor from gradients and computation graph.
# Move it to cpu (this is a noop if the tensor is already in main memory, but will create a copy if the
# the tensor is on an accelerator).
# Ensure that the tensor is contiguous. If the tensor is not contiguous, this will create a contiguous
# copy.
cpu_tensor = tensor.detach().cpu()
if not cpu_tensor.is_contiguous():
warnings.warn(
"The input tensor is non-contiguous. A copy will be made to ensure contiguity. "
"For zero-copy serialization, please ensure the tensor is contiguous before passing it "
"(e.g., by calling `.contiguous()`).",
ZeroCopyTensorsWarning,
stacklevel=3,
)
cpu_tensor = cpu_tensor.contiguous()
# Flatten to 1D for safe uint8 view (handles scalars)
flat_tensor = cpu_tensor.reshape(-1)
# View as uint8 bytes
uint8_view = flat_tensor.view(torch.uint8)
np_array = uint8_view.numpy()
return _zero_copy_tensors_deserializer, (
np_array,
str(tensor.dtype),
tuple(tensor.shape),
str(tensor.device),
)
| ZeroCopyTensorsWarning |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methodOverride5.py | {
"start": 262,
"end": 397
} | class ____(Generic[*Ts]):
def method_1(self, *args: *Ts) -> None: ...
def method_2(self, *args: *tuple[*Ts]) -> None: ...
| Parent |
python | tensorflow__tensorflow | tensorflow/python/distribute/failure_handling/failure_handling.py | {
"start": 9450,
"end": 9919
} | class ____(TerminationConfig):
"""Configurations for Borg."""
def __init__( # pylint: disable=super-init-not-called
self,
termination_watcher_fn=None,
exit_fn=None,
grace_period=None,
save_fn=None):
self.termination_watcher_fn = termination_watcher_fn
default_exit_fn = lambda: sys.exit(42)
self.exit_fn = exit_fn or default_exit_fn
self.grace_period = grace_period or 0
self.save_fn = save_fn
| BorgTerminationConfig |
python | pyca__cryptography | tests/hazmat/primitives/test_serialization.py | {
"start": 2180,
"end": 4225
} | class ____:
@pytest.mark.parametrize(
("key_path", "password"),
[
(["DER_Serialization", "enc-rsa-pkcs8.der"], bytearray(b"foobar")),
(["DER_Serialization", "enc2-rsa-pkcs8.der"], bytearray(b"baz")),
(["DER_Serialization", "unenc-rsa-pkcs8.der"], None),
(["DER_Serialization", "testrsa.der"], None),
],
)
def test_load_der_rsa_private_key(self, key_path, password, backend):
_skip_fips_format(key_path, password, backend)
data = load_vectors_from_file(
os.path.join("asymmetric", *key_path),
lambda derfile: derfile.read(),
mode="rb",
)
key = load_der_private_key(
bytearray(data), password, unsafe_skip_rsa_key_validation=True
)
assert key
assert isinstance(key, rsa.RSAPrivateKey)
_check_rsa_private_numbers(key.private_numbers())
@pytest.mark.parametrize(
("key_path", "password"),
[
(
["PEM_Serialization", "rsa_private_key.pem"],
bytearray(b"123456"),
),
(["PKCS8", "unenc-rsa-pkcs8.pem"], None),
(["PKCS8", "enc-rsa-pkcs8.pem"], bytearray(b"foobar")),
(["PKCS8", "enc2-rsa-pkcs8.pem"], bytearray(b"baz")),
(
["Traditional_OpenSSL_Serialization", "key1.pem"],
bytearray(b"123456"),
),
],
)
def test_load_pem_rsa_private_key(self, key_path, password, backend):
_skip_fips_format(key_path, password, backend)
data = load_vectors_from_file(
os.path.join("asymmetric", *key_path),
lambda pemfile: pemfile.read(),
mode="rb",
)
key = load_pem_private_key(
bytearray(data), password, unsafe_skip_rsa_key_validation=True
)
assert key
assert isinstance(key, rsa.RSAPrivateKey)
_check_rsa_private_numbers(key.private_numbers())
| TestBufferProtocolSerialization |
python | getsentry__sentry | tests/sentry/api/endpoints/test_project_rules.py | {
"start": 3369,
"end": 4979
class ____(ProjectRuleBaseTestCase):
    """Checks that get_max_alerts() honors the override settings and the
    "more-*-alerts" feature flags for both fast and slow conditions."""

    @override_settings(MAX_SLOW_CONDITION_ISSUE_ALERTS=1)
    def test_get_max_alerts_slow(self) -> None:
        # Without the feature flag, the base slow-condition cap applies.
        assert get_max_alerts(self.project, "slow") == 1

    @with_feature("organizations:more-slow-alerts")
    @override_settings(MAX_SLOW_CONDITION_ISSUE_ALERTS=1)
    @override_settings(MAX_MORE_SLOW_CONDITION_ISSUE_ALERTS=2)
    def test_get_max_alerts_more_slow(self) -> None:
        # The feature flag switches to the higher "more" cap.
        assert get_max_alerts(self.project, "slow") == 2

    @override_settings(MAX_FAST_CONDITION_ISSUE_ALERTS=1)
    def test_get_max_alerts_fast(self) -> None:
        assert get_max_alerts(self.project, "fast") == 1

    @with_feature("organizations:more-fast-alerts")
    @override_settings(MAX_FAST_CONDITION_ISSUE_ALERTS=1)
    @override_settings(MAX_MORE_FAST_CONDITION_ISSUE_ALERTS=2)
    def test_get_max_alerts_more_fast_with_group_processing(self) -> None:
        assert get_max_alerts(self.project, "fast") == 2

    @override_settings(MAX_FAST_CONDITION_ISSUE_ALERTS=1)
    @override_settings(MAX_MORE_FAST_CONDITION_ISSUE_ALERTS=2)
    def test_get_max_alerts_fast_with_group_processing(self) -> None:
        # The higher cap is configured but the flag is absent, so it is ignored.
        assert get_max_alerts(self.project, "fast") == 1

    @override_settings(MAX_SLOW_CONDITION_ISSUE_ALERTS=1)
    @override_settings(MAX_MORE_SLOW_CONDITION_ISSUE_ALERTS=2)
    def test_get_max_alerts_slow_with_group_processing(self) -> None:
        assert get_max_alerts(self.project, "slow") == 1
| GetMaxAlertsTest |
python | mlflow__mlflow | mlflow/store/model_registry/rest_store.py | {
"start": 1659,
"end": 21826
class ____(BaseRestStore):
    """
    Client for a remote model registry server accessed via REST API calls

    Args:
        get_host_creds: Method to be invoked prior to every REST request to get the
            :py:class:`mlflow.rest_utils.MlflowHostCreds` for the request. Note that this
            is a function so that we can obtain fresh credentials in the case of expiry.
    """
    def _get_response_from_method(self, method):
        # Each generated protobuf service method exposes its response type as
        # an inner ``Response`` message class.
        return method.Response()
    def _get_endpoint_from_method(self, method):
        # (endpoint path, HTTP verb) for the given protobuf method.
        return _METHOD_TO_INFO[method]
    def _get_all_endpoints_from_method(self, method):
        # Every (endpoint path, HTTP verb) registered for the method; used
        # when calling with ``call_all_endpoints=True``.
        return _METHOD_TO_ALL_INFO[method]
    def _get_webhook_endpoint_from_method(self, method):
        # (endpoint path, HTTP verb) for webhook-specific protobuf methods.
        return _WEBHOOK_METHOD_TO_INFO[method]
    def _call_webhook_endpoint(
        self,
        api,
        json_body: str | None = None,
        webhook_id: str | None = None,
    ):
        """Invoke a webhook REST endpoint.

        Args:
            api: Protobuf service method identifying the endpoint.
            json_body: JSON-serialized request body, if any.
            webhook_id: If provided, substituted into the endpoint's
                ``{webhook_id}`` path template.

        Returns:
            The parsed protobuf response message for ``api``.
        """
        endpoint, method = self._get_webhook_endpoint_from_method(api)
        if webhook_id:
            endpoint = endpoint.format(webhook_id=webhook_id)
        response_proto = self._get_response_from_method(api)
        return call_endpoint(self.get_host_creds(), endpoint, method, json_body, response_proto)
    # CRUD API for RegisteredModel objects
    def create_registered_model(self, name, tags=None, description=None, deployment_job_id=None):
        """
        Create a new registered model in backend store.

        Args:
            name: Name of the new model. This is expected to be unique in the backend store.
            tags: A list of :py:class:`mlflow.entities.model_registry.RegisteredModelTag`
                instances associated with this registered model.
            description: Description of the model.
            deployment_job_id: Optional deployment job ID.

        Returns:
            A single object of :py:class:`mlflow.entities.model_registry.RegisteredModel`
            created in the backend.
        """
        proto_tags = [tag.to_proto() for tag in tags or []]
        # NOTE(review): deployment_job_id is accepted for interface parity but is
        # not included in the request proto below — confirm it is only honored by
        # other store implementations.
        req_body = message_to_json(
            CreateRegisteredModel(name=name, tags=proto_tags, description=description)
        )
        response_proto = self._call_endpoint(CreateRegisteredModel, req_body)
        return RegisteredModel.from_proto(response_proto.registered_model)
    def update_registered_model(self, name, description, deployment_job_id=None):
        """
        Update description of the registered model.

        Args:
            name: Registered model name.
            description: New description.
            deployment_job_id: Optional deployment job ID (not forwarded in this
                request — see note in ``create_registered_model``).

        Returns:
            A single updated :py:class:`mlflow.entities.model_registry.RegisteredModel` object.
        """
        req_body = message_to_json(UpdateRegisteredModel(name=name, description=description))
        response_proto = self._call_endpoint(UpdateRegisteredModel, req_body)
        return RegisteredModel.from_proto(response_proto.registered_model)
    def rename_registered_model(self, name, new_name):
        """
        Rename the registered model.

        Args:
            name: Registered model name.
            new_name: New proposed name.

        Returns:
            A single updated :py:class:`mlflow.entities.model_registry.RegisteredModel` object.
        """
        req_body = message_to_json(RenameRegisteredModel(name=name, new_name=new_name))
        response_proto = self._call_endpoint(RenameRegisteredModel, req_body)
        return RegisteredModel.from_proto(response_proto.registered_model)
    def delete_registered_model(self, name):
        """
        Delete the registered model.
        Backend raises exception if a registered model with given name does not exist.

        Args:
            name: Registered model name.

        Returns:
            None
        """
        req_body = message_to_json(DeleteRegisteredModel(name=name))
        self._call_endpoint(DeleteRegisteredModel, req_body)
    def search_registered_models(
        self, filter_string=None, max_results=None, order_by=None, page_token=None
    ):
        """
        Search for registered models in backend that satisfy the filter criteria.

        Args:
            filter_string: Filter query string, defaults to searching all registered models.
            max_results: Maximum number of registered models desired.
            order_by: List of column names with ASC|DESC annotation, to be used for ordering
                matching search results.
            page_token: Token specifying the next page of results. It should be obtained from
                a ``search_registered_models`` call.

        Returns:
            A PagedList of :py:class:`mlflow.entities.model_registry.RegisteredModel` objects
            that satisfy the search expressions. The pagination token for the next page can be
            obtained via the ``token`` attribute of the object.
        """
        req_body = message_to_json(
            SearchRegisteredModels(
                filter=filter_string,
                max_results=max_results,
                order_by=order_by,
                page_token=page_token,
            )
        )
        response_proto = self._call_endpoint(SearchRegisteredModels, req_body)
        registered_models = [
            RegisteredModel.from_proto(registered_model)
            for registered_model in response_proto.registered_models
        ]
        return PagedList(registered_models, response_proto.next_page_token)
    def get_registered_model(self, name):
        """
        Get registered model instance by name.

        Args:
            name: Registered model name.

        Returns:
            A single :py:class:`mlflow.entities.model_registry.RegisteredModel` object.
        """
        req_body = message_to_json(GetRegisteredModel(name=name))
        response_proto = self._call_endpoint(GetRegisteredModel, req_body)
        return RegisteredModel.from_proto(response_proto.registered_model)
    def get_latest_versions(self, name, stages=None):
        """
        Latest version models for each requested stage. If no ``stages`` argument is provided,
        returns the latest version for each stage.

        Args:
            name: Registered model name.
            stages: List of desired stages. If input list is None, return latest versions for
                each stage.

        Returns:
            List of :py:class:`mlflow.entities.model_registry.ModelVersion` objects.
        """
        req_body = message_to_json(GetLatestVersions(name=name, stages=stages))
        # Tries every endpoint registered for this method (see
        # _get_all_endpoints_from_method) rather than a single route.
        response_proto = self._call_endpoint(GetLatestVersions, req_body, call_all_endpoints=True)
        return [
            ModelVersion.from_proto(model_version)
            for model_version in response_proto.model_versions
        ]
    def set_registered_model_tag(self, name, tag):
        """
        Set a tag for the registered model.

        Args:
            name: Registered model name.
            tag: :py:class:`mlflow.entities.model_registry.RegisteredModelTag` instance to log.

        Returns:
            None
        """
        req_body = message_to_json(SetRegisteredModelTag(name=name, key=tag.key, value=tag.value))
        self._call_endpoint(SetRegisteredModelTag, req_body)
    def delete_registered_model_tag(self, name, key):
        """
        Delete a tag associated with the registered model.

        Args:
            name: Registered model name.
            key: Registered model tag key.

        Returns:
            None
        """
        req_body = message_to_json(DeleteRegisteredModelTag(name=name, key=key))
        self._call_endpoint(DeleteRegisteredModelTag, req_body)
    # CRUD API for ModelVersion objects
    def create_model_version(
        self,
        name,
        source,
        run_id=None,
        tags=None,
        run_link=None,
        description=None,
        local_model_path=None,
        model_id: str | None = None,
    ):
        """
        Create a new model version from given source and run ID.

        Args:
            name: Registered model name.
            source: URI indicating the location of the model artifacts.
            run_id: Run ID from MLflow tracking server that generated the model.
            tags: A list of :py:class:`mlflow.entities.model_registry.ModelVersionTag`
                instances associated with this model version.
            run_link: Link to the run from an MLflow tracking server that generated this model.
            description: Description of the version.
            local_model_path: Unused.
            model_id: The ID of the model (from an Experiment) that is being promoted to a
                registered model version, if applicable.

        Returns:
            A single object of :py:class:`mlflow.entities.model_registry.ModelVersion`
            created in the backend.
        """
        proto_tags = [tag.to_proto() for tag in tags or []]
        req_body = message_to_json(
            CreateModelVersion(
                name=name,
                source=source,
                run_id=run_id,
                run_link=run_link,
                tags=proto_tags,
                description=description,
                model_id=model_id,
            )
        )
        response_proto = self._call_endpoint(CreateModelVersion, req_body)
        return ModelVersion.from_proto(response_proto.model_version)
    def transition_model_version_stage(self, name, version, stage, archive_existing_versions):
        """
        Update model version stage.

        Args:
            name: Registered model name.
            version: Registered model version.
            stage: New desired stage for this model version.
            archive_existing_versions: If this flag is set to ``True``, all existing model
                versions in the stage will be automatically moved to the "archived" stage. Only
                valid when ``stage`` is ``"staging"`` or ``"production"`` otherwise an error will
                be raised.

        Returns:
            A single :py:class:`mlflow.entities.model_registry.ModelVersion` object.
        """
        req_body = message_to_json(
            TransitionModelVersionStage(
                name=name,
                version=str(version),
                stage=stage,
                archive_existing_versions=archive_existing_versions,
            )
        )
        response_proto = self._call_endpoint(TransitionModelVersionStage, req_body)
        return ModelVersion.from_proto(response_proto.model_version)
    def update_model_version(self, name, version, description):
        """
        Update metadata associated with a model version in backend.

        Args:
            name: Registered model name.
            version: Registered model version.
            description: New model description.

        Returns:
            A single :py:class:`mlflow.entities.model_registry.ModelVersion` object.
        """
        req_body = message_to_json(
            UpdateModelVersion(name=name, version=str(version), description=description)
        )
        response_proto = self._call_endpoint(UpdateModelVersion, req_body)
        return ModelVersion.from_proto(response_proto.model_version)
    def delete_model_version(self, name, version):
        """
        Delete model version in backend.

        Args:
            name: Registered model name.
            version: Registered model version.

        Returns:
            None
        """
        req_body = message_to_json(DeleteModelVersion(name=name, version=str(version)))
        self._call_endpoint(DeleteModelVersion, req_body)
    def get_model_version(self, name, version):
        """
        Get the model version instance by name and version.

        Args:
            name: Registered model name.
            version: Registered model version.

        Returns:
            A single :py:class:`mlflow.entities.model_registry.ModelVersion` object.
        """
        req_body = message_to_json(GetModelVersion(name=name, version=str(version)))
        response_proto = self._call_endpoint(GetModelVersion, req_body)
        return ModelVersion.from_proto(response_proto.model_version)
    def get_model_version_download_uri(self, name, version):
        """
        Get the download location in Model Registry for this model version.
        NOTE: For first version of Model Registry, since the models are not copied over to another
        location, download URI points to input source path.

        Args:
            name: Registered model name.
            version: Registered model version.

        Returns:
            A single URI location that allows reads for downloading.
        """
        req_body = message_to_json(GetModelVersionDownloadUri(name=name, version=str(version)))
        response_proto = self._call_endpoint(GetModelVersionDownloadUri, req_body)
        return response_proto.artifact_uri
    def search_model_versions(
        self, filter_string=None, max_results=None, order_by=None, page_token=None
    ):
        """
        Search for model versions in backend that satisfy the filter criteria.

        Args:
            filter_string: A filter string expression. Currently supports a single filter
                condition either name of model like ``name = 'model_name'`` or
                ``run_id = '...'``.
            max_results: Maximum number of model versions desired.
            order_by: List of column names with ASC|DESC annotation, to be used for ordering
                matching search results.
            page_token: Token specifying the next page of results. It should be obtained from
                a ``search_model_versions`` call.

        Returns:
            A PagedList of :py:class:`mlflow.entities.model_registry.ModelVersion`
            objects that satisfy the search expressions. The pagination token for the next
            page can be obtained via the ``token`` attribute of the object.
        """
        req_body = message_to_json(
            SearchModelVersions(
                filter=filter_string,
                max_results=max_results,
                order_by=order_by,
                page_token=page_token,
            )
        )
        response_proto = self._call_endpoint(SearchModelVersions, req_body)
        model_versions = [ModelVersion.from_proto(mvd) for mvd in response_proto.model_versions]
        return PagedList(model_versions, response_proto.next_page_token)
    def set_model_version_tag(self, name, version, tag):
        """
        Set a tag for the model version.

        Args:
            name: Registered model name.
            version: Registered model version.
            tag: :py:class:`mlflow.entities.model_registry.ModelVersionTag` instance to log.

        Returns:
            None
        """
        req_body = message_to_json(
            SetModelVersionTag(name=name, version=str(version), key=tag.key, value=tag.value)
        )
        self._call_endpoint(SetModelVersionTag, req_body)
    def delete_model_version_tag(self, name, version, key):
        """
        Delete a tag associated with the model version.

        Args:
            name: Registered model name.
            version: Registered model version.
            key: Tag key.

        Returns:
            None
        """
        req_body = message_to_json(DeleteModelVersionTag(name=name, version=str(version), key=key))
        self._call_endpoint(DeleteModelVersionTag, req_body)
    def set_registered_model_alias(self, name, alias, version):
        """
        Set a registered model alias pointing to a model version.

        Args:
            name: Registered model name.
            alias: Name of the alias.
            version: Registered model version number.

        Returns:
            None
        """
        req_body = message_to_json(
            SetRegisteredModelAlias(name=name, alias=alias, version=str(version))
        )
        self._call_endpoint(SetRegisteredModelAlias, req_body)
    def delete_registered_model_alias(self, name, alias):
        """
        Delete an alias associated with a registered model.

        Args:
            name: Registered model name.
            alias: Name of the alias.

        Returns:
            None
        """
        req_body = message_to_json(DeleteRegisteredModelAlias(name=name, alias=alias))
        self._call_endpoint(DeleteRegisteredModelAlias, req_body)
    def get_model_version_by_alias(self, name, alias):
        """
        Get the model version instance by name and alias.

        Args:
            name: Registered model name.
            alias: Name of the alias.

        Returns:
            A single :py:class:`mlflow.entities.model_registry.ModelVersion` object.
        """
        req_body = message_to_json(GetModelVersionByAlias(name=name, alias=alias))
        response_proto = self._call_endpoint(GetModelVersionByAlias, req_body)
        return ModelVersion.from_proto(response_proto.model_version)
    # Webhook APIs
    def create_webhook(
        self,
        name: str,
        url: str,
        events: list[WebhookEvent],
        description: str | None = None,
        secret: str | None = None,
        status: WebhookStatus | None = None,
    ) -> Webhook:
        """Create a webhook subscribed to the given events.

        Args:
            name: Display name for the webhook.
            url: URL the backend will deliver events to.
            events: Event types that trigger this webhook.
            description: Optional human-readable description.
            secret: Optional signing secret for payload verification.
            status: Optional initial :py:class:`WebhookStatus`; backend default if None.

        Returns:
            The created :py:class:`Webhook`.
        """
        req_body = message_to_json(
            CreateWebhook(
                name=name,
                url=url,
                events=[e.to_proto() for e in events],
                description=description,
                secret=secret,
                status=status.to_proto() if status else None,
            )
        )
        response_proto = self._call_webhook_endpoint(CreateWebhook, req_body)
        return Webhook.from_proto(response_proto.webhook)
    def get_webhook(self, webhook_id: str) -> Webhook:
        """Fetch a single webhook by its ID."""
        response_proto = self._call_webhook_endpoint(GetWebhook, webhook_id=webhook_id)
        return Webhook.from_proto(response_proto.webhook)
    def list_webhooks(
        self,
        max_results: int | None = None,
        page_token: str | None = None,
    ) -> PagedList[Webhook]:
        """List webhooks, paginated via ``page_token`` like the search APIs."""
        req_body = message_to_json(ListWebhooks(max_results=max_results, page_token=page_token))
        response_proto = self._call_webhook_endpoint(ListWebhooks, req_body)
        webhooks = [Webhook.from_proto(webhook) for webhook in response_proto.webhooks]
        return PagedList(webhooks, response_proto.next_page_token)
    def update_webhook(
        self,
        webhook_id: str,
        name: str | None = None,
        description: str | None = None,
        url: str | None = None,
        events: list[WebhookEvent] | None = None,
        secret: str | None = None,
        status: WebhookStatus | None = None,
    ) -> Webhook:
        """Update fields of an existing webhook; None arguments are omitted
        from the request body.

        Returns:
            The updated :py:class:`Webhook`.
        """
        req_body = message_to_json(
            UpdateWebhook(
                name=name,
                description=description,
                url=url,
                events=[e.to_proto() for e in events] if events else None,
                secret=secret,
                status=status.to_proto() if status else None,
            )
        )
        response_proto = self._call_webhook_endpoint(UpdateWebhook, req_body, webhook_id=webhook_id)
        return Webhook.from_proto(response_proto.webhook)
    def delete_webhook(self, webhook_id: str) -> None:
        """Delete the webhook with the given ID."""
        self._call_webhook_endpoint(DeleteWebhook, webhook_id=webhook_id)
    def test_webhook(self, webhook_id: str, event: WebhookEvent | None = None) -> WebhookTestResult:
        """
        Test the webhook by sending a test event to the specified URL.

        Args:
            webhook_id: The ID of the webhook to test.
            event: Optional event type to test. If not specified, uses the first event from webhook.

        Returns:
            WebhookTestResult indicating success/failure and response details
        """
        req_body = message_to_json(TestWebhook(event=event.to_proto() if event else None))
        response_proto = self._call_webhook_endpoint(TestWebhook, req_body, webhook_id=webhook_id)
        return WebhookTestResult.from_proto(response_proto.result)
| RestStore |
python | google__pytype | pytype/pytd/codegen/function.py | {
"start": 5764,
"end": 10950
class ____:
  """A mutable builder for pytd.Function values.

  Accumulates the signatures and decorator state of all same-named function
  definitions (overloads, property getters/setters/deleters) so they can be
  merged into a single pytd.Function.
  """
  # Function name shared by every overload.
  name: str
  # Collected signatures, one per (non-property) overload.
  sigs: list[pytd.Signature]
  is_abstract: bool = False
  is_coroutine: bool = False
  is_final: bool = False
  decorators: tuple[pytd.Alias, ...] = ()
  # Derived in __post_init__: property state, or None for plain functions.
  properties: _Properties | None = dataclasses.field(init=False)
  # Derived in __post_init__: property-decorator name -> _Property descriptor.
  prop_names: dict[str, _Property] = dataclasses.field(init=False)
  @classmethod
  def make(cls, fn: NameAndSig):
    """Build a single-signature instance from a parsed function definition."""
    return cls(
        name=fn.name,
        sigs=[fn.signature],
        is_abstract=fn.is_abstract,
        is_coroutine=fn.is_coroutine,
        is_final=fn.is_final,
        decorators=fn.decorators)
  def __post_init__(self):
    # Decorator names that mark this function as a property accessor
    # (presumably getter/setter/deleter names derived from self.name — see
    # _property_decorators).
    self.prop_names = _property_decorators(self.name)
    prop_decorators = [d for d in self.decorators if d.name in self.prop_names]
    if prop_decorators:
      # The initial signature doubles as the first property accessor.
      self.properties = _Properties()
      self.add_property(prop_decorators, self.sigs[0])
    else:
      self.properties = None
  def add_property(self, decorators, sig):
    """Add a property overload."""
    assert decorators
    # A single accessor cannot be, e.g., both a getter and a setter.
    if len(decorators) > 1:
      msg = "conflicting decorators " + ", ".join(d.name for d in decorators)
      raise PropertyDecoratorError(self.name, msg)
    decorator = decorators[0].name
    prop = self.prop_names[decorator]
    # Count required vs. total params to check the accessor's arity
    # (e.g. a setter takes exactly self + value).
    min_params = max_params = 0
    for param in sig.params:
      min_params += int(not param.optional)
      max_params += 1
    if min_params <= prop.arity <= max_params:
      assert self.properties is not None
      self.properties.set(prop.type, sig, self.name)
    else:
      raise TypeError(
          f"Function '{self.name}' decorated by property decorator"
          f" @{decorator} must have {prop.arity} param(s), but actually has"
          f" {len(sig.params)}"
      )
  def add_overload(self, fn: NameAndSig) -> None:
    """Add an overloaded signature to a function."""
    if self.properties:
      # Once a function is a property, every overload must be an accessor;
      # accessors are tracked in self.properties rather than self.sigs.
      prop_decorators = [d for d in fn.decorators if d.name in self.prop_names]
      if not prop_decorators:
        raise OverloadedDecoratorError(self.name, "property")
      self.add_property(prop_decorators, fn.signature)
    else:
      self.sigs.append(fn.signature)
    self._check_overload_consistency(fn)
  def _check_overload_consistency(self, fn: NameAndSig) -> None:
    """Check if the new overload is consistent with existing."""
    # Some decorators need to be consistent for all overloads.
    if self.is_coroutine != fn.is_coroutine:
      raise OverloadedDecoratorError(self.name, "coroutine")
    if self.is_final != fn.is_final:
      raise OverloadedDecoratorError(self.name, "final")
    if (_has_decorator(self, "staticmethod") !=
        _has_decorator(fn, "staticmethod")):
      raise OverloadedDecoratorError(self.name, "staticmethod")
    if _has_decorator(self, "classmethod") != _has_decorator(fn, "classmethod"):
      raise OverloadedDecoratorError(self.name, "classmethod")
    # It's okay for some property overloads to be abstract and others not.
    if not self.properties and self.is_abstract != fn.is_abstract:
      raise OverloadedDecoratorError(self.name, "abstractmethod")
def merge_method_signatures(
    name_and_sigs: list[NameAndSig],
) -> list[pytd.Function]:
  """Group the signatures by name, turning each group into a function.

  Args:
    name_and_sigs: Parsed function definitions, possibly containing several
      entries with the same name (overloads / property accessors).

  Returns:
    One pytd.Function per distinct name, with MethodKind and MethodFlag
    derived from names and decorators.
  """
  # First pass: bucket all definitions by name into _DecoratedFunction
  # builders, which validate overload/property consistency as they go.
  functions = {}
  for fn in name_and_sigs:
    if fn.name not in functions:
      functions[fn.name] = _DecoratedFunction.make(fn)
    else:
      functions[fn.name].add_overload(fn)
  methods = []
  for name, fn in functions.items():
    # staticmethod/classmethod decorators determine the MethodKind and are
    # stripped from the emitted decorator list; all others are kept.
    decorators = []
    is_staticmethod = is_classmethod = False
    for decorator in fn.decorators:
      if decorator.type.name == "staticmethod":
        is_staticmethod = True
      elif decorator.type.name == "classmethod":
        is_classmethod = True
      else:
        decorators.append(decorator)
    # __new__ and __init_subclass__ are implicitly static/class methods.
    if name == "__new__" or is_staticmethod:
      kind = pytd.MethodKind.STATICMETHOD
    elif name == "__init_subclass__" or is_classmethod:
      kind = pytd.MethodKind.CLASSMETHOD
    elif fn.properties:
      kind = pytd.MethodKind.PROPERTY
      # If we have only setters and/or deleters, replace them with a single
      # method foo(...) -> Any, so that we infer a constant `foo: Any` even if
      # the original method signatures are all `foo(...) -> None`. (If we have a
      # getter we use its return type, but in the absence of a getter we want to
      # fall back on Any since we cannot say anything about what the setter sets
      # the type of foo to.)
      if fn.properties.getter:
        fn.sigs = [fn.properties.getter]
      else:
        sig = fn.properties.setter or fn.properties.deleter
        assert sig is not None
        fn.sigs = [sig.Replace(return_type=pytd.AnythingType())]
    else:
      # Other decorators do not affect the kind
      kind = pytd.MethodKind.METHOD
    flags = pytd.MethodFlag.NONE
    if fn.is_abstract:
      flags |= pytd.MethodFlag.ABSTRACT
    if fn.is_coroutine:
      flags |= pytd.MethodFlag.COROUTINE
    if fn.is_final:
      flags |= pytd.MethodFlag.FINAL
    methods.append(pytd.Function(name, tuple(fn.sigs), kind, flags,
                                 tuple(decorators)))
  return methods
| _DecoratedFunction |
python | wandb__wandb | wandb/sdk/interface/interface_queue.py | {
"start": 423,
"end": 1736
class ____(InterfaceShared):
    """Queue-backed legacy implementation of InterfaceShared.

    This was used by legacy-service to pass messages back to itself before
    the existence of wandb-core, and may be removed once legacy-service is
    fully retired (including its use in `wandb sync`). Because it served the
    internal service, the "deliver" entry points — which only the client
    needs — are deliberately unimplemented.
    """

    def __init__(
        self,
        record_q: Queue[pb.Record] | None = None,
        result_q: Queue[pb.Result] | None = None,
        process: BaseProcess | None = None,
    ) -> None:
        self._process = process
        self.record_q = record_q
        self.result_q = result_q
        super().__init__()

    @override
    def _publish(self, record: pb.Record, *, nowait: bool = False) -> None:
        backend = self._process
        if backend and not backend.is_alive():
            raise Exception("The wandb backend process has shutdown")
        queue = self.record_q
        if queue:
            queue.put(record)

    @override
    async def deliver_async(
        self,
        record: pb.Record,
    ) -> MailboxHandle[pb.Result]:
        raise NotImplementedError

    @override
    def _deliver(self, record: pb.Record) -> MailboxHandle[pb.Result]:
        raise NotImplementedError
| InterfaceQueue |
python | numba__numba | numba/tests/test_ndarray_subclasses.py | {
"start": 3810,
"end": 7345
class ____(numba.core.datamodel.models.StructModel):
    """Data model for the ndarray-subclass type: mirrors the usual array
    struct members and appends one subclass-specific ``extra_field``."""
    def __init__(self, dmm, fe_type):
        ndim = fe_type.ndim
        members = [
            ('meminfo', types.MemInfoPointer(fe_type.dtype)),
            ('parent', types.pyobject),
            ('nitems', types.intp),
            ('itemsize', types.intp),
            ('data', types.CPointer(fe_type.dtype)),
            ('shape', types.UniTuple(types.intp, ndim)),
            ('strides', types.UniTuple(types.intp, ndim)),
            # The one member not present in the base array model.
            ('extra_field', types.intp),
        ]
        super(MyArrayTypeModel, self).__init__(dmm, fe_type, members)
@type_callable(MyArray)
def type_myarray(context):
    """Typing for MyArray(shape, dtype, buf): the result type takes its dtype
    and layout from ``buf`` and its ndim from the length of ``shape``."""
    def typer(shape, dtype, buf):
        out = MyArrayType(
            dtype=buf.dtype, ndim=len(shape), layout=buf.layout
        )
        return out
    return typer
@lower_builtin(MyArray, types.UniTuple, types.DType, types.Array)
def impl_myarray(context, builder, sig, args):
    """Lowering for MyArray(shape, dtype, buf): repackage ``buf``'s struct
    fields into a new array struct of the subclass type, sharing its data."""
    from numba.np.arrayobj import make_array, populate_array
    srcaryty = sig.args[-1]
    shape, dtype, buf = args
    srcary = make_array(srcaryty)(context, builder, value=buf)
    # Copy source array and remove the parent field to avoid boxer re-using
    # the original ndarray instance.
    retary = make_array(sig.return_type)(context, builder)
    populate_array(retary,
                   data=srcary.data,
                   shape=srcary.shape,
                   strides=srcary.strides,
                   itemsize=srcary.itemsize,
                   meminfo=srcary.meminfo)
    ret = retary._getvalue()
    # The new struct aliases srcary's meminfo, so take an extra NRT reference.
    context.nrt.incref(builder, sig.return_type, ret)
    return ret
@box(MyArrayType)
def box_array(typ, val, c):
    """Boxer for MyArrayType values: build the Python-level object through the
    NRT ndarray adaptor using the equivalent NumPy dtype."""
    assert c.context.enable_nrt
    np_dtype = numpy_support.as_dtype(typ.dtype)
    dtypeptr = c.env_manager.read_const(c.env_manager.add_const(np_dtype))
    newary = c.pyapi.nrt_adapt_ndarray_to_python(typ, val, dtypeptr)
    # Steals NRT ref
    c.context.nrt.decref(c.builder, typ, val)
    return newary
@overload_classmethod(MyArrayType, "_allocate")
def _ol_array_allocate(cls, allocsize, align):
    """Implements a Numba-only classmethod on the array type.

    Routes allocation through the external-allocator intrinsic below and
    logs the request so tests can observe it.
    """
    def impl(cls, allocsize, align):
        log("LOG _ol_array_allocate", allocsize, align)
        return allocator_MyArray(allocsize, align)
    return impl
@intrinsic
def allocator_MyArray(typingctx, allocsize, align):
    """Intrinsic allocating ``allocsize`` bytes aligned to ``align`` through
    NRT_MemInfo_alloc_safe_aligned_external, using the sample external
    allocator exposed by the NRT test helpers."""
    def impl(context, builder, sig, args):
        context.nrt._require_nrt()
        size, align = args
        mod = builder.module
        u32 = ir.IntType(32)
        voidptr = cgutils.voidptr_t
        # Fetch the sample external allocator provided by the NRT C code.
        get_alloc_fnty = ir.FunctionType(voidptr, ())
        get_alloc_fn = cgutils.get_or_insert_function(
            mod, get_alloc_fnty, name="_nrt_get_sample_external_allocator"
        )
        ext_alloc = builder.call(get_alloc_fn, ())
        fnty = ir.FunctionType(voidptr, [cgutils.intp_t, u32, voidptr])
        fn = cgutils.get_or_insert_function(
            mod, fnty, name="NRT_MemInfo_alloc_safe_aligned_external"
        )
        fn.return_value.add_attribute("noalias")
        # ``align`` may arrive as a Python int constant or an IR value.
        if isinstance(align, builtins.int):
            align = context.get_constant(types.uint32, align)
        else:
            assert align.type == u32, "align must be a uint32"
        call = builder.call(fn, [size, align, ext_alloc])
        call.name = "allocate_MyArray"
        return call
    mip = types.MemInfoPointer(types.voidptr)  # return untyped pointer
    sig = typing.signature(mip, allocsize, align)
    return sig, impl
| MyArrayTypeModel |
python | getsentry__sentry | src/sentry/issues/status_change_message.py | {
"start": 512,
"end": 1259
class ____:
    """Structured description of a group status change, keyed by issue
    fingerprint; ``to_dict`` produces the serializable message payload."""

    fingerprint: Sequence[str]
    project_id: int
    new_status: int
    new_substatus: int | None
    detector_id: int | None = None
    activity_data: dict[str, Any] | None = None
    update_date: datetime | None = None
    id: str = field(default_factory=lambda: uuid4().hex)

    def to_dict(self) -> StatusChangeMessageData:
        """Return this status change as a StatusChangeMessageData mapping."""
        payload: StatusChangeMessageData = {
            "fingerprint": self.fingerprint,
            "project_id": self.project_id,
            "new_status": self.new_status,
            "new_substatus": self.new_substatus,
            "detector_id": self.detector_id,
            "activity_data": self.activity_data,
            "update_date": self.update_date,
            "id": self.id,
        }
        return payload
| StatusChangeMessage |
python | kamyu104__LeetCode-Solutions | Python/split-strings-by-separator.py | {
"start": 42,
"end": 304
class ____(object):
    def splitWordsBySeparator(self, words, separator):
        """
        :type words: List[str]
        :type separator: str
        :rtype: List[str]
        """
        # Split every word on the separator and collect the non-empty pieces,
        # preserving their original order.
        result = []
        for word in words:
            for piece in word.split(separator):
                if piece:
                    result.append(piece)
        return result
| Solution |
python | pytorch__pytorch | test/quantization/eager/test_quantize_eager_qat.py | {
"start": 10872,
"end": 28859
} | class ____(QuantizationTestCase):
def setUp(self):
super().setUp()
self.embed_linear_data_train = [
[
torch.randint(0, 10, (12, 12), dtype=torch.long),
torch.randn((12, 1), dtype=torch.float),
]
for _ in range(2)
]
self.embed_data = [[torch.randint(0, 10, (12, 1))]]
def test_manual(self):
for qengine in supported_qengines:
with override_quantized_engine(qengine):
model = ManualLinearQATModel(qengine)
model = prepare_qat(model)
self.checkObservers(model)
test_only_train_fn(model, self.train_data)
model = convert(model)
def checkQuantized(model):
self.assertEqual(type(model.fc1), nnq.Linear)
self.assertEqual(type(model.fc2), nnq.Linear)
test_only_eval_fn(model, self.calib_data)
self.checkScriptable(model, self.calib_data)
self.checkNoQconfig(model)
checkQuantized(model)
model = quantize_qat(
ManualLinearQATModel(qengine), test_only_train_fn, [self.train_data]
)
checkQuantized(model)
def test_dropout(self):
for qengine in supported_qengines:
with override_quantized_engine(qengine):
model = ManualDropoutQATModel(qengine)
model = prepare_qat(model)
self.checkObservers(model)
test_only_train_fn(model, self.train_data)
model = convert(model)
def checkQuantized(model):
self.assertEqual(type(model.fc1), nnq.Linear)
self.assertEqual(type(model.dropout), nnq.Dropout)
test_only_eval_fn(model, self.calib_data)
self.checkScriptable(model, self.calib_data)
self.checkNoQconfig(model)
checkQuantized(model)
model = quantize_qat(
ManualDropoutQATModel(qengine),
test_only_train_fn,
[self.train_data],
)
checkQuantized(model)
def test_eval_only_fake_quant(self):
r"""Using FakeQuant in evaluation only mode,
this is useful for estimating accuracy loss when we quantize the
network
"""
for qengine in supported_qengines:
with override_quantized_engine(qengine):
model = ManualLinearQATModel(qengine)
model = prepare_qat(model)
self.checkObservers(model)
model.eval()
test_only_eval_fn(model, self.calib_data)
def test_conv_linear(self):
for qengine in supported_qengines:
with override_quantized_engine(qengine):
model = ManualConvLinearQATModel()
model = prepare_qat(model)
self.checkObservers(model)
test_only_train_fn(model, self.img_data_2d_train)
model = convert(model)
def checkQuantized(model):
self.assertEqual(type(model.conv), nnq.Conv2d)
self.assertEqual(type(model.fc1), nnq.Linear)
self.assertEqual(type(model.fc2), nnq.Linear)
test_only_eval_fn(model, self.img_data_2d)
self.checkScriptable(model, self.img_data_2d)
self.checkNoQconfig(model)
checkQuantized(model)
model = ManualConvLinearQATModel()
model = quantize_qat(
model, test_only_train_fn, [self.img_data_2d_train]
)
checkQuantized(model)
@skipIfNoXNNPACK
def test_conv_linear_symm(self):
r"""Same as test_conv_linear but with Symmetric quantization.
Supported only with qengine=qnnpack, which uses symmetric
kernels from xnnpack library."""
for qengine in supported_qengines:
if qengine != "qnnpack":
continue
with override_quantized_engine(qengine):
model = ManualConvLinearSymmQATModel()
model = prepare_qat(model)
self.checkObservers(model)
test_only_train_fn(model, self.img_data_2d_train)
model = convert(model)
def checkQuantized(model):
self.assertEqual(type(model.conv), nnq.Conv2d)
self.assertEqual(type(model.fc1), nnq.Linear)
self.assertEqual(type(model.fc2), nnq.Linear)
test_only_eval_fn(model, self.img_data_2d)
self.checkScriptable(model, self.img_data_2d)
self.checkNoQconfig(model)
checkQuantized(model)
model = ManualConvLinearSymmQATModel()
model = quantize_qat(
model, test_only_train_fn, [self.img_data_2d_train]
)
checkQuantized(model)
def test_dynamic_qat_linear(self):
for qengine in supported_qengines:
with override_quantized_engine(qengine):
# Dynamic QAT without memoryless observers should fail
with self.assertRaisesRegex(
ValueError,
"Dynamic QAT requires a memoryless observer."
+ "This means a MovingAverage observer with averaging constant equal to 1",
):
model = ManualLinearDynamicQATModel(default_qat_qconfig)
model = prepare_qat(model, mapping={torch.nn.Linear: nnqatd.Linear})
model = ManualLinearDynamicQATModel()
model = prepare_qat(model, mapping={torch.nn.Linear: nnqatd.Linear})
self.assertEqual(type(model.fc1), nnqatd.Linear)
self.assertEqual(type(model.fc2), nnqatd.Linear)
self.checkObservers(model)
test_only_train_fn(model, self.train_data)
model = convert(model, mapping={nnqatd.Linear: nnqd.Linear})
self.assertEqual(type(model.fc1), nnqd.Linear)
self.assertEqual(type(model.fc2), nnqd.Linear)
test_only_eval_fn(model, self.calib_data)
self.checkScriptable(model, self.calib_data)
self.checkNoQconfig(model)
def test_defused_embedding_bag_linear(self):
for qengine in supported_qengines:
with override_quantized_engine(qengine):
model = DeFusedEmbeddingBagLinear().train()
model = prepare_qat(model, mapping=get_embedding_qat_module_mappings())
self.checkObservers(model)
test_only_train_fn(model, self.embed_linear_data_train)
# make sure activation_post_process is inserted after Linear.
self.assertEqual(
type(model.linear.activation_post_process),
FusedMovingAvgObsFakeQuantize,
)
# make sure that Embedding has a noop for activation.
self.assertEqual(type(model.emb.activation_post_process), NoopObserver)
# make sure that FakeQuant zero_points are correct dtype
self.assertEqual(
model.emb.weight_fake_quant.zero_point.dtype, torch.float32
)
self.assertEqual(
model.linear.weight_fake_quant.zero_point.dtype, torch.int32
)
model = convert(
model, mapping=get_embedding_static_quant_module_mappings()
)
def checkQuantized(model):
# make sure Embedding is now a QuantizedEmbedding
self.assertEqual(type(model.emb), nn.quantized.Embedding)
# make sure Linear is now a QuantizedLinear
self.assertEqual(type(model.linear), nn.quantized.Linear)
test_only_eval_fn(model, self.embed_data)
self.checkScriptable(model, self.embed_data)
self.checkNoQconfig(model)
checkQuantized(model)
def test_embedding_bag_linear(self):
for qengine in supported_qengines:
with override_quantized_engine(qengine):
model = ManualEmbeddingBagLinear().train()
model = prepare_qat(model, mapping=get_embedding_qat_module_mappings())
self.checkObservers(model)
test_only_train_fn(model, self.embed_linear_data_train)
# make sure not activation_post_process is inserted for EmbeddingBag
self.assertFalse(hasattr(model, "activation_post_process"))
# make sure that FakeQuant zero_points are correct dtype
self.assertEqual(
model.emb.weight_fake_quant.zero_point.dtype, torch.float32
)
self.assertEqual(
model.linear.weight_fake_quant.zero_point.dtype, torch.int32
)
model = convert(
model, mapping=get_embedding_static_quant_module_mappings()
)
def checkQuantized(model):
# Make sure EmbeddingBag is now a quantized EmbeddingBag.
self.assertTrue(type(model.emb), nn.quantized.EmbeddingBag)
# Also test that Linear has been quantized.
self.assertTrue(type(model.linear), nnq.Linear)
test_only_eval_fn(model, self.embed_data)
self.checkScriptable(model, self.embed_data)
self.checkNoQconfig(model)
checkQuantized(model)
model = ManualEmbeddingBagLinear()
    def test_train_save_load_eval(self):
        r"""Test QAT flow of creating a model, doing QAT and saving the quantized state_dict
        During eval, we first call prepare_qat and convert on the model and then load the state_dict
        and compare results against original model
        """
        for qengine in supported_qengines:
            with override_quantized_engine(qengine):
                model = TwoLayerLinearModel()
                model = torch.ao.quantization.QuantWrapper(model)
                model.qconfig = torch.ao.quantization.get_default_qat_qconfig(qengine)
                model = prepare_qat(model)
                fq_state_dict = model.state_dict()
                test_only_train_fn(model, self.train_data)
                model = convert(model)
                quant_state_dict = model.state_dict()
                x = torch.rand(2, 5, dtype=torch.float)
                # Reference output from the model that was actually trained here.
                ref = model(x)
                # Create model again for eval. Check result using quantized state_dict
                model = TwoLayerLinearModel()
                model = torch.ao.quantization.QuantWrapper(model)
                model.qconfig = torch.ao.quantization.get_default_qat_qconfig(qengine)
                torch.ao.quantization.prepare_qat(model, inplace=True)
                new_state_dict = model.state_dict()
                # Check to make sure the model after prepare_qat has the same state_dict as original.
                self.assertEqual(set(fq_state_dict.keys()), set(new_state_dict.keys()))
                torch.ao.quantization.convert(model, inplace=True)
                model.eval()
                model.load_state_dict(quant_state_dict)
                out = model(x)
                self.assertEqual(ref, out)
                # Check model created using prepare has same state dict as quantized state_dict
                model = TwoLayerLinearModel()
                model.eval()
                model = torch.ao.quantization.QuantWrapper(model)
                model.qconfig = torch.ao.quantization.get_default_qconfig(qengine)
                torch.ao.quantization.prepare(model, inplace=True)
                torch.ao.quantization.convert(model, inplace=True)
                self.assertEqual(
                    set(model.state_dict().keys()), set(quant_state_dict.keys())
                )
                model.eval()
                model.load_state_dict(quant_state_dict)
                out = model(x)
                self.assertEqual(ref, out)
    @override_qengines
    def test_forward_hooks_preserved(self):
        r"""Test QAT on preserving pre forward and post forward hooks of original model"""
        qengine = torch.backends.quantized.engine
        model = QuantStubModel()
        counter = {
            "pre_forwards": 0,
            "forwards": 0,
        }
        def fw_pre_hook(h_module, input):
            counter["pre_forwards"] += 1
        def fw_hook(h_module, input, output):
            counter["forwards"] += 1
        model.fc.register_forward_pre_hook(fw_pre_hook)
        model.fc.register_forward_hook(fw_hook)
        model.qconfig = torch.ao.quantization.get_default_qat_qconfig(qengine)
        model = prepare_qat(model)
        def checkHooksIsPresent(model, before_convert=True):
            # Before convert, the observer adds one extra forward hook on the
            # layer (hence forward_hooks == 2); after convert only the user
            # hook remains.
            forward_hooks = 1
            if before_convert:
                self.assertEqual(
                    len(model.quant._forward_hooks.values()),
                    1,
                    "Quantization observer hook has disappeared",
                )
                forward_hooks = 2
            self.assertObjectIn(fw_pre_hook, model.fc._forward_pre_hooks.values())
            self.assertObjectIn(fw_hook, model.fc._forward_hooks.values())
            self.assertEqual(
                len(model.fc._forward_pre_hooks.values()),
                1,
                "Extra pre forward hooks have appeared on a layer",
            )
            self.assertEqual(
                len(model.fc._forward_hooks.values()),
                forward_hooks,
                "Extra post forward hooks have appeared on a layer",
            )
        checkHooksIsPresent(model, True)
        x = torch.rand(2, 5, dtype=torch.float)
        model(x)
        torch.ao.quantization.convert(model, inplace=True)
        checkHooksIsPresent(model, False)
    def test_add_scalar_uses_input_qparams(self):
        """After convert, `FloatFunctional.add_scalar` output reuses the input's
        quantization parameters: the result's q_scale equals the QuantStub scale."""
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.quant = torch.ao.quantization.QuantStub()
                self.ff = torch.ao.nn.quantized.FloatFunctional()
            def forward(self, x):
                x = self.quant(x)
                x = self.ff.add_scalar(x, 1.0)
                return x
        m = M()
        m.qconfig = torch.ao.quantization.default_qconfig
        mp = torch.ao.quantization.prepare_qat(m)
        # One calibration pass so the observers record ranges.
        mp(torch.randn(4, 4))
        mq = torch.ao.quantization.convert(mp)
        res = mq(torch.randn(4, 4))
        eps = 1e-5
        self.assertTrue(torch.abs(mq.quant.scale - res.q_scale()) < eps)
    def test_mul_scalar_uses_input_qparams(self):
        """After convert, `FloatFunctional.mul_scalar` derives its output scale
        from the input qparams: q_scale equals the input scale times the scalar (2.0)."""
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.quant = torch.ao.quantization.QuantStub()
                self.ff = torch.ao.nn.quantized.FloatFunctional()
            def forward(self, x):
                x = self.quant(x)
                x = self.ff.mul_scalar(x, 2.0)
                return x
        m = M()
        m.qconfig = torch.ao.quantization.default_qconfig
        mp = torch.ao.quantization.prepare_qat(m)
        # One calibration pass so the observers record ranges.
        mp(torch.randn(4, 4))
        mq = torch.ao.quantization.convert(mp)
        res = mq(torch.randn(4, 4))
        eps = 1e-5
        self.assertTrue(torch.abs(mq.quant.scale * 2 - res.q_scale()) < eps)
    @override_qengines
    def test_qat_embedding_bag_errors(self):
        """Constructor and `from_float` argument validation for `nnqat.EmbeddingBag`."""
        default_qat_qconfig = get_default_qat_qconfig(torch.backends.quantized.engine)
        # Test constructor parameters checks here.
        with self.assertRaisesRegex(
            AssertionError, "qconfig must be provided for QAT module"
        ):
            nnqat.EmbeddingBag(10, 5, qconfig=None)
        with self.assertRaisesRegex(
            AssertionError,
            "Embedding Bag weights requires a qscheme of "
            + "torch.per_channel_affine_float_qparams",
        ):
            nnqat.EmbeddingBag(10, 5, qconfig=default_qat_qconfig)
        # Test from_float checks here.
        embed = nn.Embedding(10, 5)
        with self.assertRaisesRegex(
            AssertionError, "qat.EmbeddingBag.from_float only works for EmbeddingBag"
        ):
            nnqat.EmbeddingBag.from_float(embed)
        embed_bag = nn.EmbeddingBag(10, 5)
        with self.assertRaisesRegex(
            AssertionError, "Input float module must have qconfig defined"
        ):
            nnqat.EmbeddingBag.from_float(embed_bag)
        embed_bag.qconfig = None
        with self.assertRaisesRegex(
            AssertionError, "Input float module must have a valid qconfig"
        ):
            nnqat.EmbeddingBag.from_float(embed_bag)
        embed_bag.qconfig = default_qat_qconfig
        with self.assertRaisesRegex(
            AssertionError,
            "Embedding Bag weights requires a qscheme of "
            + "torch.per_channel_affine_float_qparams",
        ):
            nnqat.EmbeddingBag.from_float(embed_bag)
    def test_embedding_qat_qconfig_equal(self):
        """`qconfig_equals` must work for qconfigs mixing partials and classes."""
        # Embedding QAT uses a NoopObserver class for activation,
        # and a FakeQuant for weight, make sure that qconfig comparison
        # functions properly for a mix of partial function and class in
        # qconfig.
        model = ManualEmbeddingBagLinear().train()
        model = prepare_qat(model)
        self.assertTrue(
            qconfig_equals(model.emb.qconfig, default_embedding_qat_qconfig)
        )
| TestQuantizeEagerQAT |
python | Pylons__pyramid | tests/test_router.py | {
"start": 60806,
"end": 60937
} | class ____:
    def __call__(self, info, request):
        # Test-double predicate: matches unconditionally, whatever the
        # introspection info or request passed in.
        return True
    def text(self):
        # Constant human-readable label for this predicate.
        return 'predicate'
| DummyPredicate |
python | ray-project__ray | rllib/core/testing/torch/bc_module.py | {
"start": 489,
"end": 1699
} | class ____(TorchRLModule):
def setup(self):
input_dim = self.observation_space.shape[0]
hidden_dim = self.model_config["fcnet_hiddens"][0]
output_dim = self.action_space.n
self.policy = nn.Sequential(
nn.Linear(input_dim, hidden_dim),
nn.ReLU(),
nn.Linear(hidden_dim, output_dim),
)
self.input_dim = input_dim
    def get_train_action_dist_cls(self):
        """Distribution class used for training: categorical over discrete actions."""
        return TorchCategorical
    def get_exploration_action_dist_cls(self):
        """Distribution class used for exploration: same categorical as training."""
        return TorchCategorical
    def get_inference_action_dist_cls(self):
        """Distribution class used for inference: same categorical as training."""
        return TorchCategorical
    @override(RLModule)
    def _forward_inference(self, batch: Dict[str, Any]) -> Dict[str, Any]:
        """Inference path: the training forward pass, without gradient tracking."""
        with torch.no_grad():
            return self._forward_train(batch)
    @override(RLModule)
    def _forward_exploration(self, batch: Dict[str, Any]) -> Dict[str, Any]:
        """Exploration path: the training forward pass, without gradient tracking."""
        with torch.no_grad():
            return self._forward_train(batch)
    @override(RLModule)
    def _forward_train(self, batch: Dict[str, Any]) -> Dict[str, Any]:
        """Compute action-distribution logits for the observations in `batch`."""
        action_logits = self.policy(batch["obs"])
        return {Columns.ACTION_DIST_INPUTS: action_logits}
| DiscreteBCTorchModule |
python | pennersr__django-allauth | allauth/headless/constants.py | {
"start": 254,
"end": 1003
} | class ____(str, Enum):
LOGIN = "login"
LOGIN_BY_CODE = AccountLoginStageKey.LOGIN_BY_CODE.value
MFA_AUTHENTICATE = MFALoginStageKey.MFA_AUTHENTICATE.value
MFA_LOGIN_WEBAUTHN = "mfa_login_webauthn"
MFA_REAUTHENTICATE = "mfa_reauthenticate"
MFA_SIGNUP_WEBAUTHN = MFALoginStageKey.MFA_SIGNUP_WEBAUTHN.value
MFA_TRUST = MFALoginStageKey.MFA_TRUST.value
PASSWORD_RESET_BY_CODE = "password_reset_by_code" # nosec
PROVIDER_REDIRECT = "provider_redirect"
PROVIDER_SIGNUP = "provider_signup"
PROVIDER_TOKEN = "provider_token" # nosec
REAUTHENTICATE = "reauthenticate"
SIGNUP = "signup"
VERIFY_EMAIL = AccountLoginStageKey.VERIFY_EMAIL.value
VERIFY_PHONE = AccountLoginStageKey.VERIFY_PHONE.value
| Flow |
python | apache__airflow | airflow-core/src/airflow/cli/commands/standalone_command.py | {
"start": 1754,
"end": 9715
} | class ____:
"""
Runs all components of Airflow under a single parent process.
Useful for local development.
"""
    @classmethod
    def entrypoint(cls, args):
        """CLI entrypoint, called by the main CLI system."""
        # `args` is accepted for CLI-framework compatibility but not used.
        StandaloneCommand().run()
    def __init__(self):
        # name -> SubCommand for every component this process launches
        self.subcommands = {}
        # (name, raw output line) tuples pushed by the subcommand threads
        self.output_queue = deque()
        # monotonic timestamp of the moment all components became ready
        self.ready_time = None
        # seconds to wait after readiness before printing the banner
        self.ready_delay = 3
    @providers_configuration_loaded
    def run(self):
        """Start scheduler, dag-processor, api-server and triggerer, stream
        their output until interrupted, then shut everything down."""
        self.print_output("standalone", "Starting Airflow Standalone")
        # Silence built-in logging at INFO
        logging.getLogger("").setLevel(logging.WARNING)
        # Startup checks and prep
        env = self.calculate_env()
        self.find_user_info()
        self.initialize_database()
        # Set up commands to run
        self.subcommands["scheduler"] = SubCommand(
            self,
            name="scheduler",
            command=["scheduler"],
            env=env,
        )
        self.subcommands["dag-processor"] = SubCommand(
            self,
            name="dag-processor",
            command=["dag-processor"],
            env=env,
        )
        self.subcommands["api-server"] = SubCommand(
            self,
            name="api-server",
            command=["api-server"],
            env=env,
        )
        self.subcommands["triggerer"] = SubCommand(
            self,
            name="triggerer",
            command=["triggerer"],
            env=env,
        )
        # Run subcommand threads
        for command in self.subcommands.values():
            command.start()
        # Run output loop
        shown_ready = False
        try:
            while True:
                # Print all the current lines onto the screen
                self.update_output()
                # Print info banner when all components are ready and the
                # delay has passed
                if not self.ready_time and self.is_ready():
                    self.ready_time = time.monotonic()
                if (
                    not shown_ready
                    and self.ready_time
                    and time.monotonic() - self.ready_time > self.ready_delay
                ):
                    self.print_ready()
                    shown_ready = True
                # Ensure we idle-sleep rather than fast-looping
                time.sleep(0.1)
        except KeyboardInterrupt:
            pass
        # Stop subcommand threads
        self.print_output("standalone", "Shutting down components")
        for command in self.subcommands.values():
            command.stop()
        for command in self.subcommands.values():
            command.join()
        self.print_output("standalone", "Complete")
def update_output(self):
"""Drains the output queue and prints its contents to the screen."""
while self.output_queue:
# Extract info
name, line = self.output_queue.popleft()
# Make line printable
line_str = line.decode("utf8").strip()
self.print_output(name, line_str)
    def print_output(self, name: str, output):
        """
        Print an output line with name and colouring.

        You can pass multiple lines to output if you wish; it will be split for you.
        """
        color: dict[str, str] = {
            "api-server": "magenta",
            "scheduler": "blue",
            "dag-processor": "yellow",
            "triggerer": "cyan",
            "standalone": "white",
        }
        # Unknown component names fall back to white.
        colorised_name = colored(
            f"{name:10}", color.get(name, "white"), no_color=NO_COLOR, force_color=FORCE_COLOR
        )
        for line in output.splitlines():
            print(f"{colorised_name} | {line.strip()}")
    def print_error(self, name: str, output):
        """
        Print an error message to the console.

        This is the same as print_output but with the text red.
        """
        self.print_output(name, colored(output, "red"))
    def calculate_env(self):
        """
        Works out the environment variables needed to run subprocesses.

        We override some settings as part of being standalone.
        """
        env = dict(os.environ)
        # Make sure we're using a local executor flavour
        executor_class, _ = ExecutorLoader.import_default_executor_cls()
        if not executor_class.is_local:
            self.print_output("standalone", "Forcing executor to LocalExecutor")
            env["AIRFLOW__CORE__EXECUTOR"] = executor_constants.LOCAL_EXECUTOR
        # Make sure we're using SimpleAuthManager
        simple_auth_manager_classpath = (
            "airflow.api_fastapi.auth.managers.simple.simple_auth_manager.SimpleAuthManager"
        )
        if conf.get("core", "auth_manager") != simple_auth_manager_classpath:
            self.print_output("standalone", "Forcing auth manager to SimpleAuthManager")
            env["AIRFLOW__CORE__AUTH_MANAGER"] = simple_auth_manager_classpath
            os.environ["AIRFLOW__CORE__AUTH_MANAGER"] = simple_auth_manager_classpath # also in this process!
        return env
    def find_user_info(self):
        """Surface the admin login credentials, unless auth is effectively
        disabled or a password was already configured/generated."""
        if conf.get("core", "simple_auth_manager_all_admins").lower() == "true":
            # If we have no auth anyways, no need to print or do anything
            return
        if conf.get("core", "simple_auth_manager_users") != "admin:admin":
            self.print_output(
                "standalone",
                "Not outputting user passwords - `[core] simple_auth_manager_users` is already set.",
            )
            return
        am = create_auth_manager()
        password_file = am.get_generated_password_file()
        if os.path.exists(password_file):
            self.print_output(
                "standalone",
                f"Password for the admin user has been previously generated in {password_file}. Not echoing it here.",
            )
            return
        # this generates the password and prints it
        am.init()
    def initialize_database(self):
        """Make sure all the database tables are created."""
        # Set up DB tables
        self.print_output("standalone", "Checking database is initialized")
        db.initdb()
        self.print_output("standalone", "Database ready")
    def is_ready(self):
        """
        Detect when all Airflow components are ready to serve.

        Considers the stack ready once the scheduler, DAG processor and
        triggerer jobs are all heartbeating (the api-server is not polled here).
        """
        return (
            self.job_running(SchedulerJobRunner)
            and self.job_running(DagProcessorJobRunner)
            and self.job_running(TriggererJobRunner)
        )
def port_open(self, port):
"""
Check if the given port is listening on the local machine.
Used to tell if webserver is alive.
"""
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.connect(("127.0.0.1", port))
sock.close()
except (OSError, ValueError):
# Any exception means the socket is not available
return False
return True
    def job_running(self, job_runner_class: type[BaseJobRunner]):
        """
        Check if the given job name is running and heartbeating correctly.

        Used to tell if a component is alive.
        """
        recent = most_recent_job(job_runner_class.job_type)
        if not recent:
            # No job record at all yet - the component has not started.
            return False
        return recent.is_alive()
    def print_ready(self):
        """
        Print the banner shown when Airflow is ready to go.

        Include with login details.
        """
        self.print_output("standalone", "")
        self.print_output("standalone", "Airflow is ready")
        self.print_output(
            "standalone",
            "Airflow Standalone is for development purposes only. Do not use this in production!",
        )
        self.print_output("standalone", "")
| StandaloneCommand |
python | doocs__leetcode | solution/1100-1199/1135.Connecting Cities With Minimum Cost/Solution.py | {
"start": 0,
"end": 557
} | class ____:
def minimumCost(self, n: int, connections: List[List[int]]) -> int:
def find(x):
if p[x] != x:
p[x] = find(p[x])
return p[x]
connections.sort(key=lambda x: x[2])
p = list(range(n))
ans = 0
for x, y, cost in connections:
x, y = x - 1, y - 1
if find(x) == find(y):
continue
p[find(x)] = find(y)
ans += cost
n -= 1
if n == 1:
return ans
return -1
| Solution |
python | PyCQA__pylint | tests/functional/s/super/super_with_arguments.py | {
"start": 273,
"end": 365
} | class ____(Foo):
    def __init__(self):
        # Intentionally broken: `super` is referenced without being called,
        # which is exactly the pattern this pylint functional test exercises.
        # Do not "fix" this fixture.
        super.test(Bar, self).__init__()
| NotSuperCall |
python | ansible__ansible | lib/ansible/module_utils/_internal/_patches/_dataclass_annotation_patch.py | {
"start": 306,
"end": 2225
} | class ____(CallablePatch):
"""Patch broken ClassVar support in dataclasses when ClassVar is accessed via a module other than `typing`."""
target_container: t.ClassVar = dataclasses
target_attribute = '_is_type'
    @classmethod
    def is_patch_needed(cls) -> bool:
        """Return True if `dataclasses` mishandles dot-referenced ClassVar annotations."""
        @dataclasses.dataclass
        class CheckClassVar:
            # this is the broken case requiring patching: ClassVar dot-referenced from a module that is not `typing` is treated as an instance field
            # DTFIX-FUTURE: file/link CPython bug report, deprecate this patch if/when it's fixed in CPython
            a_classvar: _ts.ClassVar[int] # type: ignore[name-defined]
            a_field: int
        # A correct implementation sees exactly one field (`a_field`).
        return len(dataclasses.fields(CheckClassVar)) != 1
    def __call__(self, annotation, cls, a_module, a_type, is_type_predicate) -> bool:
        """
        This is a patched copy of `_is_type` from dataclasses.py in Python 3.13.
        It eliminates the redundant source module reference equality check for the ClassVar type that triggers the bug.
        """
        # Annotation strings look like "module.Attr" or "Attr"; the stdlib
        # regex splits them into (module, attribute) groups.
        match = dataclasses._MODULE_IDENTIFIER_RE.match(annotation) # type: ignore[attr-defined]
        if match:
            ns = None
            module_name = match.group(1)
            if not module_name:
                # No module name, assume the class's module did
                # "from dataclasses import InitVar".
                ns = sys.modules.get(cls.__module__).__dict__
            else:
                # Look up module_name in the class's module.
                module = sys.modules.get(cls.__module__)
                if module and module.__dict__.get(module_name): # this is the patched line; removed `is a_module`
                    ns = sys.modules.get(a_type.__module__).__dict__
            if ns and is_type_predicate(ns.get(match.group(2)), a_module):
                return True
        return False
| DataclassesIsTypePatch |
python | great-expectations__great_expectations | tests/expectations/test_condition_validators.py | {
"start": 5475,
"end": 6663
} | class ____:
    @pytest.mark.parametrize(
        "operator_func",
        [
            pytest.param(
                lambda col: col == None,  # noqa: E711 # testing invalid syntax
                id="eq - Linting error - `is None` is pythonic, "
                "but that only compares to singleton instance",
            ),
            pytest.param(
                lambda col: col != None,  # noqa: E711 # testing invalid syntax
                id="ne - Linting error - `is None` is pythonic, "
                "but that only compares to singleton instance",
            ),
            pytest.param(lambda col: col < None, id="lt - Nonsense"),
            pytest.param(lambda col: col <= None, id="le - Nonsense"),
            pytest.param(lambda col: col > None, id="gt - Nonsense"),
            pytest.param(lambda col: col >= None, id="ge - Nonsense"),
        ],
    )
    def test_column_operators_with_none_raises_error(self, operator_func):
        """Test that Column operators with None parameter raise InvalidParameterTypeError."""
        # NOTE(review): the docstring says InvalidParameterTypeError but the
        # assertion expects ValidationError - confirm which name is canonical.
        # The error must be raised while building the comparison expression.
        col = Column("status")
        with pytest.raises(ValidationError):
            operator_func(col)
| TestComparisonConditionValidators |
python | davidhalter__jedi | jedi/inference/names.py | {
"start": 23156,
"end": 23214
} | class ____(StubNameMixin, ModuleName):
pass
| StubModuleName |
python | django__django | tests/messages_tests/test_middleware.py | {
"start": 134,
"end": 499
} | class ____(unittest.TestCase):
    def test_response_without_messages(self):
        """
        MessageMiddleware is tolerant of messages not existing on request.
        """
        request = HttpRequest()
        response = HttpResponse()
        # A bare HttpRequest carries no message storage; process_response
        # must complete without raising because of that.
        MessageMiddleware(lambda req: HttpResponse()).process_response(
            request, response
        )
python | tensorflow__tensorflow | tensorflow/compiler/tests/special_math_test.py | {
"start": 2136,
"end": 4875
} | class ____(xla_test.XLATestCase, parameterized.TestCase):
  def setUp(self):
    # When --vary_seed is set, re-seed numpy from OS entropy so that
    # repeated runs exercise different random samples.
    if flags.FLAGS.vary_seed:
      entropy = os.urandom(64)
      answer = int.from_bytes(entropy, 'big')
      np.random.seed(answer % (2**32 - 1))
    super(Log1pTest, self).setUp()
def adjust_tolerance_for_tpu(self, dtype, rtol, atol):
if self.device not in ['TPU']:
return rtol, atol
if dtype == np.float32:
return 4e-4, 0.
return 1e-10, 0.
  def _test_range(self, low, high, dtype, rtol, atol, is_negative=False):
    """Compares _log1p against np.log1p for x = (+/-) exp(U[low, high))."""
    # Test values near zero.
    rtol, atol = self.adjust_tolerance_for_tpu(dtype, rtol, atol)
    x = np.exp(np.random.uniform(
        low=low, high=high, size=[NUM_SAMPLES])).astype(dtype)
    if is_negative:
      x = -x
    expected_values = np.log1p(x)
    with self.session() as sess:
      with self.test_scope():
        actual = _log1p(x)
      actual = sess.run(actual)
    self.assertAllClose(expected_values, actual, atol=atol, rtol=rtol)
  @parameterized.parameters((np.float32, 1e-7, 0.),
                            (np.float64, 1e-15, 0.))
  def testSmallX(self, dtype, rtol, atol):
    """Accuracy for tiny |x|: exponents in [-40, -20), both signs."""
    self._test_range(-40., -20., dtype, rtol, atol, is_negative=False)
    self._test_range(-40., -20., dtype, rtol, atol, is_negative=True)
  @parameterized.parameters((np.float32, 2e-7, 0.),
                            (np.float64, 1e-15, 0.))
  def testGreaterThanNegativeTwentyExponent(self, dtype, rtol, atol):
    """Accuracy for |x| with exponents in [-20, -10), both signs."""
    self._test_range(-20., -10., dtype, rtol, atol, is_negative=False)
    self._test_range(-20., -10., dtype, rtol, atol, is_negative=True)
  @parameterized.parameters((np.float32, 2e-7, 0.),
                            (np.float64, 1e-15, 0.))
  def testGreaterThanNegativeTenExponent(self, dtype, rtol, atol):
    """Accuracy for |x| with exponents in [-10, -5), both signs."""
    self._test_range(-10., -5., dtype, rtol, atol, is_negative=False)
    self._test_range(-10., -5., dtype, rtol, atol, is_negative=True)
  @parameterized.parameters((np.float32, 2e-7, 0.),
                            (np.float64, 1e-15, 0.))
  def testGreaterThanNegativeFiveExponent(self, dtype, rtol, atol):
    """Accuracy for |x| with exponents in [-5, -1), both signs."""
    self._test_range(-5., -1., dtype, rtol, atol, is_negative=False)
    self._test_range(-5., -1., dtype, rtol, atol, is_negative=True)
  @parameterized.parameters((np.float32, 4e-7, 0.),
                            (np.float64, 3e-14, 0.))
  def testXGreaterThanOneTenth(self, dtype, rtol, atol):
    """Accuracy for |x| with exponents in [-1, 0), both signs."""
    self._test_range(-1., 0., dtype, rtol, atol, is_negative=False)
    self._test_range(-1., 0., dtype, rtol, atol, is_negative=True)
  @parameterized.parameters((np.float32, 2e-7, 0.),
                            (np.float64, 2e-15, 0.))
  def testXGreaterThanOne(self, dtype, rtol, atol):
    """Accuracy for positive x with exponents in [0, 3)."""
    self._test_range(0., 3., dtype, rtol, atol, is_negative=False)
python | google__jax | tests/pallas/mosaic_gpu_test.py | {
"start": 113915,
"end": 158412
} | class ____(PallasSm100ATest):
def test_print_layout_tmem(self):
shape = (128, 256)
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct(shape, jnp.bfloat16),
scratch_shapes=[plgpu.TMEM(shape, jnp.bfloat16, packed=True)],
)
def kernel(o_ref, tmem_ref):
del o_ref
# Slicing TMEM to make sure we handle transforms correctly.
plgpu.print_layout("tmem: {}", tmem_ref.at[:, :128])
with self.capture_stdout() as output:
jax.block_until_ready(kernel())
self.assertIn("tmem: TMEM_DEFAULT(packing=2)\n", output())
def test_mixed_tmem_allocations_raise(self):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct((), jnp.float32),
scratch_shapes=[
plgpu.TMEM((128, 128), jnp.float32, collective=True),
plgpu.TMEM((128, 128), jnp.float32, collective=False),
],
)
def kernel(out_ref, tmem_ref0, tmem_ref1):
del out_ref, tmem_ref0, tmem_ref1
with self.assertRaisesRegex(
ValueError,
"Can't mix collective and non-collective TMEM allocations within the"
" same kernel.",
):
kernel()
def test_transposed_tmem_ref_raises(self):
@functools.partial(
self.pallas_call,
out_shape=jax.ShapeDtypeStruct([], jnp.float32),
scratch_shapes=[plgpu.TMEM((128, 128), jnp.float32)],
)
def kernel(out, tmem_ref):
del out
plgpu.transpose_ref(tmem_ref, (1, 0))
with self.assertRaisesRegex(ValueError, "Can't transpose a TMEM reference"):
kernel()
@parameterized.parameters((False,), (True,))
def test_tmem(self, collective):
transforms = self.default_transforms(dtype=jnp.float32)
@functools.partial(
self.kernel,
out_shape=jnp.zeros((128, 128), jnp.float32),
scratch_shapes=[
plgpu.TMEM((128, 128), jnp.float32, collective=collective),
plgpu.TMEM((128, 128), jnp.float32, collective=collective),
plgpu.SMEM((128, 128), jnp.float32, transforms=transforms),
plgpu.Barrier(),
],
num_threads=1,
thread_name="x",
cluster=(2,) if collective else (),
cluster_names=("x",) if collective else (),
)
def kernel(x_ref, y_ref, tmem_ref, tmem_ref2, smem_ref, barrier_ref):
plgpu.copy_gmem_to_smem(x_ref, smem_ref, barrier_ref)
plgpu.barrier_wait(barrier_ref)
# Exercise TMEM by roundtripping SMEM -> TMEM -> TMEM -> SMEM.
x_val = plgpu.load(smem_ref, (), layout=plgpu.Layout.TCGEN05)
plgpu.async_store_tmem(tmem_ref, x_val + 1)
plgpu.commit_tmem()
# We don't await the load, because we never overwrite tmem_ref
tmem_read = plgpu.async_load_tmem(tmem_ref)
plgpu.async_store_tmem(tmem_ref2, tmem_read)
plgpu.commit_tmem()
# We don't await the load, because we never overwrite tmem_ref2
smem_ref[...] = plgpu.async_load_tmem(tmem_ref2)
plgpu.commit_smem()
plgpu.copy_smem_to_gmem(smem_ref, y_ref)
plgpu.wait_smem_to_gmem(0)
x = jax.random.uniform(
jax.random.key(0), shape=(128, 128), dtype=jnp.float32)
x_result = jax.block_until_ready(kernel(x))
np.testing.assert_array_equal(x_result, x + 1)
def test_tmem_allocation_estimation(self):
"""Make sure that we don't overestimate the TMEM allocation.
All of the refs below are packed and should fit into TMEM at once.
"""
transforms = self.default_transforms(dtype=jnp.bfloat16)
@functools.partial(
self.kernel,
out_shape=jnp.zeros((128, 256), jnp.bfloat16),
scratch_shapes=[
plgpu.TMEM((128, 256), jnp.bfloat16, packed=True),
plgpu.TMEM((128, 256), jnp.bfloat16, packed=True),
plgpu.TMEM((128, 256), jnp.bfloat16, packed=True),
plgpu.SMEM((128, 256), jnp.bfloat16, transforms=transforms),
plgpu.Barrier(),
],
num_threads=1,
thread_name="x",
)
def kernel(x_ref, y_ref, tmem_ref1, tmem_ref2, tmem_ref3, smem_ref, barrier_ref):
plgpu.copy_gmem_to_smem(x_ref, smem_ref, barrier_ref)
plgpu.barrier_wait(barrier_ref)
x_val = plgpu.load(smem_ref, (), layout=plgpu.Layout.TCGEN05)
plgpu.async_store_tmem(tmem_ref1, x_val + 1)
plgpu.commit_tmem()
x_val = plgpu.async_load_tmem(tmem_ref1)
plgpu.async_store_tmem(tmem_ref2, x_val + 1)
plgpu.commit_tmem()
x_val = plgpu.async_load_tmem(tmem_ref2)
plgpu.async_store_tmem(tmem_ref3, x_val + 1)
plgpu.commit_tmem()
smem_ref[...] = plgpu.async_load_tmem(tmem_ref3)
plgpu.commit_smem()
plgpu.copy_smem_to_gmem(smem_ref, y_ref)
plgpu.wait_smem_to_gmem(0)
x = jax.random.uniform(jax.random.key(0), shape=(128, 256), dtype=jnp.bfloat16)
x_result = jax.block_until_ready(kernel(x))
np.testing.assert_array_equal(x_result, x + 3)
def test_tmem_ref_aliasing(self):
self.skip_if_wg_semantics()
transforms = self.default_transforms(dtype=jnp.float32)
@functools.partial(
self.kernel,
out_shape=jnp.zeros((128, 128), jnp.float32),
scratch_shapes=[
plgpu.RefUnion(
[plgpu.TMEM((128, 32), jnp.float32),
plgpu.TMEM((128, 32), jnp.float32)],
plgpu.TMEM((128, 64), jnp.float32),
),
plgpu.SMEM((128, 128), jnp.float32, transforms=transforms),
plgpu.Barrier(),
],
num_threads=1,
thread_name="x",
)
def kernel(x_ref, y_ref, aliased_ref, smem_ref, barrier_ref):
tmem_128x32a, tmem_128x32b, tmem_128x64 = aliased_ref
plgpu.copy_gmem_to_smem(x_ref, smem_ref, barrier_ref)
plgpu.barrier_wait(barrier_ref)
# Test tmem_128x32 a and b
x_val = plgpu.load(smem_ref.at[:, 0:32], (), layout=plgpu.Layout.TCGEN05)
plgpu.async_store_tmem(tmem_128x32a, x_val + 1)
plgpu.commit_tmem()
smem_ref[:, 0:32] = plgpu.async_load_tmem(tmem_128x32a)
plgpu.wait_load_tmem() # Make sure the load is done before we write to TMEM again.
x_val = plgpu.load(smem_ref.at[:, 32:64], (), layout=plgpu.Layout.TCGEN05)
plgpu.async_store_tmem(tmem_128x32b, x_val + 1)
plgpu.commit_tmem()
smem_ref[:, 32:64] = plgpu.async_load_tmem(tmem_128x32b)
plgpu.wait_load_tmem() # Make sure the load is done before we write to TMEM again.
# Test tmem_128x64
x_val = plgpu.load(smem_ref.at[:, 64:128], (), layout=plgpu.Layout.TCGEN05)
plgpu.async_store_tmem(tmem_128x64, x_val + 1)
plgpu.commit_tmem()
smem_ref[:, 64:128] = plgpu.async_load_tmem(tmem_128x64)
plgpu.commit_smem()
plgpu.copy_smem_to_gmem(smem_ref, y_ref)
plgpu.wait_smem_to_gmem(0)
x = jax.random.uniform(
jax.random.key(0), shape=(128, 128), dtype=jnp.float32)
x_result = jax.block_until_ready(kernel(x))
np.testing.assert_array_equal(x_result, x + 1)
@parameterized.parameters(
plgpu.Layout.TCGEN05, plgpu.Layout.TCGEN05_TMEM_NATIVE
)
def test_tmem_load_layout(self, layout):
self.skip_if_wg_semantics() # TiledLayout replication not supported yet.
transforms = self.default_transforms(dtype=jnp.float32)
@functools.partial(
self.kernel,
out_shape=jax.ShapeDtypeStruct((128, 128), jnp.float32),
scratch_shapes=[
plgpu.TMEM((128, 128), jnp.float32),
plgpu.SMEM((128, 128), jnp.float32, transforms=transforms),
plgpu.Barrier(),
],
)
def kernel(x_ref, y_ref, tmem_ref, smem_ref, barrier_ref):
plgpu.copy_gmem_to_smem(x_ref, smem_ref, barrier_ref)
plgpu.barrier_wait(barrier_ref)
optimized = layout != plgpu.Layout.TCGEN05_TMEM_NATIVE
x_val = plgpu.load(smem_ref, (), layout=layout, optimized=optimized)
plgpu.async_store_tmem(tmem_ref, x_val + 1)
plgpu.commit_tmem()
# We don't wait for the load to complete, because we never overwrite
# tmem_ref.
smem_ref[...] = plgpu.async_load_tmem(tmem_ref, layout=layout)
plgpu.commit_smem()
plgpu.copy_smem_to_gmem(smem_ref, y_ref)
plgpu.wait_smem_to_gmem(0)
x = jax.random.uniform(
jax.random.key(0), shape=(128, 128), dtype=jnp.float32)
x_result = jax.block_until_ready(kernel(x))
np.testing.assert_array_equal(x_result, x + 1)
@parameterized.parameters(
plgpu.Layout.TCGEN05_M64_COLLECTIVE(160),
plgpu.Layout.TCGEN05_M64_COLLECTIVE_NATIVE(160)
)
def test_tmem_store_load_collective(self, layout):
self.skip_if_wg_semantics() # TiledLayout replication not supported yet.
@functools.partial(
self.kernel,
out_shape=jax.ShapeDtypeStruct((64, 160), jnp.float32),
cluster=(2,),
cluster_names=("cluster",),
scratch_shapes=[
plgpu.TMEM(
(64, 160), jnp.float32, collective=True,
layout=plgpu.TMEMLayout.M64_COLLECTIVE_LAYOUT(160),
),
],
)
def kernel(x_ref, y_ref, tmem_ref):
x_val = plgpu.load(x_ref, (), layout=layout, optimized=False)
plgpu.async_store_tmem(tmem_ref, x_val + 1)
plgpu.commit_tmem()
# We don't wait for the load to complete, because we never overwrite
# tmem_ref.
y_ref[...] = plgpu.async_load_tmem(tmem_ref, layout=layout)
x = jax.random.uniform(
jax.random.key(0), shape=(64, 160), dtype=jnp.float32)
x_result = jax.block_until_ready(kernel(x))
np.testing.assert_array_equal(x_result, x + 1)
def test_tmem_column_slicing(self):
transforms = self.default_transforms(dtype=jnp.float32)
@functools.partial(
self.kernel,
out_shape=jax.ShapeDtypeStruct((128, 128), jnp.float32),
scratch_shapes=[
plgpu.TMEM((128, 256), jnp.float32),
plgpu.SMEM((128, 128), jnp.float32, transforms=transforms),
plgpu.Barrier(),
],
num_threads=1,
thread_name="x",
)
def kernel(x_ref, y_ref, tmem_ref, smem_ref, barrier_ref):
plgpu.copy_gmem_to_smem(x_ref, smem_ref, barrier_ref)
plgpu.barrier_wait(barrier_ref)
x_val = plgpu.load(smem_ref, (), layout=plgpu.Layout.TCGEN05)
tmem_slice = tmem_ref.at[:, 8:208].at[:, 0:128]
plgpu.async_store_tmem(tmem_slice, x_val + 1)
plgpu.commit_tmem()
smem_ref[...] = plgpu.async_load_tmem(tmem_ref.at[:, 8:136])
plgpu.commit_smem()
plgpu.copy_smem_to_gmem(smem_ref, y_ref)
plgpu.wait_smem_to_gmem(0)
x = jax.random.uniform(
jax.random.key(0), shape=(128, 128), dtype=jnp.float32)
x_result = jax.block_until_ready(kernel(x))
np.testing.assert_array_equal(x_result, (x + 1)[:, 0:128])
  @parameterized.product(
      m=[64, 128],
      n=[64, 128, 256],
      swizzle=[64, 32],
      dtype=[jnp.int8, jnp.uint8],
      lhs_tmem=[False, True],
  )
  def test_integer_matmul(self, m, n, swizzle, dtype, lhs_tmem):
    """Tests a single-block int8/uint8 tcgen05 MMA with an int32 accumulator,
    with the LHS sourced from either SMEM or TMEM."""
    if n * jnp.dtype(dtype).itemsize <= swizzle:
      self.skipTest("swizzle too big")
    if lhs_tmem and m == 64:
      self.skipTest("m=64 not supported for LHS in TMEM")
    if lhs_tmem:
      self.skip_if_wg_semantics() # Layout inference fails to find a solution.
    k = 128
    is_signed = jnp.issubdtype(dtype, jnp.signedinteger)
    o_dtype = jnp.int32
    in_transforms = self.default_transforms(dtype=dtype, swizzle=swizzle)
    out_transforms = self.default_transforms(dtype=o_dtype)
    def kernel(
        a_smem, b_smem, out_ref, acc_tmem, scratch_smem, barrier_ref, a_tmem_ref
    ):
      if lhs_tmem:
        # Stage the LHS into packed TMEM before issuing the MMA.
        lhs_ref = a_tmem_ref
        layout = plgpu.Layout.TCGEN05_TMEM_NATIVE(4)
        plgpu.async_store_tmem(lhs_ref, plgpu.load(a_smem, (), layout=layout, optimized=False))
        plgpu.commit_tmem()
      else:
        lhs_ref = a_smem
      plgpu.tcgen05_mma(
          acc_tmem, lhs_ref, b_smem, barrier_ref, accumulate=False
      )
      # The barrier orders the tensor core: the MMA result is visible after it.
      plgpu.barrier_wait(barrier_ref)
      scratch_smem[...] = plgpu.async_load_tmem(acc_tmem)
      plgpu.commit_smem()
      plgpu.copy_smem_to_gmem(scratch_smem, out_ref)
      plgpu.wait_smem_to_gmem(0)
    scratch_shapes = [
        plgpu.TMEM((m, n), o_dtype, packed=False),
        plgpu.SMEM((m, n), o_dtype, transforms=out_transforms),
        plgpu.Barrier(orders_tensor_core=True),
    ]
    if lhs_tmem:
      scratch_shapes.append(plgpu.TMEM((m, k), dtype, packed=True))
    else:
      scratch_shapes.append(None)
    f = self.pallas_call(
        kernel,
        in_specs=(
            plgpu.BlockSpec(transforms=in_transforms, memory_space=plgpu.SMEM),
            plgpu.BlockSpec(transforms=in_transforms, memory_space=plgpu.SMEM),
        ),
        out_specs=plgpu.BlockSpec(memory_space=plgpu.GMEM),
        out_shape=jax.ShapeDtypeStruct((m, n), o_dtype),
        scratch_shapes=scratch_shapes,
    )
    # use small values to avoid overflow, [0, 8) for u8 and (-8, 8) for s8
    random_int_input = lambda key, shape: jax.random.randint(
        key, minval=-8 * is_signed, maxval=8, shape=shape, dtype=dtype
    )
    x = random_int_input(jax.random.key(0), shape=(m, k))
    y = random_int_input(jax.random.key(1), shape=(k, n))
    result = f(x, y)
    expected = x.astype(o_dtype) @ y.astype(o_dtype)
    np.testing.assert_array_equal(result, expected)
  @parameterized.product(m=[64, 128],
                         n=[64, 128, 256],
                         swizzle=[128, 64, 32],
                         dtype=[jnp.float16, jnp.bfloat16],
                         lhs_tmem=[False, True],
                         transpose_rhs=[False, True],
                         transpose_lhs=[False, True])
  def test_simple_matmul(
      self, m, n, swizzle, dtype, lhs_tmem, transpose_lhs, transpose_rhs
  ):
    """Tests a single-block fp16/bf16 tcgen05 matmul across swizzles,
    operand transposition, and LHS residing in SMEM or TMEM."""
    if transpose_lhs and lhs_tmem:
      self.skipTest("TMEM transpose not supported")
    if n * jnp.dtype(dtype).itemsize <= swizzle:
      self.skipTest("swizzle too big")
    if lhs_tmem and m == 64:
      self.skipTest("m=64 not supported for LHS in TMEM")
    k = 128
    # Test a matmul with a single block.
    transforms = self.default_transforms(dtype=dtype, swizzle=swizzle)
    def kernel(a_smem, b_smem, out_ref, acc_tmem, scratch_smem, barrier_ref,
               a_tmem_ref):
      if transpose_lhs:
        a_smem = plgpu.transpose_ref(a_smem, (1, 0))
      if transpose_rhs:
        b_smem = plgpu.transpose_ref(b_smem, (1, 0))
      if lhs_tmem:
        # Stage the LHS into TMEM; m=64 uses the WGMMA layout instead.
        lhs_ref = a_tmem_ref
        layout = plgpu.Layout.TCGEN05 if m == 128 else plgpu.Layout.WGMMA
        plgpu.async_store_tmem(lhs_ref, plgpu.load(a_smem, (), layout=layout))
        plgpu.commit_tmem()
      else:
        lhs_ref = a_smem
      plgpu.tcgen05_mma(acc_tmem,
                        lhs_ref,
                        b_smem,
                        barrier_ref,
                        accumulate=False)
      plgpu.barrier_wait(barrier_ref)
      # We don't await the load because acc_tmem is never modified again.
      scratch_smem[...] = plgpu.async_load_tmem(acc_tmem).astype(dtype)
      plgpu.commit_smem()
      plgpu.copy_smem_to_gmem(scratch_smem, out_ref)
      plgpu.wait_smem_to_gmem(0)
    scratch_shapes = [
        plgpu.TMEM((m, n), jnp.float32, packed=False),
        plgpu.SMEM((m, n), dtype, transforms=transforms),
        plgpu.Barrier(orders_tensor_core=True),
    ]
    if lhs_tmem:
      scratch_shapes.append(plgpu.TMEM((m, k), dtype, packed=True))
    else:
      scratch_shapes.append(None)
    f = self.pallas_call(
        kernel,
        in_specs=(
            plgpu.BlockSpec(transforms=transforms, memory_space=plgpu.SMEM),
            plgpu.BlockSpec(transforms=transforms, memory_space=plgpu.SMEM),
        ),
        out_specs=plgpu.BlockSpec(memory_space=plgpu.GMEM),
        out_shape=jax.ShapeDtypeStruct((m, n), dtype),
        scratch_shapes=scratch_shapes,
    )
    # Inputs are generated pre-transposed; the reference result below undoes
    # the transposition before comparing.
    lhs_shape = (k, m) if transpose_lhs else (m, k)
    rhs_shape = (n, k) if transpose_rhs else (k, n)
    x = jax.random.uniform(jax.random.key(0), shape=lhs_shape, dtype=dtype)
    y = jax.random.uniform(jax.random.key(1), shape=rhs_shape, dtype=dtype)
    result = f(x, y)
    if transpose_lhs:
      x = jnp.transpose(x, (1, 0))
    if transpose_rhs:
      y = jnp.transpose(y, (1, 0))
    expected = x @ y
    np.testing.assert_allclose(result, expected, rtol=1e-3)
  def test_matmul_alignment(self):
    """Tests that the MMA accumulator allocation is correctly aligned even
    when it is preceded by a narrow (one-column) TMEM allocation."""
    m = k = n = 128
    dtype = jnp.float16
    transforms = self.default_transforms(dtype=dtype)
    def kernel(a_smem, b_smem, out_ref, _, acc_tmem, barrier_ref):
      plgpu.tcgen05_mma(acc_tmem, a_smem, b_smem, barrier_ref, accumulate=False)
      plgpu.barrier_wait(barrier_ref)
      # We don't await the load because acc_tmem is never modified again.
      out_ref[...] = plgpu.async_load_tmem(acc_tmem).astype(dtype)
    spec = plgpu.BlockSpec(transforms=transforms, memory_space=plgpu.SMEM)
    f = self.pallas_call(
        kernel,
        in_specs=(spec, spec),
        out_specs=spec,
        out_shape=jax.ShapeDtypeStruct((m, n), dtype),
        # Add a one column space to test if we align the accumulator.
        scratch_shapes=(
            plgpu.TMEM((128, 1), jnp.float32),
            plgpu.TMEM((m, n), jnp.float32),
            plgpu.Barrier(orders_tensor_core=True),
        ),
    )
    lhs_shape = (m, k)
    rhs_shape = (k, n)
    x = jax.random.uniform(jax.random.key(0), shape=lhs_shape, dtype=dtype)
    y = jax.random.uniform(jax.random.key(1), shape=rhs_shape, dtype=dtype)
    result = f(x, y)
    expected = x @ y
    np.testing.assert_allclose(result, expected, rtol=1e-3)
  @parameterized.product(
      m=[128],
      n=[128, 256],
      dtype=[jnp.float8_e5m2, jnp.float8_e4m3fn, jnp.float4_e2m1fn],
  )
  def test_simple_scaled_matmul(self, m, n, dtype):
    """Tests a block-scaled fp8/fp4 tcgen05 MMA: operands carry e8m0 scale
    factors (one per 32 contracted elements) staged into TMEM."""
    self.skip_if_wg_semantics()
    # TODO(apaszke): Add support for single-buffering in pallas_call.
    causes_oom = jnp.finfo(dtype).bits == 8 and n == 256
    k = 128 if causes_oom else 256
    swizzle = 128
    transforms = self.default_transforms(swizzle=swizzle, dtype=dtype)
    out_transforms = self.default_transforms(dtype=jnp.float32)
    def kernel(a_smem, b_smem, a_scale_smem, b_scale_smem, out_ref,
               barrier_ref, acc_tmem, a_scale_tmem, b_scale_tmem):
      plgpu.async_copy_scales_to_tmem(a_scale_smem, a_scale_tmem)
      plgpu.async_copy_scales_to_tmem(b_scale_smem, b_scale_tmem)
      # We don't have to await the copy because it's only used by the MMA.
      plgpu.tcgen05_mma(acc_tmem,
                        a_smem,
                        plgpu.transpose_ref(b_smem, (1, 0)),
                        a_scale=a_scale_tmem,
                        b_scale=b_scale_tmem,
                        accumulate=False)
      plgpu.tcgen05_commit_arrive(barrier_ref)
      plgpu.barrier_wait(barrier_ref)
      # We don't await the load because acc_tmem is never modified again.
      out_ref[...] = plgpu.async_load_tmem(acc_tmem)
    scratch_shapes = [
        plgpu.Barrier(orders_tensor_core=True),
        plgpu.TMEM((m, n), jnp.float32),
        plgpu.TMEM((m, k // 32), jnp.float8_e8m0fnu, layout=plgpu.TMEMLayout.SCALES_LAYOUT),
        plgpu.TMEM((n, k // 32), jnp.float8_e8m0fnu, layout=plgpu.TMEMLayout.SCALES_LAYOUT),
    ]
    f = self.pallas_call(
        kernel,
        in_specs=(
            plgpu.BlockSpec(memory_space=plgpu.SMEM, transforms=transforms),
            plgpu.BlockSpec(memory_space=plgpu.SMEM, transforms=transforms),
            plgpu.BlockSpec(memory_space=plgpu.SMEM),
            plgpu.BlockSpec(memory_space=plgpu.SMEM),
        ),
        out_shape=jax.ShapeDtypeStruct((m, n), jnp.float32),
        out_specs=plgpu.BlockSpec(transforms=out_transforms),
        scratch_shapes=scratch_shapes,
    )
    x = jax.random.uniform(jax.random.key(1), shape=(m, k), dtype=jnp.float32).astype(dtype)
    y = jax.random.uniform(jax.random.key(2), shape=(n, k), dtype=jnp.float32).astype(dtype)
    ksx, ksy = jax.random.split(jax.random.key(1234), 2)
    # e8m0 scales are generated as raw uint8 bit patterns near the exponent
    # bias (122..131) and bitcast into float8_e8m0fnu.
    x_scale = jax.lax.bitcast_convert_type(
        jax.random.randint(ksx, (m, k // 32), 122, 132, dtype=jnp.uint8),
        jnp.float8_e8m0fnu
    )
    y_scale = jax.lax.bitcast_convert_type(
        jax.random.randint(ksy, (n, k // 32), 122, 132, dtype=jnp.uint8),
        jnp.float8_e8m0fnu
    )
    def format_scales(scales):
      # Rearranges (mn, k/32) scales into the tiled host-side layout expected
      # by async_copy_scales_to_tmem. NOTE(review): assumed to match
      # TMEMLayout.SCALES_LAYOUT — verify against the plgpu docs.
      mn, k = scales.shape
      assert mn % 128 == 0 and k % 4 == 0
      return (
          scales.reshape(mn // 128, 4, 32, k // 4, 4)
          .transpose(0, 3, 2, 1, 4)
          .reshape(mn // 128, k // 4, 32, 16)
      )
    result = f(x, y, format_scales(x_scale), format_scales(y_scale))
    # Reference: broadcast each scale across its 32-element group and do the
    # matmul in f32.
    x_logical_scale = jnp.repeat(x_scale, 32, axis=1).astype(jnp.float32)
    y_logical_scale = jnp.repeat(y_scale, 32, axis=1).astype(jnp.float32)
    expected = jnp.dot(
        x.astype(jnp.float32) * x_logical_scale,
        (y.astype(jnp.float32) * y_logical_scale).T,
    )
    np.testing.assert_allclose(result, expected, rtol=1e-3)
  @parameterized.product(
      m=[128],
      n=[128, 256],
      dtype=[jnp.float16],
  )
  def test_simple_sparse_matmul(self, m, n, dtype):
    """Tests 2:4 structured-sparse tcgen05 MMA: the LHS stores 2 values per
    group of 4, with uint2 metadata selecting their positions."""
    self.skip_if_wg_semantics()
    k = 128
    swizzle = 128 // jnp.dtype(dtype).itemsize
    transforms = self.default_transforms(swizzle=swizzle, dtype=dtype)
    out_transforms = self.default_transforms(dtype=jnp.float32)
    def kernel(a_smem, b_smem, a_sparse_smem, out_ref,
               barrier_ref, acc_tmem, a_sparse_tmem):
      plgpu.async_copy_sparse_metadata_to_tmem(a_sparse_smem, a_sparse_tmem)
      # We don't have to await the copy because it's only used by the MMA.
      plgpu.tcgen05_mma(acc_tmem,
                        a_smem,
                        plgpu.transpose_ref(b_smem, (1, 0)),
                        a_sparse_metadata=a_sparse_tmem,
                        accumulate=False)
      plgpu.tcgen05_commit_arrive(barrier_ref)
      plgpu.barrier_wait(barrier_ref)
      # We don't await the load because acc_tmem is never modified again.
      out_ref[...] = plgpu.async_load_tmem(acc_tmem)
    scratch_shapes = [
        plgpu.Barrier(orders_tensor_core=True),
        plgpu.TMEM((m, n), jnp.float32),
        plgpu.TMEM((m, k // 2), jnp.uint2, layout=plgpu.TMEMLayout.SPARSE_METADATA_LAYOUT),
    ]
    f = self.pallas_call(
        kernel,
        in_specs=(
            plgpu.BlockSpec(memory_space=plgpu.SMEM, transforms=transforms),
            plgpu.BlockSpec(memory_space=plgpu.SMEM, transforms=transforms),
            plgpu.BlockSpec(memory_space=plgpu.SMEM),
        ),
        out_shape=jax.ShapeDtypeStruct((m, n), jnp.float32),
        out_specs=plgpu.BlockSpec(transforms=out_transforms),
        scratch_shapes=scratch_shapes,
    )
    # The LHS is stored compressed: only k/2 values per row.
    x = jax.random.uniform(jax.random.key(1), shape=(m, k // 2), dtype=dtype)
    y = jax.random.uniform(jax.random.key(2), shape=(n, k), dtype=dtype)
    # Enumerate the 6 strictly-increasing index pairs (i, j), i < j < 4 — the
    # legal positions of the two kept values inside each group of 4.
    index_pairs = np.asarray(np.meshgrid(range(4), range(4))).T.reshape(-1, 2)
    valid_pairs = index_pairs[index_pairs[:, 0] < index_pairs[:, 1]]
    assert len(valid_pairs) == 6
    x_pairs = jax.random.randint(jax.random.key(1234), (m, k // 4), 0, 6, dtype=jnp.uint8)
    x_sparse = valid_pairs[x_pairs]
    assert x_sparse.shape == (m, k // 4, 2)
    z = f(x, y, plgpu.format_tcgen05_sparse_metadata(x_sparse.astype(jnp.uint2)))
    # Reference: scatter the compressed values back into a dense (m, k) LHS.
    x_logical = np.zeros_like(x, shape=(m, k // 4, 4))
    np.put_along_axis(x_logical, x_sparse, x.reshape(x_sparse.shape), axis=-1)
    x_logical = x_logical.reshape(m, k)
    ref = x_logical.astype(jnp.float32) @ y.T.astype(jnp.float32)
    np.testing.assert_allclose(z, ref, atol=7e-5, rtol=5e-6)
  @parameterized.parameters(
      (128, jnp.float16)
  )
  def test_manual_tcgen05_commit_arrive(self, swizzle, dtype):
    """Tests issuing tcgen05_mma without a barrier argument and signalling
    completion manually via tcgen05_commit_arrive."""
    shape = (128, 128)
    transforms = self.default_transforms(swizzle=swizzle, dtype=dtype)
    def kernel(a_gmem, b_gmem, out_gmem,
               a_smem, b_smem, out_smem, tma_barrier, mma_barrier, acc_tmem):
      plgpu.copy_gmem_to_smem(a_gmem, a_smem, tma_barrier)
      plgpu.barrier_wait(tma_barrier)
      plgpu.copy_gmem_to_smem(b_gmem, b_smem, tma_barrier)
      plgpu.barrier_wait(tma_barrier)
      plgpu.commit_tmem()
      # Don't pass a barrier directly into tcgen05_mma and arrive manually.
      plgpu.tcgen05_mma(acc_tmem,
                        a_smem,
                        b_smem,
                        accumulate=False)
      plgpu.tcgen05_commit_arrive(mma_barrier)
      plgpu.barrier_wait(mma_barrier)
      # We don't await the load because acc_tmem is never modified again.
      out_smem[...] = plgpu.async_load_tmem(acc_tmem).astype(dtype)
      plgpu.commit_smem()
      plgpu.copy_smem_to_gmem(out_smem, out_gmem)
      plgpu.wait_smem_to_gmem(0)
    f = self.kernel(
        kernel,
        out_shape=jax.ShapeDtypeStruct(shape, dtype),
        scratch_shapes=[
            plgpu.SMEM(shape, dtype, transforms=transforms), # a_smem
            plgpu.SMEM(shape, dtype, transforms=transforms), # b_smem
            plgpu.SMEM(shape, dtype, transforms=transforms), # out_smem
            plgpu.Barrier(), # tma_barrier
            plgpu.Barrier(orders_tensor_core=True), # mma_barrier
            plgpu.TMEM((128, 128), jnp.float32), # acc
        ],
    )
    x = jax.random.uniform(jax.random.key(0), shape=shape, dtype=dtype)
    y = jax.random.uniform(jax.random.key(1), shape=shape, dtype=dtype)
    result = f(x, y)
    np.testing.assert_allclose(result, x @ y, rtol=1e-3)
  def test_matmul_with_sliced_accumulator(self):
    """Tests that tcgen05_mma accepts a column slice of a wider TMEM
    allocation as its accumulator."""
    dtype = jnp.bfloat16
    shape = (128, 128)
    # The TMEM allocation is twice as wide as the accumulator actually used.
    tmem_shape = (128, 2 * 128)
    swizzle = 128
    # Test a matmul with a single block.
    transforms = self.default_transforms(swizzle=swizzle, dtype=dtype)
    def kernel(a_smem, b_smem, out_ref, acc_tmem, scratch_smem, barrier_ref):
      # Accumulate into the first 128 columns of the wider allocation.
      acc_tmem_slice = acc_tmem.at[slice(None), pl.dslice(0, 128)]
      plgpu.tcgen05_mma(acc_tmem_slice,
                        a_smem,
                        b_smem,
                        barrier_ref,
                        accumulate=False)
      plgpu.barrier_wait(barrier_ref)
      # We don't await the load because acc_tmem is never modified again.
      scratch_smem[...] = plgpu.async_load_tmem(acc_tmem_slice).astype(dtype)
      plgpu.commit_smem()
      plgpu.copy_smem_to_gmem(scratch_smem, out_ref)
      plgpu.wait_smem_to_gmem(0)
    scratch_shapes = [
        plgpu.TMEM(tmem_shape, jnp.float32, packed=False),
        plgpu.SMEM(shape, dtype, transforms=transforms),
        plgpu.Barrier(orders_tensor_core=True),
    ]
    f = self.pallas_call(
        kernel,
        in_specs=(
            plgpu.BlockSpec(transforms=transforms, memory_space=plgpu.SMEM),
            plgpu.BlockSpec(transforms=transforms, memory_space=plgpu.SMEM),
        ),
        out_specs=plgpu.BlockSpec(memory_space=plgpu.GMEM),
        out_shape=jax.ShapeDtypeStruct(shape, dtype),
        scratch_shapes=scratch_shapes,
    )
    x = jax.random.uniform(jax.random.key(0), shape=shape, dtype=dtype)
    y = jax.random.uniform(jax.random.key(1), shape=shape, dtype=dtype)
    result = f(x, y)
    expected = x @ y
    np.testing.assert_allclose(result, expected, rtol=1e-3)
@parameterized.product(
m_n_k=[
(256, 256, 256),
(256, 128, 128),
(256, 256, 64),
(128, 64, 128),
(128, 64, 128),
],
swizzle=[128, 64, 32],
dtype=[jnp.float16, jnp.bfloat16],
lhs_tmem=[False, True],
)
def test_simple_collective_matmul(self, m_n_k, swizzle, dtype, lhs_tmem):
m, n, k = m_n_k
if (n // 2) * jnp.dtype(dtype).itemsize < swizzle:
self.skipTest("swizzle too big")
full_lhs_shape = (m, k)
full_rhs_shape = (k, n)
full_acc_shape = (m, n)
block_acc_shape = (m // 2, n)
block_lhs_shape = (m // 2, k)
block_rhs_shape = (k, n // 2)
# Test a collective (paired CTA) matmul on a single block.
transforms = self.default_transforms(swizzle=swizzle, dtype=dtype)
if lhs_tmem and m == 128:
self.skipTest("m=128 not supported for LHS in TMEM")
def kernel(a_gmem, b_gmem, out_gmem, a_smem, b_smem,
scratch_smem, acc_tmem, tma_barrier, mma_barrier,
cluster_barrier, lhs_tmem_ref):
cluster_idx = lax.axis_index("x")
slice_lhs = pl.ds(cluster_idx * block_lhs_shape[0], block_lhs_shape[0])
slice_rhs = pl.ds(cluster_idx * block_rhs_shape[1], block_rhs_shape[1])
plgpu.copy_gmem_to_smem(a_gmem.at[slice_lhs, :], a_smem, tma_barrier)
plgpu.barrier_wait(tma_barrier)
plgpu.copy_gmem_to_smem(b_gmem.at[:, slice_rhs], b_smem, tma_barrier)
plgpu.barrier_wait(tma_barrier)
if lhs_tmem:
lhs_ref = lhs_tmem_ref
plgpu.async_store_tmem(lhs_ref, plgpu.load(a_smem, (), layout=plgpu.Layout.TCGEN05))
plgpu.commit_tmem()
else:
lhs_ref = a_smem
plgpu.barrier_arrive(cluster_barrier)
plgpu.barrier_wait(cluster_barrier)
plgpu.tcgen05_mma(
acc_tmem,
lhs_ref,
b_smem,
mma_barrier,
accumulate=False,
collective_axis="x",
)
plgpu.barrier_wait(mma_barrier)
if m == 128:
layout = plgpu.Layout.TCGEN05_M64_COLLECTIVE(n)
else:
layout = plgpu.Layout.TCGEN05
# We don't await the load because acc_tmem is never modified again.
scratch_smem[...] = plgpu.async_load_tmem(acc_tmem, layout=layout).astype(dtype)
plgpu.commit_smem()
plgpu.copy_smem_to_gmem(scratch_smem, out_gmem.at[slice_lhs, :])
plgpu.wait_smem_to_gmem(0)
scratch_shapes = [
plgpu.SMEM(block_lhs_shape, dtype, transforms=transforms),
plgpu.SMEM(block_rhs_shape, dtype, transforms=transforms),
plgpu.SMEM(block_acc_shape, dtype, transforms=transforms),
plgpu.TMEM(block_acc_shape, jnp.float32, collective=True),
plgpu.Barrier(),
plgpu.Barrier(orders_tensor_core=True),
plgpu.ClusterBarrier(collective_axes=("x",)),
]
if lhs_tmem:
scratch_shapes.append(
plgpu.TMEM(block_lhs_shape, dtype, collective=True, packed=True)
)
else:
scratch_shapes.append(None)
f = self.kernel(
kernel,
out_shape=jax.ShapeDtypeStruct(full_acc_shape, dtype),
grid=(1,),
grid_names=("_",),
cluster=(2,),
cluster_names=("x",),
scratch_shapes=scratch_shapes,
)
x = jax.random.uniform(jax.random.key(0), shape=full_lhs_shape, dtype=dtype)
y = jax.random.uniform(jax.random.key(1), shape=full_rhs_shape, dtype=dtype)
result = f(x, y)
expected = x @ y
np.testing.assert_allclose(result, expected, rtol=1e-3)
  @parameterized.parameters(
      (128, jnp.float16)
  )
  def test_matmul_with_smem_aliasing(self, swizzle, dtype):
    # Perform a 128x128 @ 128x128 matmul and a 128x64 @ 64x128 matmul
    # using aliased Refs pointing to the same SMEM address.
    self.skip_if_wg_semantics()
    shape = (128, 128)
    transforms = self.default_transforms(swizzle=swizzle, dtype=dtype)
    def kernel(a_gmem, b_gmem, out_gmem128, out_gmem64,
               a_aliased, b_aliased, out_smem, tma_barrier, mma_barrier, acc_tmem):
      # Note: We directly copy into 128-sized refs assuming that both aliased
      # refs point to the same address, so we can skip the copy for
      # the 64-sized ref. We transpose the LHS Ref so that the 64-sized Ref
      # receives the correct slice of data from this TMA.
      # As this is implementation dependent, this test may break if we change
      # the underlying aliasing behavior.
      a_smem_128, a_smem_64 = a_aliased
      plgpu.copy_gmem_to_smem(a_gmem, a_smem_128, tma_barrier)
      plgpu.barrier_wait(tma_barrier)
      b_smem_128, b_smem_64 = b_aliased
      plgpu.copy_gmem_to_smem(b_gmem, b_smem_128, tma_barrier)
      plgpu.barrier_wait(tma_barrier)
      # Do 128x128 @ 128x128 matmul
      plgpu.tcgen05_mma(acc_tmem,
                        plgpu.transpose_ref(a_smem_128, (1, 0)),
                        b_smem_128,
                        mma_barrier,
                        accumulate=False)
      plgpu.barrier_wait(mma_barrier)
      out_smem[...] = plgpu.async_load_tmem(acc_tmem).astype(dtype)
      plgpu.commit_smem()
      plgpu.copy_smem_to_gmem(out_smem, out_gmem128)
      plgpu.wait_smem_to_gmem(0)
      # Do 128x64 @ 64x128 matmul
      plgpu.wait_load_tmem() # Make sure the loads are complete
      plgpu.tcgen05_mma(acc_tmem,
                        plgpu.transpose_ref(a_smem_64, (1, 0)),
                        b_smem_64,
                        mma_barrier,
                        accumulate=False)
      plgpu.barrier_wait(mma_barrier)
      out_smem[...] = plgpu.async_load_tmem(acc_tmem).astype(dtype)
      plgpu.commit_smem()
      plgpu.copy_smem_to_gmem(out_smem, out_gmem64)
      plgpu.wait_smem_to_gmem(0)
    f = self.kernel(
        kernel,
        out_shape=[jax.ShapeDtypeStruct(shape, dtype),
                   jax.ShapeDtypeStruct(shape, dtype)],
        scratch_shapes=[
            plgpu.RefUnion( # aliased a_smem
                plgpu.SMEM(shape, dtype, transforms=transforms),
                plgpu.SMEM((64, 128), dtype, transforms=transforms),
            ),
            plgpu.RefUnion( # aliased b_smem
                plgpu.SMEM(shape, dtype, transforms=transforms),
                plgpu.SMEM((64, 128), dtype, transforms=transforms),
            ),
            plgpu.SMEM(shape, dtype, transforms=transforms), # out_smem
            plgpu.Barrier(), # tma_barrier
            plgpu.Barrier(orders_tensor_core=True), # mma_barrier
            plgpu.TMEM(shape, jnp.float32), # acc
        ],
    )
    x = jax.random.uniform(jax.random.key(0), shape=shape, dtype=dtype)
    y = jax.random.uniform(jax.random.key(1), shape=shape, dtype=dtype)
    # x is passed pre-transposed; the kernel transposes the LHS ref back.
    result_128, result_64 = f(x.T, y)
    np.testing.assert_allclose(result_128, x @ y, rtol=1e-3)
    np.testing.assert_allclose(result_64, x[:, :64] @ y[:64, :], rtol=1e-3)
  @parameterized.parameters(
      (128, jnp.float16)
  )
  def test_matmul_with_tmem_aliasing(self, swizzle, dtype):
    # Perform a 128x128 @ 128x128 matmul and a 128x64 @ 64x128 matmul
    # using aliased Refs pointing to the same TMEM address.
    self.skip_if_wg_semantics()
    shape = (128, 128)
    swizzle_elems = swizzle // jnp.dtype(dtype).itemsize
    transforms = (
        plgpu.TilingTransform((8, swizzle_elems)),
        plgpu.SwizzleTransform(swizzle),
    )
    def kernel(a_gmem, b_gmem, out_gmem128, out_gmem64,
               a_smem, b_smem, out_smem, tma_barrier, mma_barrier, aliased_refs):
      plgpu.copy_gmem_to_smem(a_gmem, a_smem, tma_barrier)
      plgpu.barrier_wait(tma_barrier)
      plgpu.copy_gmem_to_smem(b_gmem, b_smem, tma_barrier)
      plgpu.barrier_wait(tma_barrier)
      # Unpack the TMEM RefUnion; the 64-wide refs alias the 128-wide ones.
      acc_128, lhs_128, lhs_64, acc_64, _ = aliased_refs
      # Do 128x128 @ 128x128 matmul
      plgpu.async_store_tmem(lhs_128, plgpu.load(a_smem, (), layout=plgpu.Layout.TCGEN05))
      plgpu.commit_tmem()
      plgpu.tcgen05_mma(acc_128,
                        lhs_128,
                        b_smem,
                        mma_barrier,
                        accumulate=False)
      plgpu.barrier_wait(mma_barrier)
      out_smem[...] = plgpu.async_load_tmem(acc_128).astype(dtype)
      plgpu.commit_smem()
      plgpu.copy_smem_to_gmem(out_smem, out_gmem128)
      plgpu.wait_smem_to_gmem(0)
      # Do 128x64 @ 64x128 matmul
      plgpu.wait_load_tmem() # Make sure the loads have completed
      plgpu.async_store_tmem(
          lhs_64,
          plgpu.load(a_smem.at[:, 0:64], (), layout=plgpu.Layout.TCGEN05),
      )
      plgpu.commit_tmem()
      plgpu.tcgen05_mma(acc_64,
                        lhs_64,
                        b_smem.at[0:64, :],
                        mma_barrier,
                        accumulate=False)
      plgpu.barrier_wait(mma_barrier)
      # We don't await the load because TMEM is never modified again.
      out_smem[...] = plgpu.async_load_tmem(acc_64).astype(dtype)
      plgpu.commit_smem()
      plgpu.copy_smem_to_gmem(out_smem, out_gmem64)
      plgpu.wait_smem_to_gmem(0)
    f = self.kernel(
        kernel,
        out_shape=[jax.ShapeDtypeStruct(shape, dtype),
                   jax.ShapeDtypeStruct(shape, dtype)],
        scratch_shapes=[
            plgpu.SMEM(shape, dtype, transforms=transforms), # a_smem
            plgpu.SMEM(shape, dtype, transforms=transforms), # b_smem
            plgpu.SMEM(shape, dtype, transforms=transforms), # out_smem
            plgpu.Barrier(), # tma_barrier
            plgpu.Barrier(orders_tensor_core=True), # mma_barrier
            plgpu.RefUnion( # aliased_refs
                [plgpu.TMEM((128, 128), jnp.float32), # acc
                 plgpu.TMEM((128, 128), dtype, packed=True)], # lhs
                [plgpu.TMEM((128, 64), dtype, packed=True), # lhs
                 plgpu.TMEM((128, 128), jnp.float32)], # acc
                plgpu.TMEM((128, 128), jnp.float32) # unused
            ),
        ],
    )
    x = jax.random.uniform(jax.random.key(0), shape=shape, dtype=dtype)
    y = jax.random.uniform(jax.random.key(1), shape=shape, dtype=dtype)
    result_128, result_64 = f(x, y)
    np.testing.assert_allclose(result_128, x @ y, rtol=1e-3)
    np.testing.assert_allclose(result_64, x[:, :64] @ y[:64, :], rtol=1e-3)
  @parameterized.parameters((0,), (1,))
  def test_mma_barrier_indexing(
      self, barrier_index, shape=(128, 128), swizzle=128, dtype=jnp.float16
  ):
    """Tests passing an indexed element of a multi-barrier allocation
    (barrier_ref.at[i]) to tcgen05_mma and barrier_wait."""
    transforms = self.default_transforms(swizzle=swizzle, dtype=dtype)
    def kernel(a_smem, b_smem, out_ref, acc_tmem, scratch_smem, barrier_ref):
      plgpu.tcgen05_mma(
          acc_tmem,
          a_smem,
          b_smem,
          barrier_ref.at[barrier_index],
          accumulate=False,
      )
      plgpu.barrier_wait(barrier_ref.at[barrier_index])
      scratch_smem[...] = plgpu.async_load_tmem(acc_tmem).astype(dtype)
      plgpu.commit_smem()
      plgpu.copy_smem_to_gmem(scratch_smem, out_ref)
      plgpu.wait_smem_to_gmem(0)
    scratch_shapes = [
        plgpu.TMEM(shape, jnp.float32, packed=False),
        plgpu.SMEM(shape, dtype, transforms=transforms),
        # Two barriers so that index 1 is a genuine non-zero offset.
        plgpu.Barrier(num_barriers=2, orders_tensor_core=True),
    ]
    f = self.pallas_call(
        kernel,
        in_specs=(
            plgpu.BlockSpec(transforms=transforms, memory_space=plgpu.SMEM),
            plgpu.BlockSpec(transforms=transforms, memory_space=plgpu.SMEM),
        ),
        out_specs=plgpu.BlockSpec(memory_space=plgpu.GMEM),
        out_shape=jax.ShapeDtypeStruct(shape, dtype),
        scratch_shapes=scratch_shapes,
    )
    x = jax.random.uniform(jax.random.key(0), shape=shape, dtype=dtype)
    y = jax.random.uniform(jax.random.key(1), shape=shape, dtype=dtype)
    result = f(x, y)
    expected = x @ y
    np.testing.assert_allclose(result, expected, rtol=1e-3)
@parameterized.product(
warp_level=(True, False),
squeezed_index=(True, False),
)
def test_copy_gmem_to_smem_partitioned(self, warp_level, squeezed_index):
self.skip_if_wg_semantics() # `pl.core_map` not implemented for warpgroup.
block_size = (128, 128)
partitioned_block_size = (block_size[0] // 2, block_size[1])
a = jax.random.uniform(
jax.random.key(0), shape=block_size, dtype=jnp.float32)
if squeezed_index:
a = a.reshape(1, *block_size)
b = jax.random.uniform(
jax.random.key(1), shape=block_size, dtype=jnp.float32)
def kernel(a_gmem, b_gmem, out_gmem,
a_smem, b_smem, out_smem,
a_tma_barrier, b_tma_barrier, cluster_barrier):
if squeezed_index:
a_gmem = a_gmem.at[0]
cluster_idx = lax.axis_index("x")
out_slice = pl.ds(cluster_idx * partitioned_block_size[0],
partitioned_block_size[0])
if warp_level:
@pl.core_map(plgpu.WarpMesh(axis_name="warp"))
def _per_warp():
warp_id = lax.axis_index("warp")
@pl.when(warp_id == 0)
def _():
plgpu.copy_gmem_to_smem(
a_gmem,
a_smem,
a_tma_barrier,
collective_axes="x",
partitioned_axis=0,
)
plgpu.copy_gmem_to_smem(
b_gmem,
b_smem,
b_tma_barrier,
collective_axes="x",
partitioned_axis=0,
)
else:
plgpu.copy_gmem_to_smem(
a_gmem,
a_smem,
a_tma_barrier,
collective_axes="x",
partitioned_axis=0,
)
plgpu.copy_gmem_to_smem(
b_gmem,
b_smem,
b_tma_barrier,
collective_axes="x",
partitioned_axis=0,
)
@pl.when(cluster_idx == 0)
def _():
plgpu.barrier_wait(a_tma_barrier)
plgpu.barrier_wait(b_tma_barrier)
plgpu.barrier_arrive(cluster_barrier)
plgpu.barrier_wait(cluster_barrier)
out_smem[...] = a_smem[...] + b_smem[...]
plgpu.copy_smem_to_gmem(out_smem, out_gmem.at[out_slice])
plgpu.wait_smem_to_gmem(0)
f = self.kernel(
kernel,
out_shape=jax.ShapeDtypeStruct(block_size, jnp.float32),
grid=(1,),
grid_names=("_"),
cluster_names=("x",),
cluster=(2,),
scratch_shapes=( # type: ignore
plgpu.SMEM(partitioned_block_size, jnp.float32),
plgpu.SMEM(partitioned_block_size, jnp.float32),
plgpu.SMEM(partitioned_block_size, jnp.float32),
plgpu.Barrier(num_arrivals=1),
plgpu.Barrier(num_arrivals=1),
plgpu.ClusterBarrier(collective_axes=("x",)),
),
)
result = f(a, b)
if squeezed_index:
a = a[0]
np.testing.assert_array_equal(result, a + b)
def test_arrive_wait_on_tc_barrier(self):
def kernel(out_ref, barrier):
plgpu.barrier_arrive(barrier)
plgpu.barrier_wait(barrier)
out_ref[...] = jnp.ones_like(out_ref)
f = self.kernel(
kernel,
out_shape=jax.ShapeDtypeStruct((128,), jnp.float32),
scratch_shapes=( # type: ignore
plgpu.Barrier(num_arrivals=1, orders_tensor_core=True),
),
)
np.testing.assert_array_equal(f(), np.ones((128,), np.float32))
  @parameterized.parameters(
      (0, (1,), False),
      (0, (1,), True),
      (1, (1,), False),
      (2, (1,), False),
      (0, (1, 2,), False),
      (0, (2, 1,), False),
  )
  def test_cluster_launch_control(self, dim, cluster, with_indexing):
    """Tests try_cluster_cancel/query_cluster_cancel work-stealing across
    grid dims, cluster shapes, and an indexed cancel-result ref."""
    self.skip_if_wg_semantics()
    # We attempt to schedule 1 more CTA than can be scheduled at once. Only
    # one CTA will succeed in stealing the last block, and the others will
    # fail. Therefore we test that there is exactly 1 stolen block and the
    # others fail and return -1.
    num_sms = jax.devices()[0].core_count
    cluster_size = math.prod(cluster)
    grid = [1, 1, 1]
    grid[dim] = num_sms // cluster_size + 1
    grid_names = tuple("xyz"[: len(grid)])
    cluster_names = tuple("abc"[: len(cluster)])
    def kernel(out_ref, cancel_result_ref, barrier, _):
      if with_indexing:
        # Exercise passing an indexed slice of the cancel-result allocation.
        cancel_result_ref = cancel_result_ref.at[0]
      plgpu.try_cluster_cancel(cancel_result_ref, barrier)
      plgpu.barrier_wait(barrier)
      cta_ids, cancelled_launch = plgpu.query_cluster_cancel(
          cancel_result_ref, grid_names=grid_names)
      cta_id = sum(cta_ids)
      # Store a sentinel value if no work can be scheduled.
      value = lax.select(cancelled_launch, cta_id, jnp.int32(-1))
      # Flatten (grid index, cluster index) into a unique output slot.
      grid_idx = lax.axis_index(grid_names) * lax.axis_size(
          cluster_names
      ) + lax.axis_index(cluster_names)
      out_ref[grid_idx] = value
    f = self.kernel(
        kernel,
        out_shape=jax.ShapeDtypeStruct((num_sms,), jnp.int32),
        grid=grid,
        grid_names=grid_names,
        num_threads=2,
        thread_name="wg",
        cluster=cluster,
        cluster_names=cluster_names,
        scratch_shapes=[
            plgpu.TryClusterCancelResult(2 if with_indexing else None),
            plgpu.Barrier(num_arrivals=2),
            # Requesting SMEM close to the 228kb limit to ensure that each SM
            # only schedules 1 block.
            plgpu.SMEM((220 * 1024,), jnp.int8),
        ],
    )
    result = np.sort(f())
    # Exactly cluster_size entries (one stolen cluster) hold the last CTA id;
    # all other CTAs see a cancelled launch and report -1.
    last_cta_id = math.ceil(num_sms / cluster_size)
    expected = np.array([-1] * (num_sms - cluster_size) + [last_cta_id] * cluster_size)
    np.testing.assert_equal(result, expected)
| PallasCallSm100ATest |
python | catalyst-team__catalyst | catalyst/contrib/models/resnet_encoder.py | {
"start": 320,
"end": 3350
} | class ____(nn.Module):
"""Specifies ResNet encoders for classification network.
Examples:
>>> encoders = ResnetEncoder(
>>> arch="resnet18",
>>> pretrained=False,
>>> state_dict="/model/path/resnet18-5c106cde.pth"
>>> )
"""
def __init__(
self,
arch: str = "resnet18",
pretrained: bool = True,
frozen: bool = True,
pooling: str = None,
pooling_kwargs: dict = None,
cut_layers: int = 2,
state_dict: Union[dict, str, Path] = None,
):
"""
Args:
arch: Name for resnet. Have to be one of
resnet18, resnet34, resnet50, resnet101, resnet152
pretrained: If True, returns a model pre-trained on ImageNet
frozen: If frozen, sets requires_grad to False
pooling: pooling
pooling_kwargs: params for pooling
state_dict (Union[dict, str, Path]): Path to ``torch.Model``
or a dict containing parameters and persistent buffers.
"""
super().__init__()
resnet = torchvision.models.__dict__[arch](pretrained=pretrained)
if state_dict is not None:
if isinstance(state_dict, (Path, str)):
state_dict = torch.load(str(state_dict))
resnet.load_state_dict(state_dict)
modules = list(resnet.children())[:-cut_layers] # delete last layers
if frozen:
for module in modules:
utils.set_requires_grad(module, requires_grad=False)
if pooling is not None:
pooling_kwargs = pooling_kwargs or {}
pooling_layer_fn = REGISTRY.get(pooling)
pooling_layer = (
pooling_layer_fn(in_features=resnet.fc.in_features, **pooling_kwargs)
if "attn" in pooling.lower()
else pooling_layer_fn(**pooling_kwargs)
)
modules += [pooling_layer]
if hasattr(pooling_layer, "out_features"):
out_features = pooling_layer.out_features(
in_features=resnet.fc.in_features
)
else:
out_features = None
else:
out_features = resnet.fc.in_features
modules += [Flatten()]
self.out_features = out_features
self.encoder = nn.Sequential(*modules)
def forward(self, image):
"""Extract the image feature vectors."""
features = self.encoder(image)
return features
def get_resnet1d(model: ResNet) -> ResNet:
"""
Args:
model: ResNet model
Returns:
ResNet model with changed 1st conv layer
"""
conv_old = model.conv1
model.conv1 = nn.Conv2d(
in_channels=1,
out_channels=conv_old.out_channels,
kernel_size=conv_old.kernel_size,
stride=conv_old.stride,
padding=conv_old.padding,
bias=conv_old.bias,
)
return model
__all__ = ["ResnetEncoder"]
| ResnetEncoder |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pydoclint/DOC202_numpy.py | {
"start": 346,
"end": 805
} | class ____:
# DOC202
def foo(self) -> str:
"""
Do something
Parameters
----------
num : int
A number
Returns
-------
str
A string
"""
print('test')
# OK
def bar(self) -> str:
"""
Do something
Parameters
----------
num : int
A number
"""
print('test')
import abc
| Bar |
python | PrefectHQ__prefect | src/prefect/client/schemas/responses.py | {
"start": 3661,
"end": 4243
} | class ____(PrefectBaseModel):
"""Represents a history of aggregation states over an interval"""
interval_start: DateTime = Field(
default=..., description="The start date of the interval."
)
interval_end: DateTime = Field(
default=..., description="The end date of the interval."
)
states: list[HistoryResponseState] = Field(
default=..., description="A list of state histories during the interval."
)
StateResponseDetails = Union[
StateAcceptDetails, StateWaitDetails, StateRejectDetails, StateAbortDetails
]
| HistoryResponse |
python | tensorflow__tensorflow | tensorflow/python/distribute/parameter_server_strategy_test.py | {
"start": 32727,
"end": 36053
} | class ____(ParameterServerStrategyTestBase,
parameterized.TestCase):
@classmethod
def setUpClass(cls):
cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
num_workers=3, num_ps=2, has_chief=True)
cls._default_target = 'grpc://' + cls._cluster_spec[CHIEF][0]
@combinations.generate(
combinations.combine(mode=['graph'], required_gpus=[0, 1, 2]))
def testSimpleBetweenGraph(self, required_gpus):
self._run_between_graph_clients(self._test_simple_increment,
self._cluster_spec, required_gpus)
@combinations.generate(
combinations.combine(mode=['graph'], num_gpus=[0, 1, 2]))
def testMinimizeLossGraph(self, num_gpus):
self._run_between_graph_clients(self._test_minimize_loss_graph,
self._cluster_spec, num_gpus)
@combinations.generate(combinations.combine(mode=['graph']))
def testGlobalStepIsWrappedOnTwoGPUs(self):
strategy, _, _ = create_test_objects(num_gpus=2)
with ops.Graph().as_default(), strategy.scope():
created_step = training_util.create_global_step()
get_step = training_util.get_global_step()
self.assertEqual(created_step, get_step,
msg=('created_step %s type %s vs. get_step %s type %s' %
(id(created_step), created_step.__class__.__name__,
id(get_step), get_step.__class__.__name__)))
self.assertIs(ps_values.AggregatingVariable, type(created_step))
self.assertIs(ps_values.AggregatingVariable, type(get_step))
self.assertIs(strategy, created_step.distribute_strategy)
@combinations.generate(combinations.combine(mode=['graph']))
def testGlobalStepIsNotWrappedOnOneGPU(self):
strategy, _, _ = create_test_objects(num_gpus=1)
with ops.Graph().as_default(), strategy.scope():
created_step = training_util.create_global_step()
get_step = training_util.get_global_step()
self.assertEqual(created_step, get_step,
msg=('created_step %s type %s vs. get_step %s type %s' %
(id(created_step), created_step.__class__.__name__,
id(get_step), get_step.__class__.__name__)))
self.assertIs(resource_variable_ops.ResourceVariable, type(created_step))
self.assertIs(resource_variable_ops.ResourceVariable, type(get_step))
# All variables have an _distribute_strategy parameter. Only variable
# subclasses in distribution strategy expose it publicly.
self.assertFalse(hasattr(strategy, 'distribute_strategy'))
self.assertIs(strategy, created_step._distribute_strategy)
@combinations.generate(combinations.combine(mode=['graph'], required_gpus=2))
def testValueContainer(self):
strategy, _, _ = create_test_objects(num_gpus=2)
with ops.Graph().as_default(), strategy.scope():
def f():
with backprop.GradientTape() as tape:
v = variable_scope.get_variable('v', initializer=10.0)
_ = v * v
v, = tape.watched_variables()
w = strategy.extended.value_container(v)
self.assertIs(ps_values.AggregatingVariable, type(w))
strategy.extended.call_for_each_replica(f)
| ParameterServerStrategyWithChiefTest |
python | allegroai__clearml | clearml/backend_api/services/v2_13/queues.py | {
"start": 8594,
"end": 16232
} | class ____(NonStrictDataModel):
"""
:param id: Queue id
:type id: str
:param name: Queue name
:type name: str
:param user: Associated user id
:type user: str
:param company: Company id
:type company: str
:param created: Queue creation time
:type created: datetime.datetime
:param tags: User-defined tags
:type tags: Sequence[str]
:param system_tags: System tags. This field is reserved for system use, please
don't use it.
:type system_tags: Sequence[str]
:param entries: List of ordered queue entries
:type entries: Sequence[Entry]
:param metadata: Queue metadata
:type metadata: list
"""
_schema = {
"definitions": {
"metadata_item": {
"properties": {
"key": {
"description": "The key uniquely identifying the metadata item inside the given entity",
"type": "string",
},
"type": {
"description": "The type of the metadata item",
"type": "string",
},
"value": {
"description": "The value stored in the metadata item",
"type": "string",
},
},
"type": "object",
}
},
"properties": {
"company": {"description": "Company id", "type": ["string", "null"]},
"created": {
"description": "Queue creation time",
"format": "date-time",
"type": ["string", "null"],
},
"entries": {
"description": "List of ordered queue entries",
"items": {"$ref": "#/definitions/entry"},
"type": ["array", "null"],
},
"id": {"description": "Queue id", "type": ["string", "null"]},
"metadata": {
"type": "array",
"items": {"$ref": "#/definitions/metadata_item"},
"description": "Queue metadata",
},
"name": {"description": "Queue name", "type": ["string", "null"]},
"system_tags": {
"description": "System tags. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"user": {"description": "Associated user id", "type": ["string", "null"]},
},
"type": "object",
}
def __init__(
self,
id: Optional[str] = None,
name: Optional[str] = None,
user: Optional[str] = None,
company: Optional[str] = None,
created: Optional[str] = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
entries: Optional[List[Any]] = None,
metadata: Optional[List[Any]] = None,
**kwargs: Any
) -> None:
super(Queue, self).__init__(**kwargs)
self.id = id
self.name = name
self.user = user
self.company = company
self.created = created
self.tags = tags
self.system_tags = system_tags
self.entries = entries
self.metadata = metadata
@schema_property("id")
def id(self) -> Optional[str]:
return self._property_id
@id.setter
def id(self, value: Optional[str]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("name")
def name(self) -> Optional[str]:
return self._property_name
@name.setter
def name(self, value: Optional[str]) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("user")
def user(self) -> Optional[str]:
return self._property_user
@user.setter
def user(self, value: Optional[str]) -> None:
if value is None:
self._property_user = None
return
self.assert_isinstance(value, "user", six.string_types)
self._property_user = value
@schema_property("company")
def company(self) -> Optional[str]:
return self._property_company
@company.setter
def company(self, value: Optional[str]) -> None:
if value is None:
self._property_company = None
return
self.assert_isinstance(value, "company", six.string_types)
self._property_company = value
@schema_property("created")
def created(self) -> Optional[str]:
return self._property_created
@created.setter
def created(self, value: Optional[str]) -> None:
if value is None:
self._property_created = None
return
self.assert_isinstance(value, "created", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_created = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("entries")
def entries(self) -> Optional[List[Any]]:
return self._property_entries
@entries.setter
def entries(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_entries = None
return
self.assert_isinstance(value, "entries", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [Entry.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "entries", Entry, is_array=True)
self._property_entries = value
@schema_property("metadata")
def metadata(self) -> Optional[List[Any]]:
return self._property_metadata
@metadata.setter
def metadata(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_metadata = None
return
self.assert_isinstance(value, "metadata", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [MetadataItem.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "metadata", MetadataItem, is_array=True)
self._property_metadata = value
| Queue |
python | great-expectations__great_expectations | great_expectations/render/renderer/opsgenie_renderer.py | {
"start": 476,
"end": 2401
} | class ____(Renderer):
@override
def render(self, checkpoint_result: CheckpointResult):
text_blocks: list[str] = []
for run_result in checkpoint_result.run_results.values():
text_block = self._render_validation_result(result=run_result)
text_blocks.append(text_block)
return self._concatenate_text_blocks(
checkpoint_result=checkpoint_result, text_blocks=text_blocks
)
def _render_validation_result(self, result: ExpectationSuiteValidationResult) -> str:
suite_name = result.suite_name
data_asset_name = result.asset_name or "__no_data_asset_name__"
n_checks_succeeded = result.statistics["successful_expectations"]
n_checks = result.statistics["evaluated_expectations"]
run_id = result.meta.get("run_id", "__no_run_id__")
batch_id = result.batch_id or "__no_batch_id__"
check_details_text = f"{n_checks_succeeded} of {n_checks} expectations were met"
if result.success:
status = "Success 🎉"
else:
status = "Failed ❌"
return f"""Batch Validation Status: {status}
Expectation Suite Name: {suite_name}
Data Asset Name: {data_asset_name}
Run ID: {run_id}
Batch ID: {batch_id}
Summary: {check_details_text}"""
def _concatenate_text_blocks(
self, checkpoint_result: CheckpointResult, text_blocks: list[str]
) -> str:
checkpoint_name = checkpoint_result.checkpoint_config.name
success = checkpoint_result.success
run_id = checkpoint_result.run_id.run_time
title = f"Checkpoint: {checkpoint_name} - Run ID: {run_id}"
status = "Status: Failed ❌" if not success else "Status: Success 🎉"
return f"{title}\n{status}\n\n" + "\n\n".join(text_blocks)
def _custom_blocks(self, evr):
return None
def _get_report_element(self, docs_link):
return None
| OpsgenieRenderer |
python | openai__openai-python | src/openai/types/graders/text_similarity_grader.py | {
"start": 197,
"end": 887
} | class ____(BaseModel):
evaluation_metric: Literal[
"cosine",
"fuzzy_match",
"bleu",
"gleu",
"meteor",
"rouge_1",
"rouge_2",
"rouge_3",
"rouge_4",
"rouge_5",
"rouge_l",
]
"""The evaluation metric to use.
One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`,
`rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`.
"""
input: str
"""The text being graded."""
name: str
"""The name of the grader."""
reference: str
"""The text being graded against."""
type: Literal["text_similarity"]
"""The type of grader."""
| TextSimilarityGrader |
python | getsentry__sentry | src/sentry/core/endpoints/organization_projects_experiment.py | {
"start": 2128,
"end": 2274
} | class ____(OrganizationPermission):
scope_map = {
"POST": ["project:read", "project:write", "project:admin"],
}
| OrgProjectPermission |
python | psf__black | tests/data/cases/preview_long_strings__regression.py | {
"start": 5447,
"end": 5728
} | class ____:
def foo(self):
if True:
xxxxx_xxxxxxxxxxxx('xxx xxxxxx xxx xxxxxxxxx.xx xx xxxxxxxx. xxx xxxxxxxxxxxxx.xx xxxxxxx '
+ 'xx xxxxxx xxxxxx xxxxxx xx xxxxxxx xxx xxx ${0} xx x xxxxxxxx xxxxx'.xxxxxx(xxxxxx_xxxxxx_xxx))
| A |
python | rapidsai__cudf | python/cudf/cudf/core/dataframe.py | {
"start": 6716,
"end": 14690
} | class ____(_DataFrameIndexer):
"""
For selection by label.
"""
@_performance_tracking
def __getitem__(self, arg):
if isinstance(self._frame.index, MultiIndex):
# This try/except block allows the use of pandas-like
# tuple arguments to index into MultiIndex dataframes.
try:
return self._getitem_tuple_arg(arg)
except (TypeError, KeyError, IndexError, ValueError):
return self._getitem_tuple_arg((arg, slice(None)))
else:
(
row_key,
(
col_is_scalar,
ca,
),
) = indexing_utils.destructure_dataframe_loc_indexer(
arg, self._frame
)
row_spec = indexing_utils.parse_row_loc_indexer(
row_key, self._frame.index
)
return self._frame._getitem_preprocessed(
row_spec, col_is_scalar, ca
)
@_performance_tracking
def _getitem_tuple_arg(self, arg):
# Step 1: Gather columns
if isinstance(arg, tuple):
columns_df = self._frame._get_columns_by_label(arg[1])
columns_df.index = self._frame.index
else:
columns_df = self._frame
# Step 2: Gather rows
if isinstance(columns_df.index, MultiIndex):
if isinstance(arg, (MultiIndex, pd.MultiIndex)):
if isinstance(arg, pd.MultiIndex):
arg = MultiIndex(
levels=arg.levels,
codes=arg.codes,
names=arg.names,
)
indices = _indices_from_labels(columns_df, arg)
return columns_df.take(indices)
else:
if isinstance(arg, tuple):
row_arg = arg[0]
elif is_scalar(arg):
row_arg = (arg,)
else:
row_arg = arg
result = columns_df.index._get_row_major(columns_df, row_arg)
if (
len(result) == 1
and isinstance(arg, tuple)
and len(arg) > 1
and is_scalar(arg[1])
):
return result._columns[0].element_indexing(0)
return result
else:
raise RuntimeError(
"Should have been handled by now. Please raise Github issue "
"at https://github.com/rapidsai/cudf/issues"
)
@_performance_tracking
def _setitem_tuple_arg(self, key, value):
if (
isinstance(self._frame.index, MultiIndex)
or self._frame._data.multiindex
):
raise NotImplementedError(
"Setting values using df.loc[] not supported on "
"DataFrames with a MultiIndex"
)
try:
columns_df = self._frame._get_columns_by_label(key[1])
except KeyError:
if not self._frame.empty and isinstance(key[0], slice):
indexer = indexing_utils.find_label_range_or_mask(
key[0], self._frame.index
)
index = self._frame.index
if isinstance(indexer, indexing_utils.EmptyIndexer):
idx = index[0:0:1]
elif isinstance(indexer, indexing_utils.SliceIndexer):
idx = index[indexer.key]
else:
idx = index[indexer.key.column]
elif self._frame.empty and isinstance(key[0], slice):
idx = None
else:
if is_scalar(key[0]):
arr = [key[0]]
else:
arr = key[0]
idx = Index(arr)
if is_scalar(value):
length = len(idx) if idx is not None else 1
value = as_column(value, length=length)
if isinstance(value, ColumnBase):
new_ser = Series._from_column(value, index=idx)
else:
new_ser = Series(value, index=idx)
if len(self._frame) != 0:
new_ser = new_ser._align_to_index(
self._frame.index, how="right"
)
if len(self._frame) == 0:
self._frame.index = (
idx if idx is not None else cudf.RangeIndex(len(new_ser))
)
self._frame._data.insert(key[1], new_ser._column)
else:
if is_scalar(value):
try:
if columns_df._num_columns:
self._frame[
columns_df._column_names[0]
].loc._loc_to_iloc(key[0])
for col in columns_df._column_names:
self._frame[col].loc[key[0]] = value
except KeyError:
if not is_scalar(key[0]):
raise
# TODO: There is a potential bug here if the inplace modifications
# done above fail half-way we are left with a partially modified
# frame. Need to handle this case better.
self.append_new_row(key, value, columns_df=columns_df)
elif isinstance(value, cudf.DataFrame):
if value.shape != self._frame.loc[key[0]].shape:
_shape_mismatch_error(
value.shape,
self._frame.loc[key[0]].shape,
)
value_column_names = set(value._column_names)
scatter_map = _indices_from_labels(self._frame, key[0])
for col in columns_df._column_names:
columns_df[col][scatter_map] = (
value._data[col] if col in value_column_names else NA
)
else:
if not is_column_like(value):
value = cupy.asarray(value)
if getattr(value, "ndim", 1) == 2:
# If the inner dimension is 1, it's broadcastable to
# all columns of the dataframe.
indexed_shape = columns_df.loc[key[0]].shape
if value.shape[1] == 1:
if value.shape[0] != indexed_shape[0]:
_shape_mismatch_error(value.shape, indexed_shape)
for i, col in enumerate(columns_df._column_names):
self._frame[col].loc[key[0]] = value[:, 0]
else:
if value.shape != indexed_shape:
_shape_mismatch_error(value.shape, indexed_shape)
for i, col in enumerate(columns_df._column_names):
self._frame[col].loc[key[0]] = value[:, i]
else:
# handle cases where value is 1d object:
# If the key on column axis is a scalar, we indexed
# a single column; The 1d value should assign along
# the columns.
if is_scalar(key[1]):
for col in columns_df._column_names:
self._frame[col].loc[key[0]] = value
# Otherwise, there are two situations. The key on row axis
# can be a scalar or 1d. In either of the situation, the
# ith element in value corresponds to the ith row in
# the indexed object.
# If the key is 1d, a broadcast will happen.
else:
for i, col in enumerate(columns_df._column_names):
self._frame[col].loc[key[0]] = value[i]
| _DataFrameLocIndexer |
python | numpy__numpy | numpy/_core/tests/test_scalarmath.py | {
"start": 25643,
"end": 27302
} | class ____:
def _test_type_repr(self, t):
finfo = np.finfo(t)
last_fraction_bit_idx = finfo.nexp + finfo.nmant
last_exponent_bit_idx = finfo.nexp
storage_bytes = np.dtype(t).itemsize * 8
# could add some more types to the list below
for which in ['small denorm', 'small norm']:
# Values from https://en.wikipedia.org/wiki/IEEE_754
constr = np.array([0x00] * storage_bytes, dtype=np.uint8)
if which == 'small denorm':
byte = last_fraction_bit_idx // 8
bytebit = 7 - (last_fraction_bit_idx % 8)
constr[byte] = 1 << bytebit
elif which == 'small norm':
byte = last_exponent_bit_idx // 8
bytebit = 7 - (last_exponent_bit_idx % 8)
constr[byte] = 1 << bytebit
else:
raise ValueError('hmm')
val = constr.view(t)[0]
val_repr = repr(val)
val2 = t(eval(val_repr))
if not (val2 == 0 and val < 1e-100):
assert_equal(val, val2)
def test_float_repr(self):
# long double test cannot work, because eval goes through a python
# float
for t in [np.float32, np.float64]:
self._test_type_repr(t)
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
class TestSizeOf:
def test_equal_nbytes(self):
for type in types:
x = type(0)
assert_(sys.getsizeof(x) > x.nbytes)
def test_error(self):
d = np.float32()
assert_raises(TypeError, d.__sizeof__, "a")
| TestRepr |
python | pydata__xarray | xarray/tests/test_backends_datatree.py | {
"start": 21247,
"end": 26150
} | class ____:
"""Test PyDAP backend for DataTree."""
engine: T_DataTreeNetcdfEngine | None = "pydap"
# you can check these by adding a .dmr to urls, and replacing dap4 with http
unaligned_datatree_url = (
"dap4://test.opendap.org/opendap/dap4/unaligned_simple_datatree.nc.h5"
)
all_aligned_child_nodes_url = (
"dap4://test.opendap.org/opendap/dap4/all_aligned_child_nodes.nc.h5"
)
simplegroup_datatree_url = "dap4://test.opendap.org/opendap/dap4/SimpleGroup.nc4.h5"
def test_open_datatree_unaligned_hierarchy(
self,
url=unaligned_datatree_url,
) -> None:
with pytest.raises(
ValueError,
match=(
re.escape(
"group '/Group1/subgroup1' is not aligned with its parents:\nGroup:\n"
)
+ ".*"
),
):
open_datatree(url, engine=self.engine)
def test_open_groups(self, url=unaligned_datatree_url) -> None:
"""Test `open_groups` with a netCDF4/HDF5 file with an unaligned group hierarchy."""
unaligned_dict_of_datasets = open_groups(url, engine=self.engine)
# Check that group names are keys in the dictionary of `xr.Datasets`
assert "/" in unaligned_dict_of_datasets.keys()
assert "/Group1" in unaligned_dict_of_datasets.keys()
assert "/Group1/subgroup1" in unaligned_dict_of_datasets.keys()
# Check that group name returns the correct datasets
with xr.open_dataset(url, engine=self.engine, group="/") as expected:
assert_identical(unaligned_dict_of_datasets["/"], expected)
with xr.open_dataset(url, group="Group1", engine=self.engine) as expected:
assert_identical(unaligned_dict_of_datasets["/Group1"], expected)
with xr.open_dataset(
url,
group="/Group1/subgroup1",
engine=self.engine,
) as expected:
assert_identical(unaligned_dict_of_datasets["/Group1/subgroup1"], expected)
def test_inherited_coords(self, tmpdir, url=simplegroup_datatree_url) -> None:
"""Test that `open_datatree` inherits coordinates from root tree.
This particular h5 file is a test file that inherits the time coordinate from the root
dataset to the child dataset.
Group: /
│ Dimensions: (time: 1, Z: 1000, nv: 2)
│ Coordinates:
| time: (time) float32 0.5
| Z: (Z) float32 -0.0 -1.0 -2.0 ...
│ Data variables:
│ Pressure (Z) float32 ...
| time_bnds (time, nv) float32 ...
└── Group: /SimpleGroup
│ Dimensions: (time: 1, Z: 1000, nv: 2, Y: 40, X: 40)
│ Coordinates:
| Y: (Y) int16 1 2 3 4 ...
| X: (X) int16 1 2 3 4 ...
| Inherited coordinates:
| time: (time) float32 0.5
| Z: (Z) float32 -0.0 -1.0 -2.0 ...
│ Data variables:
│ Temperature (time, Z, Y, X) float32 ...
| Salinity (time, Z, Y, X) float32 ...
"""
import pydap
from pydap.net import create_session
# Create a session with pre-set retry params in pydap backend, to cache urls
cache_name = tmpdir / "debug"
session = create_session(
use_cache=True, cache_kwargs={"cache_name": cache_name}
)
session.cache.clear()
_version_ = Version(pydap.__version__)
tree = open_datatree(url, engine=self.engine, session=session)
assert set(tree.dims) == {"time", "Z", "nv"}
assert tree["/SimpleGroup"].coords["time"].dims == ("time",)
assert tree["/SimpleGroup"].coords["Z"].dims == ("Z",)
assert tree["/SimpleGroup"].coords["Y"].dims == ("Y",)
assert tree["/SimpleGroup"].coords["X"].dims == ("X",)
with xr.open_dataset(url, engine=self.engine, group="/SimpleGroup") as expected:
assert set(tree["/SimpleGroup"].dims) == set(
list(expected.dims) + ["Z", "nv"]
)
if _version_ > Version("3.5.5"):
# Total downloads are: 1 dmr, + 1 dap url for all dimensions for each group
assert len(session.cache.urls()) == 3
else:
# 1 dmr + 1 dap url per dimension (total there are 4 dimension arrays)
assert len(session.cache.urls()) == 5
def test_open_groups_to_dict(self, url=all_aligned_child_nodes_url) -> None:
aligned_dict_of_datasets = open_groups(url, engine=self.engine)
aligned_dt = DataTree.from_dict(aligned_dict_of_datasets)
with open_datatree(url, engine=self.engine) as opened_tree:
assert opened_tree.identical(aligned_dt)
@requires_zarr
@parametrize_zarr_format
| TestPyDAPDatatreeIO |
python | mlflow__mlflow | mlflow/telemetry/events.py | {
"start": 9919,
"end": 9982
} | class ____(Event):
name: str = "autologging"
| AutologgingEvent |
python | gevent__gevent | src/gevent/backdoor.py | {
"start": 7310,
"end": 8074
} | class ____(_BaseFileLike):
"""
A file-like object that wraps the result of socket.makefile (composition
instead of inheritance lets us work identically under CPython and PyPy).
We write directly to the socket, avoiding the buffering that the text-oriented
makefile would want to do (otherwise we'd be at the mercy of waiting on a
flush() to get called for the remote user to see data); this beats putting
the file in binary mode and translating everywhere with a non-default
encoding.
"""
def flush(self):
"Does nothing. raw_input() calls this, only on Python 3."
def write(self, data):
if not isinstance(data, bytes):
data = data.encode(self.encoding)
self.sock.sendall(data)
| _StdErr |
python | matplotlib__matplotlib | lib/matplotlib/scale.py | {
"start": 26449,
"end": 31774
} | class ____(ScaleBase):
"""
Logit scale for data between zero and one, both excluded.
This scale is similar to a log scale close to zero and to one, and almost
linear around 0.5. It maps the interval ]0, 1[ onto ]-infty, +infty[.
"""
name = 'logit'
@_make_axis_parameter_optional
def __init__(self, axis=None, nonpositive='mask', *,
one_half=r"\frac{1}{2}", use_overline=False):
r"""
Parameters
----------
axis : `~matplotlib.axis.Axis`
The axis for the scale.
.. note::
This parameter is unused and about to be removed in the future.
It can already now be left out because of special preprocessing,
so that ``LogitScale()`` is valid.
nonpositive : {'mask', 'clip'}
Determines the behavior for values beyond the open interval ]0, 1[.
They can either be masked as invalid, or clipped to a number very
close to 0 or 1.
use_overline : bool, default: False
Indicate the usage of survival notation (\overline{x}) in place of
standard notation (1-x) for probability close to one.
one_half : str, default: r"\frac{1}{2}"
The string used for ticks formatter to represent 1/2.
"""
self._transform = LogitTransform(nonpositive)
self._use_overline = use_overline
self._one_half = one_half
def get_transform(self):
"""Return the `.LogitTransform` associated with this scale."""
return self._transform
def set_default_locators_and_formatters(self, axis):
# docstring inherited
# ..., 0.01, 0.1, 0.5, 0.9, 0.99, ...
axis.set_major_locator(LogitLocator())
axis.set_major_formatter(
LogitFormatter(
one_half=self._one_half,
use_overline=self._use_overline
)
)
axis.set_minor_locator(LogitLocator(minor=True))
axis.set_minor_formatter(
LogitFormatter(
minor=True,
one_half=self._one_half,
use_overline=self._use_overline
)
)
def limit_range_for_scale(self, vmin, vmax, minpos):
"""
Limit the domain to values between 0 and 1 (excluded).
"""
if not np.isfinite(minpos):
minpos = 1e-7 # Should rarely (if ever) have a visible effect.
return (minpos if vmin <= 0 else vmin,
1 - minpos if vmax >= 1 else vmax)
_scale_mapping = {
'linear': LinearScale,
'log': LogScale,
'symlog': SymmetricalLogScale,
'asinh': AsinhScale,
'logit': LogitScale,
'function': FuncScale,
'functionlog': FuncScaleLog,
}
# caching of signature info
# For backward compatibility, the built-in scales will keep the *axis* parameter
# in their constructors until matplotlib 3.15, i.e. as long as the *axis* parameter
# is still supported.
_scale_has_axis_parameter = {
'linear': True,
'log': True,
'symlog': True,
'asinh': True,
'logit': True,
'function': True,
'functionlog': True,
}
def get_scale_names():
"""Return the names of the available scales."""
return sorted(_scale_mapping)
def scale_factory(scale, axis, **kwargs):
"""
Return a scale class by name.
Parameters
----------
scale : {%(names)s}
axis : `~matplotlib.axis.Axis`
"""
scale_cls = _api.check_getitem(_scale_mapping, scale=scale)
if _scale_has_axis_parameter[scale]:
return scale_cls(axis, **kwargs)
else:
return scale_cls(**kwargs)
if scale_factory.__doc__:
scale_factory.__doc__ = scale_factory.__doc__ % {
"names": ", ".join(map(repr, get_scale_names()))}
def register_scale(scale_class):
"""
Register a new kind of scale.
Parameters
----------
scale_class : subclass of `ScaleBase`
The scale to register.
"""
_scale_mapping[scale_class.name] = scale_class
# migration code to handle the *axis* parameter
has_axis_parameter = "axis" in inspect.signature(scale_class).parameters
_scale_has_axis_parameter[scale_class.name] = has_axis_parameter
if has_axis_parameter:
_api.warn_deprecated(
"3.11",
message=f"The scale {scale_class.__qualname__!r} uses an 'axis' parameter "
"in the constructors. This parameter is pending-deprecated since "
"matplotlib 3.11. It will be fully deprecated in 3.13 and removed "
"in 3.15. Starting with 3.11, 'register_scale()' accepts scales "
"without the *axis* parameter.",
pending=True,
)
def _get_scale_docs():
"""
Helper function for generating docstrings related to scales.
"""
docs = []
for name, scale_class in _scale_mapping.items():
docstring = inspect.getdoc(scale_class.__init__) or ""
docs.extend([
f" {name!r}",
"",
textwrap.indent(docstring, " " * 8),
""
])
return "\n".join(docs)
_docstring.interpd.register(
scale_type='{%s}' % ', '.join([repr(x) for x in get_scale_names()]),
scale_docs=_get_scale_docs().rstrip(),
)
| LogitScale |
python | scrapy__scrapy | tests/test_cmdline_crawl_with_pipeline/test_spider/pipelines.py | {
"start": 131,
"end": 296
} | class ____:
def open_spider(self, spider):
raise RuntimeError("exception")
def process_item(self, item):
return item
| TestSpiderExceptionPipeline |
python | python__mypy | mypy/moduleinspect.py | {
"start": 3497,
"end": 6326
} | class ____:
"""Perform runtime introspection of modules in a separate process.
Reuse the process for multiple modules for efficiency. However, if there is an
error, retry using a fresh process to avoid cross-contamination of state between
modules.
We use a separate process to isolate us from many side effects. For example, the
import of a module may kill the current process, and we want to recover from that.
Always use in a with statement for proper clean-up:
with ModuleInspect() as m:
p = m.get_package_properties('urllib.parse')
"""
def __init__(self) -> None:
self._start()
def _start(self) -> None:
if sys.platform == "linux":
ctx = get_context("forkserver")
else:
ctx = get_context("spawn")
self.tasks: Queue[str] = ctx.Queue()
self.results: Queue[ModuleProperties | str] = ctx.Queue()
self.proc = ctx.Process(target=worker, args=(self.tasks, self.results, sys.path))
self.proc.start()
self.counter = 0 # Number of successful roundtrips
def close(self) -> None:
"""Free any resources used."""
self.proc.terminate()
def get_package_properties(self, package_id: str) -> ModuleProperties:
"""Return some properties of a module/package using runtime introspection.
Raise InspectError if the target couldn't be imported.
"""
self.tasks.put(package_id)
res = self._get_from_queue()
if res is None:
# The process died; recover and report error.
self._start()
raise InspectError(f"Process died when importing {package_id!r}")
if isinstance(res, str):
# Error importing module
if self.counter > 0:
# Also try with a fresh process. Maybe one of the previous imports has
# corrupted some global state.
self.close()
self._start()
return self.get_package_properties(package_id)
raise InspectError(res)
self.counter += 1
return res
def _get_from_queue(self) -> ModuleProperties | str | None:
"""Get value from the queue.
Return the value read from the queue, or None if the process unexpectedly died.
"""
max_iter = 600
n = 0
while True:
if n == max_iter:
raise RuntimeError("Timeout waiting for subprocess")
try:
return self.results.get(timeout=0.05)
except queue.Empty:
if not self.proc.is_alive():
return None
n += 1
def __enter__(self) -> ModuleInspect:
return self
def __exit__(self, *args: object) -> None:
self.close()
| ModuleInspect |
python | explosion__spaCy | spacy/lang/ar/__init__.py | {
"start": 479,
"end": 572
} | class ____(Language):
Defaults = ArabicDefaults
lang = "ar"
__all__ = ["Arabic"]
| Arabic |
python | mlflow__mlflow | tests/store/artifact/test_artifact_repo.py | {
"start": 1699,
"end": 8983
} | class ____(ArtifactRepository):
"""Implementation of ArtifactRepository which simulates download failures."""
def log_artifact(self, local_file, artifact_path=None):
raise NotImplementedError()
def log_artifacts(self, local_dir, artifact_path=None):
raise NotImplementedError()
def list_artifacts(self, path):
raise NotImplementedError()
def _download_file(self, remote_file_path, local_path):
raise MlflowException(_MOCK_ERROR)
@pytest.mark.parametrize(
("base_uri", "download_arg", "list_return_val"),
[
(_PARENT_MODEL_DIR, "", [_MODEL_FILE]),
(_PARENT_MODEL_DIR, "", [".", _MODEL_FILE]),
(_PARENT_DIR, _MODEL_DIR, [_MODEL_DIR + "/" + _MODEL_FILE]),
(_PARENT_DIR, _MODEL_DIR, [_MODEL_DIR, _MODEL_DIR + "/" + _MODEL_FILE]),
("", _PARENT_MODEL_DIR, [_PARENT_MODEL_FILE]),
("", _PARENT_MODEL_DIR, [_PARENT_MODEL_DIR, _PARENT_MODEL_FILE]),
],
)
def test_download_artifacts_does_not_infinitely_loop(base_uri, download_arg, list_return_val):
def list_artifacts(path):
fullpath = posixpath.join(base_uri, path)
if fullpath.endswith(_MODEL_DIR) or fullpath.endswith(_MODEL_DIR + "/"):
return [FileInfo(item, False, _DUMMY_FILE_SIZE) for item in list_return_val]
elif fullpath.endswith(_PARENT_MODEL_DIR) or fullpath.endswith(_PARENT_MODEL_DIR + "/"):
return [FileInfo(posixpath.join(path, _MODEL_DIR), True, _EMPTY_FILE_SIZE)]
else:
return []
with mock.patch.object(ArtifactRepositoryImpl, "list_artifacts") as list_artifacts_mock:
list_artifacts_mock.side_effect = list_artifacts
repo = ArtifactRepositoryImpl(base_uri)
repo.download_artifacts(download_arg)
def test_download_artifacts_download_file():
with mock.patch.object(ArtifactRepositoryImpl, "list_artifacts", return_value=[]):
repo = ArtifactRepositoryImpl(_PARENT_DIR)
repo.download_artifacts(_MODEL_FILE)
def test_download_artifacts_dst_path_does_not_exist(tmp_path):
repo = ArtifactRepositoryImpl(_PARENT_DIR)
dst_path = tmp_path.joinpath("does_not_exist")
with pytest.raises(
MlflowException, match="The destination path for downloaded artifacts does not exist"
):
repo.download_artifacts(_MODEL_DIR, dst_path)
def test_download_artifacts_dst_path_is_file(tmp_path):
repo = ArtifactRepositoryImpl(_PARENT_DIR)
dst_path = tmp_path.joinpath("file")
dst_path.touch()
with pytest.raises(
MlflowException, match="The destination path for downloaded artifacts must be a directory"
):
repo.download_artifacts(_MODEL_DIR, dst_path)
@pytest.mark.parametrize(
("base_uri", "download_arg", "list_return_val"),
[
(
"",
_PARENT_MODEL_DIR,
[_PARENT_MODEL_DIR, _PARENT_MODEL_FILE, _PARENT_MODEL_DIR + "/" + _EMPTY_DIR],
)
],
)
def test_download_artifacts_handles_empty_dir(base_uri, download_arg, list_return_val):
def list_artifacts(path):
if path.endswith(_MODEL_DIR):
return [
FileInfo(item, item.endswith(_EMPTY_DIR), _DUMMY_FILE_SIZE)
for item in list_return_val
]
elif path.endswith(_PARENT_DIR) or path.endswith(_PARENT_DIR + "/"):
return [FileInfo(_PARENT_MODEL_DIR, True, _EMPTY_FILE_SIZE)]
else:
return []
with mock.patch.object(ArtifactRepositoryImpl, "list_artifacts") as list_artifacts_mock:
list_artifacts_mock.side_effect = list_artifacts
repo = ArtifactRepositoryImpl(base_uri)
with TempDir() as tmp:
repo.download_artifacts(download_arg, dst_path=tmp.path())
@pytest.mark.parametrize(
("base_uri", "download_arg", "list_return_val"),
[
(_PARENT_MODEL_DIR, "", [_MODEL_FILE]),
(_PARENT_MODEL_DIR, "", [".", _MODEL_FILE]),
(_PARENT_DIR, _MODEL_DIR, [_MODEL_DIR + "/" + _MODEL_FILE]),
(_PARENT_DIR, _MODEL_DIR, [_MODEL_DIR, _MODEL_DIR + "/" + _MODEL_FILE]),
("", _PARENT_MODEL_DIR, [_PARENT_MODEL_FILE]),
("", _PARENT_MODEL_DIR, [_PARENT_MODEL_DIR, _PARENT_MODEL_FILE]),
],
)
def test_download_artifacts_awaits_download_completion(base_uri, download_arg, list_return_val):
"""
Verifies that all asynchronous artifact downloads are joined before `download_artifacts()`
returns a result to the caller
"""
def list_artifacts(path):
fullpath = posixpath.join(base_uri, path)
if fullpath.endswith(_MODEL_DIR) or fullpath.endswith(_MODEL_DIR + "/"):
return [FileInfo(item, False, _DUMMY_FILE_SIZE) for item in list_return_val]
elif fullpath.endswith(_PARENT_MODEL_DIR) or fullpath.endswith(_PARENT_MODEL_DIR + "/"):
return [FileInfo(posixpath.join(path, _MODEL_DIR), True, _EMPTY_FILE_SIZE)]
else:
return []
with mock.patch.object(SlowArtifactRepositoryImpl, "list_artifacts") as list_artifacts_mock:
list_artifacts_mock.side_effect = list_artifacts
repo = SlowArtifactRepositoryImpl(base_uri)
repo.download_artifacts(download_arg)
@pytest.mark.parametrize(
("base_uri", "download_arg", "list_return_val"),
[
(_PARENT_MODEL_DIR, "", [_MODEL_FILE]),
],
)
def test_download_artifacts_provides_failure_info(base_uri, download_arg, list_return_val):
def list_artifacts(path):
fullpath = posixpath.join(base_uri, path)
if fullpath.endswith(_MODEL_DIR) or fullpath.endswith(_MODEL_DIR + "/"):
return [FileInfo(item, False, _DUMMY_FILE_SIZE) for item in list_return_val]
else:
return []
with mock.patch.object(FailureArtifactRepositoryImpl, "list_artifacts") as list_artifacts_mock:
list_artifacts_mock.side_effect = list_artifacts
repo = FailureArtifactRepositoryImpl(base_uri)
match = r"The following failures occurred while downloading one or more artifacts."
with pytest.raises(MlflowException, match=match) as exc:
repo.download_artifacts(download_arg)
err_msg = str(exc.value)
assert _MODEL_FILE in err_msg
assert _MOCK_ERROR in err_msg
@pytest.mark.parametrize("debug", [True, False])
def test_download_artifacts_provides_traceback_info(debug, reset_logging_level):
if debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
def list_artifacts(path):
fullpath = posixpath.join(_PARENT_MODEL_DIR, path)
if fullpath.endswith(_MODEL_DIR) or fullpath.endswith(_MODEL_DIR + "/"):
return [FileInfo(item, False, _DUMMY_FILE_SIZE) for item in [_MODEL_FILE]]
else:
return []
with mock.patch.object(FailureArtifactRepositoryImpl, "list_artifacts") as list_artifacts_mock:
list_artifacts_mock.side_effect = list_artifacts
repo = FailureArtifactRepositoryImpl(_PARENT_MODEL_DIR)
try:
repo.download_artifacts("")
except MlflowException as exc:
err_msg = str(exc.message)
if debug:
assert "Traceback" in err_msg
else:
assert "Traceback" not in err_msg
| FailureArtifactRepositoryImpl |
python | Pylons__pyramid | tests/test_asset.py | {
"start": 2173,
"end": 3004
} | class ____(unittest.TestCase):
def _callFUT(self, abspath, package):
from pyramid.asset import asset_spec_from_abspath
return asset_spec_from_abspath(abspath, package)
def test_package_name_is_main(self):
pkg = DummyPackage('__main__')
result = self._callFUT('abspath', pkg)
self.assertEqual(result, 'abspath')
def test_abspath_startswith_package_path(self):
abspath = os.path.join(here, 'fixtureapp')
pkg = DummyPackage('tests')
pkg.__file__ = 'file'
result = self._callFUT(abspath, pkg)
self.assertEqual(result, 'tests:fixtureapp')
def test_abspath_doesnt_startwith_package_path(self):
pkg = DummyPackage('tests')
result = self._callFUT(here, pkg)
self.assertEqual(result, here)
| Test_asset_spec_from_abspath |
python | pyca__cryptography | tests/hazmat/primitives/test_serialization.py | {
"start": 67055,
"end": 68750
} | class ____:
def test_unsupported_format(self):
f = PrivateFormat.PKCS8
with pytest.raises(ValueError):
f.encryption_builder()
def test_duplicate_kdf_rounds(self):
b = PrivateFormat.OpenSSH.encryption_builder().kdf_rounds(12)
with pytest.raises(ValueError):
b.kdf_rounds(12)
def test_invalid_kdf_rounds(self):
b = PrivateFormat.OpenSSH.encryption_builder()
with pytest.raises(ValueError):
b.kdf_rounds(0)
with pytest.raises(ValueError):
b.kdf_rounds(-1)
with pytest.raises(TypeError):
b.kdf_rounds("string") # type: ignore[arg-type]
def test_invalid_password(self):
b = PrivateFormat.OpenSSH.encryption_builder()
with pytest.raises(ValueError):
b.build(12) # type: ignore[arg-type]
with pytest.raises(ValueError):
b.build(b"")
def test_unsupported_type_for_methods(self):
b = PrivateFormat.OpenSSH.encryption_builder()
with pytest.raises(TypeError):
b.key_cert_algorithm(PBES.PBESv1SHA1And3KeyTripleDESCBC)
with pytest.raises(TypeError):
b.hmac_hash(SHA1())
def test_duplicate_hmac_hash(self):
b = PrivateFormat.PKCS12.encryption_builder().hmac_hash(SHA1())
with pytest.raises(ValueError):
b.hmac_hash(SHA1())
def test_duplicate_key_cert_algorithm(self):
b = PrivateFormat.PKCS12.encryption_builder().key_cert_algorithm(
PBES.PBESv1SHA1And3KeyTripleDESCBC
)
with pytest.raises(ValueError):
b.key_cert_algorithm(PBES.PBESv1SHA1And3KeyTripleDESCBC)
| TestEncryptionBuilder |
python | wandb__wandb | wandb/vendor/pygments/formatters/other.py | {
"start": 927,
"end": 4103
} | class ____(Formatter):
r"""
Format tokens as a raw representation for storing token streams.
The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
be converted to a token stream with the `RawTokenLexer`, described in the
:doc:`lexer list <lexers>`.
Only two options are accepted:
`compress`
If set to ``'gz'`` or ``'bz2'``, compress the output with the given
compression algorithm after encoding (default: ``''``).
`error_color`
If set to a color name, highlight error tokens using that color. If
set but with no value, defaults to ``'red'``.
.. versionadded:: 0.11
"""
name = 'Raw tokens'
aliases = ['raw', 'tokens']
filenames = ['*.raw']
unicodeoutput = False
def __init__(self, **options):
Formatter.__init__(self, **options)
# We ignore self.encoding if it is set, since it gets set for lexer
# and formatter if given with -Oencoding on the command line.
# The RawTokenFormatter outputs only ASCII. Override here.
self.encoding = 'ascii' # let pygments.format() do the right thing
self.compress = get_choice_opt(options, 'compress',
['', 'none', 'gz', 'bz2'], '')
self.error_color = options.get('error_color', None)
if self.error_color is True:
self.error_color = 'red'
if self.error_color is not None:
try:
colorize(self.error_color, '')
except KeyError:
raise ValueError("Invalid color %r specified" %
self.error_color)
def format(self, tokensource, outfile):
try:
outfile.write(b'')
except TypeError:
raise TypeError('The raw tokens formatter needs a binary '
'output file')
if self.compress == 'gz':
import gzip
outfile = gzip.GzipFile('', 'wb', 9, outfile)
def write(text):
outfile.write(text.encode())
flush = outfile.flush
elif self.compress == 'bz2':
import bz2
compressor = bz2.BZ2Compressor(9)
def write(text):
outfile.write(compressor.compress(text.encode()))
def flush():
outfile.write(compressor.flush())
outfile.flush()
else:
def write(text):
outfile.write(text.encode())
flush = outfile.flush
if self.error_color:
for ttype, value in tokensource:
line = "%s\t%r\n" % (ttype, value)
if ttype is Token.Error:
write(colorize(self.error_color, line))
else:
write(line)
else:
for ttype, value in tokensource:
write("%s\t%r\n" % (ttype, value))
flush()
TESTCASE_BEFORE = u'''\
def testNeedsName(self):
fragment = %r
tokens = [
'''
TESTCASE_AFTER = u'''\
]
self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
'''
| RawTokenFormatter |
python | ray-project__ray | python/ray/experimental/util/types.py | {
"start": 255,
"end": 322
} | class ____(_CollectiveOp):
pass
@PublicAPI
@dataclass
| AllGatherOp |
python | sqlalchemy__sqlalchemy | test/aaa_profiling/test_misc.py | {
"start": 1672,
"end": 4646
} | class ____(fixtures.TestBase):
__requires__ = ("cpython", "python_profiling_backend")
@testing.fixture(scope="class")
def mapping_fixture(self):
# note in order to work nicely with "fixture" we are emerging
# a whole new model of setup/teardown, since pytest "fixture"
# sort of purposely works badly with setup/teardown
registry = sqlalchemy.orm.registry()
metadata = MetaData()
parent = Table(
"parent",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(20)),
)
child = Table(
"child",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(20)),
Column(
"parent_id", Integer, ForeignKey("parent.id"), nullable=False
),
)
class Parent(testing.entities.BasicEntity):
pass
class Child(testing.entities.BasicEntity):
pass
registry.map_imperatively(
Parent,
parent,
properties={"children": relationship(Child, backref="parent")},
)
registry.map_imperatively(Child, child)
registry.configure()
yield Parent, Child
registry.dispose()
@testing.fixture(scope="function")
def stmt_fixture_one(self, mapping_fixture):
Parent, Child = mapping_fixture
return [
(
select(Parent.id, Child.id)
.select_from(ormjoin(Parent, Child, Parent.children))
.where(Child.id == 5)
)
for i in range(100)
]
@profiling.function_call_count(variance=0.15, warmup=2)
def test_statement_key_is_cached(self, stmt_fixture_one):
current_key = None
for stmt in stmt_fixture_one:
key = stmt._generate_cache_key()
assert key is not None
if current_key:
eq_(key, current_key)
else:
current_key = key
def test_statement_key_is_not_cached(
self, stmt_fixture_one, mapping_fixture
):
Parent, Child = mapping_fixture
# run a totally different statement so that everything cache
# related not specific to the statement is warmed up
some_other_statement = (
select(Parent.id, Child.id)
.join_from(Parent, Child, Parent.children)
.where(Parent.id == 5)
)
some_other_statement._generate_cache_key()
@profiling.function_call_count(variance=0.15, warmup=0)
def go():
current_key = None
for stmt in stmt_fixture_one:
key = stmt._generate_cache_key()
assert key is not None
if current_key:
eq_(key, current_key)
else:
current_key = key
go()
| CacheKeyTest |
python | scipy__scipy | scipy/_lib/_util.py | {
"start": 24819,
"end": 39092
} | class ____:
"""
Parallelisation wrapper for working with map-like callables, such as
`multiprocessing.Pool.map`.
Parameters
----------
pool : int or map-like callable
If `pool` is an integer, then it specifies the number of threads to
use for parallelization. If ``int(pool) == 1``, then no parallel
processing is used and the map builtin is used.
If ``pool == -1``, then the pool will utilize all available CPUs.
If `pool` is a map-like callable that follows the same
calling sequence as the built-in map function, then this callable is
used for parallelization.
"""
def __init__(self, pool=1):
self.pool = None
self._mapfunc = map
self._own_pool = False
if callable(pool):
self.pool = pool
self._mapfunc = self.pool
else:
from multiprocessing import get_context, get_start_method
method = get_start_method(allow_none=True)
if method is None and os.name=='posix' and sys.version_info < (3, 14):
# Python 3.13 and older used "fork" on posix, which can lead to
# deadlocks. This backports that fix to older Python versions.
method = 'forkserver'
# user supplies a number
if int(pool) == -1:
# use as many processors as possible
self.pool = get_context(method=method).Pool()
self._mapfunc = self.pool.map
self._own_pool = True
elif int(pool) == 1:
pass
elif int(pool) > 1:
# use the number of processors requested
self.pool = get_context(method=method).Pool(processes=int(pool))
self._mapfunc = self.pool.map
self._own_pool = True
else:
raise RuntimeError("Number of workers specified must be -1,"
" an int >= 1, or an object with a 'map' "
"method")
def __enter__(self):
return self
def terminate(self):
if self._own_pool:
self.pool.terminate()
def join(self):
if self._own_pool:
self.pool.join()
def close(self):
if self._own_pool:
self.pool.close()
def __exit__(self, exc_type, exc_value, traceback):
if self._own_pool:
self.pool.close()
self.pool.terminate()
def __call__(self, func, iterable):
# only accept one iterable because that's all Pool.map accepts
try:
return self._mapfunc(func, iterable)
except TypeError as e:
# wrong number of arguments
raise TypeError("The map-like callable must be of the"
" form f(func, iterable)") from e
def _workers_wrapper(func):
"""
Wrapper to deal with setup-cleanup of workers outside a user function via a
ContextManager. It saves having to do the setup/tear down with within that
function, which can be messy.
"""
@functools.wraps(func)
def inner(*args, **kwds):
kwargs = kwds.copy()
if 'workers' not in kwargs:
_workers = map
elif 'workers' in kwargs and kwargs['workers'] is None:
_workers = map
else:
_workers = kwargs['workers']
with MapWrapper(_workers) as mf:
kwargs['workers'] = mf
return func(*args, **kwargs)
return inner
def rng_integers(gen, low, high=None, size=None, dtype='int64',
endpoint=False):
"""
Return random integers from low (inclusive) to high (exclusive), or if
endpoint=True, low (inclusive) to high (inclusive). Replaces
`RandomState.randint` (with endpoint=False) and
`RandomState.random_integers` (with endpoint=True).
Return random integers from the "discrete uniform" distribution of the
specified dtype. If high is None (the default), then results are from
0 to low.
Parameters
----------
gen : {None, np.random.RandomState, np.random.Generator}
Random number generator. If None, then the np.random.RandomState
singleton is used.
low : int or array-like of ints
Lowest (signed) integers to be drawn from the distribution (unless
high=None, in which case this parameter is 0 and this value is used
for high).
high : int or array-like of ints
If provided, one above the largest (signed) integer to be drawn from
the distribution (see above for behavior if high=None). If array-like,
must contain integer values.
size : array-like of ints, optional
Output shape. If the given shape is, e.g., (m, n, k), then m * n * k
samples are drawn. Default is None, in which case a single value is
returned.
dtype : {str, dtype}, optional
Desired dtype of the result. All dtypes are determined by their name,
i.e., 'int64', 'int', etc, so byteorder is not available and a specific
precision may have different C types depending on the platform.
The default value is 'int64'.
endpoint : bool, optional
If True, sample from the interval [low, high] instead of the default
[low, high) Defaults to False.
Returns
-------
out: int or ndarray of ints
size-shaped array of random integers from the appropriate distribution,
or a single such random int if size not provided.
"""
if isinstance(gen, np.random.Generator):
return gen.integers(low, high=high, size=size, dtype=dtype,
endpoint=endpoint)
else:
if gen is None:
# default is RandomState singleton used by np.random.
gen = np.random.mtrand._rand
if endpoint:
# inclusive of endpoint
# remember that low and high can be arrays, so don't modify in
# place
if high is None:
return gen.randint(low + 1, size=size, dtype=dtype)
if high is not None:
return gen.randint(low, high=high + 1, size=size, dtype=dtype)
# exclusive
return gen.randint(low, high=high, size=size, dtype=dtype)
@contextmanager
def _fixed_default_rng(seed=1638083107694713882823079058616272161):
"""Context with a fixed np.random.default_rng seed."""
orig_fun = np.random.default_rng
np.random.default_rng = lambda seed=seed: orig_fun(seed)
try:
yield
finally:
np.random.default_rng = orig_fun
@contextmanager
def ignore_warns(expected_warning, *, match=None):
with warnings.catch_warnings():
warnings.filterwarnings("ignore", match, expected_warning)
yield
def _rng_html_rewrite(func):
"""Rewrite the HTML rendering of ``np.random.default_rng``.
This is intended to decorate
``numpydoc.docscrape_sphinx.SphinxDocString._str_examples``.
Examples are only run by Sphinx when there are plot involved. Even so,
it does not change the result values getting printed.
"""
# hexadecimal or number seed, case-insensitive
pattern = re.compile(r'np.random.default_rng\((0x[0-9A-F]+|\d+)\)', re.I)
def _wrapped(*args, **kwargs):
res = func(*args, **kwargs)
lines = [
re.sub(pattern, 'np.random.default_rng()', line)
for line in res
]
return lines
return _wrapped
def _argmin(a, keepdims=False, axis=None):
"""
argmin with a `keepdims` parameter.
See https://github.com/numpy/numpy/issues/8710
If axis is not None, a.shape[axis] must be greater than 0.
"""
res = np.argmin(a, axis=axis)
if keepdims and axis is not None:
res = np.expand_dims(res, axis=axis)
return res
def _contains_nan(
a: Array,
nan_policy: Literal["propagate", "raise", "omit"] = "propagate",
*,
xp_omit_okay: bool = False,
xp: ModuleType | None = None,
) -> Array | bool:
# Regarding `xp_omit_okay`: Temporarily, while `_axis_nan_policy` does not
# handle non-NumPy arrays, most functions that call `_contains_nan` want
# it to raise an error if `nan_policy='omit'` and `xp` is not `np`.
# Some functions support `nan_policy='omit'` natively, so setting this to
# `True` prevents the error from being raised.
policies = {"propagate", "raise", "omit"}
if nan_policy not in policies:
msg = f"nan_policy must be one of {policies}."
raise ValueError(msg)
if xp_size(a) == 0:
return False
if xp is None:
xp = array_namespace(a)
if xp.isdtype(a.dtype, "real floating"):
# Faster and less memory-intensive than xp.any(xp.isnan(a)), and unlike other
# reductions, `max`/`min` won't return NaN unless there is a NaN in the data.
contains_nan = xp.isnan(xp.max(a))
elif xp.isdtype(a.dtype, "complex floating"):
# Typically `real` and `imag` produce views; otherwise, `xp.any(xp.isnan(a))`
# would be more efficient.
contains_nan = xp.isnan(xp.max(xp.real(a))) | xp.isnan(xp.max(xp.imag(a)))
elif is_numpy(xp) and np.issubdtype(a.dtype, object):
contains_nan = False
for el in a.ravel():
# isnan doesn't work on non-numeric elements
if np.issubdtype(type(el), np.number) and np.isnan(el):
contains_nan = True
break
else:
# Only `object` and `inexact` arrays can have NaNs
return False
# The implicit call to bool(contains_nan) must happen after testing
# nan_policy to prevent lazy and device-bound xps from raising in the
# default policy='propagate' case.
if nan_policy == 'raise':
if is_lazy_array(a):
msg = "nan_policy='raise' is not supported for lazy arrays."
raise TypeError(msg)
if contains_nan:
msg = "The input contains nan values"
raise ValueError(msg)
elif nan_policy == 'omit' and not xp_omit_okay and not is_numpy(xp):
if is_lazy_array(a):
msg = "nan_policy='omit' is not supported for lazy arrays."
raise TypeError(msg)
return contains_nan
def _rename_parameter(old_name, new_name, dep_version=None):
"""
Generate decorator for backward-compatible keyword renaming.
Apply the decorator generated by `_rename_parameter` to functions with a
recently renamed parameter to maintain backward-compatibility.
After decoration, the function behaves as follows:
If only the new parameter is passed into the function, behave as usual.
If only the old parameter is passed into the function (as a keyword), raise
a DeprecationWarning if `dep_version` is provided, and behave as usual
otherwise.
If both old and new parameters are passed into the function, raise a
DeprecationWarning if `dep_version` is provided, and raise the appropriate
TypeError (function got multiple values for argument).
Parameters
----------
old_name : str
Old name of parameter
new_name : str
New name of parameter
dep_version : str, optional
Version of SciPy in which old parameter was deprecated in the format
'X.Y.Z'. If supplied, the deprecation message will indicate that
support for the old parameter will be removed in version 'X.Y+2.Z'
Notes
-----
Untested with functions that accept *args. Probably won't work as written.
"""
def decorator(fun):
@functools.wraps(fun)
def wrapper(*args, **kwargs):
if old_name in kwargs:
if dep_version:
end_version = dep_version.split('.')
end_version[1] = str(int(end_version[1]) + 2)
end_version = '.'.join(end_version)
message = (f"Use of keyword argument `{old_name}` is "
f"deprecated and replaced by `{new_name}`. "
f"Support for `{old_name}` will be removed "
f"in SciPy {end_version}.")
warnings.warn(message, DeprecationWarning, stacklevel=2)
if new_name in kwargs:
message = (f"{fun.__name__}() got multiple values for "
f"argument now known as `{new_name}`")
raise TypeError(message)
kwargs[new_name] = kwargs.pop(old_name)
return fun(*args, **kwargs)
return wrapper
return decorator
def _rng_spawn(rng, n_children):
# spawns independent RNGs from a parent RNG
bg = rng._bit_generator
ss = bg._seed_seq
child_rngs = [np.random.Generator(type(bg)(child_ss))
for child_ss in ss.spawn(n_children)]
return child_rngs
def _get_nan(*data, shape=(), xp=None):
xp = array_namespace(*data) if xp is None else xp
# Get NaN of appropriate dtype for data
dtype = xp_result_type(*data, force_floating=True, xp=xp)
device = xp_result_device(*data)
res = xp.full(shape, xp.nan, dtype=dtype, device=device)
if not shape:
res = res[()]
# whenever mdhaber/marray#89 is resolved, could just return `res`
return res.data if is_marray(xp) else res
def normalize_axis_index(axis, ndim):
# Check if `axis` is in the correct range and normalize it
if axis < -ndim or axis >= ndim:
msg = f"axis {axis} is out of bounds for array of dimension {ndim}"
raise AxisError(msg)
if axis < 0:
axis = axis + ndim
return axis
def _call_callback_maybe_halt(callback, res):
"""Call wrapped callback; return True if algorithm should stop.
Parameters
----------
callback : callable or None
A user-provided callback wrapped with `_wrap_callback`
res : OptimizeResult
Information about the current iterate
Returns
-------
halt : bool
True if minimization should stop
"""
if callback is None:
return False
try:
callback(res)
return False
except StopIteration:
callback.stop_iteration = True
return True
| MapWrapper |
python | pola-rs__polars | py-polars/src/polars/io/scan_options/cast_options.py | {
"start": 452,
"end": 4537
} | class ____:
"""Options for scanning files."""
def __init__(
self,
*,
integer_cast: Literal["upcast", "forbid"] = "forbid",
float_cast: Literal["forbid"]
| FloatCastOption
| Collection[FloatCastOption] = "forbid",
datetime_cast: Literal["forbid"]
| DatetimeCastOption
| Collection[DatetimeCastOption] = "forbid",
missing_struct_fields: Literal["insert", "raise"] = "raise",
extra_struct_fields: Literal["ignore", "raise"] = "raise",
categorical_to_string: Literal["allow", "forbid"] = "forbid",
_internal_call: bool = False,
) -> None:
"""
Common configuration for scanning files.
.. warning::
This functionality is considered **unstable**. It may be changed
at any point without it being considered a breaking change.
Parameters
----------
integer_cast
Configuration for casting from integer types:
* `upcast`: Allow lossless casting to wider integer types.
* `forbid`: Raises an error if dtypes do not match.
float_cast
Configuration for casting from float types:
* `upcast`: Allow casting to higher precision float types.
* `downcast`: Allow casting to lower precision float types.
* `forbid`: Raises an error if dtypes do not match.
datetime_cast
Configuration for casting from datetime types:
* `nanosecond-downcast`: Allow nanosecond precision datetime to be \
downcasted to any lower precision. This has a similar effect to \
PyArrow's `coerce_int96_timestamp_unit`.
* `convert-timezone`: Allow casting to a different timezone.
* `forbid`: Raises an error if dtypes do not match.
missing_struct_fields
Configuration for behavior when struct fields defined in the schema
are missing from the data:
* `insert`: Inserts the missing fields.
* `raise`: Raises an error.
extra_struct_fields
Configuration for behavior when extra struct fields outside of the
defined schema are encountered in the data:
* `ignore`: Silently ignores.
* `raise`: Raises an error.
categorical_to_string
Configuration for behavior when reading in a column whose expected
type is string, but type in the file is categorical.
* `allow`: Categorical is casted to string.
* `forbid`: Raises an error.
"""
if not _internal_call:
issue_unstable_warning("ScanCastOptions is considered unstable.")
self.integer_cast = integer_cast
self.float_cast = float_cast
self.datetime_cast = datetime_cast
self.missing_struct_fields = missing_struct_fields
self.extra_struct_fields = extra_struct_fields
self.categorical_to_string = categorical_to_string
# Note: We don't cache this here, it's cached on the Rust-side.
@staticmethod
def _default() -> ScanCastOptions:
return ScanCastOptions(_internal_call=True)
@classmethod
def _default_iceberg(cls) -> ScanCastOptions:
"""
Default options suitable for Iceberg / Deltalake.
This in general has all casting options enabled. Note: do not modify the
returned config object, it is a cached global object.
"""
global _DEFAULT_CAST_OPTIONS_ICEBERG
if _DEFAULT_CAST_OPTIONS_ICEBERG is None:
_DEFAULT_CAST_OPTIONS_ICEBERG = ScanCastOptions(
integer_cast="upcast",
float_cast=["upcast", "downcast"],
datetime_cast=["nanosecond-downcast", "convert-timezone"],
missing_struct_fields="insert",
extra_struct_fields="ignore",
categorical_to_string="allow",
_internal_call=True,
)
return _DEFAULT_CAST_OPTIONS_ICEBERG
| ScanCastOptions |
python | django-mptt__django-mptt | mptt/models.py | {
"start": 803,
"end": 1163
} | class ____:
def __init__(self, getter, setter=None):
self.fget = getter
self.fset = setter
def __get__(self, cls, owner):
return self.fget(owner)
def __set__(self, cls, owner, value):
if not self.fset:
raise AttributeError("This classproperty is read only")
self.fset(owner, value)
| _classproperty |
python | django__django | tests/template_tests/filter_tests/test_timeuntil.py | {
"start": 265,
"end": 4860
} | class ____(TimezoneTestCase):
# Default compare with datetime.now()
@setup({"timeuntil01": "{{ a|timeuntil }}"})
def test_timeuntil01(self):
output = self.engine.render_to_string(
"timeuntil01", {"a": datetime.now() + timedelta(minutes=2, seconds=10)}
)
self.assertEqual(output, "2\xa0minutes")
@setup({"timeuntil02": "{{ a|timeuntil }}"})
def test_timeuntil02(self):
output = self.engine.render_to_string(
"timeuntil02", {"a": (datetime.now() + timedelta(days=1, seconds=10))}
)
self.assertEqual(output, "1\xa0day")
@setup({"timeuntil03": "{{ a|timeuntil }}"})
def test_timeuntil03(self):
output = self.engine.render_to_string(
"timeuntil03",
{"a": (datetime.now() + timedelta(hours=8, minutes=10, seconds=10))},
)
self.assertEqual(output, "8\xa0hours, 10\xa0minutes")
# Compare to a given parameter
@setup({"timeuntil04": "{{ a|timeuntil:b }}"})
def test_timeuntil04(self):
output = self.engine.render_to_string(
"timeuntil04",
{"a": self.now - timedelta(days=1), "b": self.now - timedelta(days=2)},
)
self.assertEqual(output, "1\xa0day")
@setup({"timeuntil05": "{{ a|timeuntil:b }}"})
def test_timeuntil05(self):
output = self.engine.render_to_string(
"timeuntil05",
{
"a": self.now - timedelta(days=2),
"b": self.now - timedelta(days=2, minutes=1),
},
)
self.assertEqual(output, "1\xa0minute")
# Regression for #7443
@setup({"timeuntil06": "{{ earlier|timeuntil }}"})
def test_timeuntil06(self):
output = self.engine.render_to_string(
"timeuntil06", {"earlier": self.now - timedelta(days=7)}
)
self.assertEqual(output, "0\xa0minutes")
@setup({"timeuntil07": "{{ earlier|timeuntil:now }}"})
def test_timeuntil07(self):
output = self.engine.render_to_string(
"timeuntil07", {"now": self.now, "earlier": self.now - timedelta(days=7)}
)
self.assertEqual(output, "0\xa0minutes")
@setup({"timeuntil08": "{{ later|timeuntil }}"})
def test_timeuntil08(self):
output = self.engine.render_to_string(
"timeuntil08", {"later": self.now + timedelta(days=7, hours=1)}
)
self.assertEqual(output, "1\xa0week")
@setup({"timeuntil09": "{{ later|timeuntil:now }}"})
def test_timeuntil09(self):
output = self.engine.render_to_string(
"timeuntil09", {"now": self.now, "later": self.now + timedelta(days=7)}
)
self.assertEqual(output, "1\xa0week")
# Differing timezones are calculated correctly.
@requires_tz_support
@setup({"timeuntil10": "{{ a|timeuntil }}"})
def test_timeuntil10(self):
output = self.engine.render_to_string("timeuntil10", {"a": self.now_tz})
self.assertEqual(output, "0\xa0minutes")
@requires_tz_support
@setup({"timeuntil11": "{{ a|timeuntil }}"})
def test_timeuntil11(self):
output = self.engine.render_to_string("timeuntil11", {"a": self.now_tz_i})
self.assertEqual(output, "0\xa0minutes")
@setup({"timeuntil12": "{{ a|timeuntil:b }}"})
def test_timeuntil12(self):
output = self.engine.render_to_string(
"timeuntil12", {"a": self.now_tz_i, "b": self.now_tz}
)
self.assertEqual(output, "0\xa0minutes")
# Regression for #9065 (two date objects).
@setup({"timeuntil13": "{{ a|timeuntil:b }}"})
def test_timeuntil13(self):
output = self.engine.render_to_string(
"timeuntil13", {"a": self.today, "b": self.today}
)
self.assertEqual(output, "0\xa0minutes")
@setup({"timeuntil14": "{{ a|timeuntil:b }}"})
def test_timeuntil14(self):
output = self.engine.render_to_string(
"timeuntil14", {"a": self.today, "b": self.today - timedelta(hours=24)}
)
self.assertEqual(output, "1\xa0day")
@setup({"timeuntil15": "{{ a|timeuntil:b }}"})
def test_naive_aware_type_error(self):
output = self.engine.render_to_string(
"timeuntil15", {"a": self.now, "b": self.now_tz_i}
)
self.assertEqual(output, "")
@setup({"timeuntil16": "{{ a|timeuntil:b }}"})
def test_aware_naive_type_error(self):
output = self.engine.render_to_string(
"timeuntil16", {"a": self.now_tz_i, "b": self.now}
)
self.assertEqual(output, "")
| TimeuntilTests |
python | getsentry__sentry | fixtures/safe_migrations_apps/safe_run_sql_app/models.py | {
"start": 89,
"end": 171
} | class ____(models.Model):
field = BoundedPositiveIntegerField(default=0)
| TestTable |
python | fluentpython__example-code | 19-dyn-attr-prop/bulkfood/bulkfood_v2.py | {
"start": 909,
"end": 1440
} | class ____:
def __init__(self, description, weight, price):
self.description = description
self.weight = weight # <1>
self.price = price
def subtotal(self):
return self.weight * self.price
@property # <2>
def weight(self): # <3>
return self.__weight # <4>
@weight.setter # <5>
def weight(self, value):
if value > 0:
self.__weight = value # <6>
else:
raise ValueError('value must be > 0') # <7>
# END LINEITEM_V2
| LineItem |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.