language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | apache__airflow | airflow-core/src/airflow/models/backfill.py | {
"start": 2030,
"end": 2186
} | class ____(AirflowException):
"""
Raised when attempting to create backfill and one already active.
:meta private:
"""
| AlreadyRunningBackfill |
python | matplotlib__matplotlib | galleries/examples/event_handling/image_slices_viewer.py | {
"start": 517,
"end": 1511
} | class ____:
def __init__(self, ax, X):
self.index = 0
self.X = X
self.ax = ax
self.im = ax.imshow(self.X[:, :, self.index])
self.update()
def on_scroll(self, event):
print(event.button, event.step)
increment = 1 if event.button == 'up' else -1
max_index = self.X.shape[-1] - 1
self.index = np.clip(self.index + increment, 0, max_index)
self.update()
def update(self):
self.im.set_data(self.X[:, :, self.index])
self.ax.set_title(
f'Use scroll wheel to navigate\nindex {self.index}')
self.im.axes.figure.canvas.draw()
x, y, z = np.ogrid[-10:10:100j, -10:10:100j, 1:10:20j]
X = np.sin(x * y * z) / (x * y * z)
fig, ax = plt.subplots()
# create an IndexTracker and make sure it lives during the whole
# lifetime of the figure by assigning it to a variable
tracker = IndexTracker(ax, X)
fig.canvas.mpl_connect('scroll_event', tracker.on_scroll)
plt.show()
| IndexTracker |
python | huggingface__transformers | tests/models/longformer/test_modeling_longformer.py | {
"start": 1435,
"end": 12855
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
attention_window=4,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.attention_window = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but LongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window + 1` locations
# (assuming no token with global attention, otherwise the last dimension of attentions
# is x + self.attention_window + 1, where x is the number of tokens with global attention)
self.key_length = self.attention_window + 2
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def get_config(self):
return LongformerConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
initializer_range=self.initializer_range,
attention_window=self.attention_window,
)
def get_pipeline_config(self):
config = self.get_config()
config.vocab_size = 300
return config
def create_and_check_attention_mask_determinism(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = LongformerModel(config=config)
model.to(torch_device)
model.eval()
attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
output_with_mask = model(input_ids, attention_mask=attention_mask)["last_hidden_state"]
output_without_mask = model(input_ids)["last_hidden_state"]
self.parent.assertTrue(torch.allclose(output_with_mask[0, 0, :5], output_without_mask[0, 0, :5], atol=1e-4))
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = LongformerModel(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_model_with_global_attention_mask(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = LongformerModel(config=config)
model.to(torch_device)
model.eval()
global_attention_mask = input_mask.clone()
global_attention_mask[:, input_mask.shape[-1] // 2] = 0
global_attention_mask = global_attention_mask.to(torch_device)
result = model(
input_ids,
attention_mask=input_mask,
global_attention_mask=global_attention_mask,
token_type_ids=token_type_ids,
)
result = model(input_ids, token_type_ids=token_type_ids, global_attention_mask=global_attention_mask)
result = model(input_ids, global_attention_mask=global_attention_mask)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = LongformerForMaskedLM(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = LongformerForQuestionAnswering(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=input_mask,
global_attention_mask=input_mask,
token_type_ids=token_type_ids,
start_positions=sequence_labels,
end_positions=sequence_labels,
)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = LongformerForSequenceClassification(config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = LongformerForTokenClassification(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_choices = self.num_choices
model = LongformerForMultipleChoice(config=config)
model.to(torch_device)
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
result = model(
multiple_choice_inputs_ids,
attention_mask=multiple_choice_input_mask,
global_attention_mask=multiple_choice_input_mask,
token_type_ids=multiple_choice_token_type_ids,
labels=choice_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, -1] = 1
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
"global_attention_mask": global_attention_mask,
}
return config, inputs_dict
def prepare_config_and_inputs_for_question_answering(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
# Replace sep_token_id by some random id
input_ids[input_ids == config.sep_token_id] = torch.randint(0, config.vocab_size, (1,)).item()
# Make sure there are exactly three sep_token_id
input_ids[:, -3:] = config.sep_token_id
input_mask = torch.ones_like(input_ids)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
@require_torch
| LongformerModelTester |
python | doocs__leetcode | solution/2900-2999/2925.Maximum Score After Applying Operations on a Tree/Solution.py | {
"start": 0,
"end": 670
} | class ____:
def maximumScoreAfterOperations(
self, edges: List[List[int]], values: List[int]
) -> int:
def dfs(i: int, fa: int = -1) -> (int, int):
a = b = 0
leaf = True
for j in g[i]:
if j != fa:
leaf = False
aa, bb = dfs(j, i)
a += aa
b += bb
if leaf:
return values[i], 0
return values[i] + a, max(values[i] + b, a)
g = [[] for _ in range(len(values))]
for a, b in edges:
g[a].append(b)
g[b].append(a)
return dfs(0)[1]
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1120706,
"end": 1121047
} | class ____(sgqlc.types.Type, Contribution):
"""Represents the contribution a user made on GitHub by opening an
issue.
"""
__schema__ = github_schema
__field_names__ = ("issue",)
issue = sgqlc.types.Field(sgqlc.types.non_null("Issue"), graphql_name="issue")
"""The issue that was opened."""
| CreatedIssueContribution |
python | getsentry__sentry | tests/sentry/utils/test_projectflags.py | {
"start": 295,
"end": 1642
} | class ____(TestCase):
@patch.object(test_signal, "send_robust")
def test_basic(self, mock_send_robust: Mock) -> None:
assert not self.project.flags.has_transactions
assert set_project_flag_and_signal(self.project, "has_transactions", test_signal) == 1
mock_send_robust.assert_called_once_with(project=self.project, sender=Project)
assert self.project.flags.has_transactions
@patch.object(test_signal, "send_robust")
def test_flag_already_set(self, mock_send_robust: Mock) -> None:
self.project.update(flags=F("flags").bitor(Project.flags.has_transactions))
assert self.project.flags.has_transactions
assert set_project_flag_and_signal(self.project, "has_transactions", test_signal) == 0
mock_send_robust.assert_not_called()
assert self.project.flags.has_transactions
@patch.object(test_signal, "send_robust")
def test_signal_kwargs(self, mock_send_robust: Mock) -> None:
assert not self.project.flags.has_transactions
assert (
set_project_flag_and_signal(self.project, "has_transactions", test_signal, a=1, b="xyz")
== 1
)
mock_send_robust.assert_called_once_with(project=self.project, sender=Project, a=1, b="xyz")
assert self.project.flags.has_transactions
| SetProjectFlagsAndSignalTest |
python | bokeh__bokeh | tests/unit/bokeh/server/test_contexts.py | {
"start": 1310,
"end": 1785
} | class ____:
def test_init(self) -> None:
ac = bsc.ApplicationContext("app", io_loop="ioloop")
c = bsc.BokehServerContext(ac)
assert c.application_context == ac
assert len(gc.get_referrers(ac)) == 0
def test_sessions(self) -> None:
ac = bsc.ApplicationContext("app", io_loop="ioloop")
ac._sessions = dict(foo=1, bar=2)
c = bsc.BokehServerContext(ac)
assert set(c.sessions) == {1, 2}
| TestBokehServerContext |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/layout/processors.py | {
"start": 4890,
"end": 5159
} | class ____(Processor):
"""
A `Processor` that doesn't do anything.
"""
def apply_transformation(
self, transformation_input: TransformationInput
) -> Transformation:
return Transformation(transformation_input.fragments)
| DummyProcessor |
python | joke2k__faker | faker/providers/ssn/nl_BE/__init__.py | {
"start": 172,
"end": 2868
} | class ____(SsnProvider):
def ssn(self) -> str:
"""
Returns a 11 digits Belgian SSN called "rijksregisternummer" as a string
The first 6 digits represent the birthdate with (in order) year, month and day.
The second group of 3 digits is represents a sequence number (order of birth).
It is even for women and odd for men.
For men the range starts at 1 and ends 997, for women 2 until 998.
The third group of 2 digits is a checksum based on the previous 9 digits (modulo 97).
Divide those 9 digits by 97, subtract the remainder from 97 and that's the result.
For persons born in or after 2000, the 9 digit number needs to be proceeded by a 2
(add 2000000000) before the division by 97.
"""
# see http://nl.wikipedia.org/wiki/Burgerservicenummer (in Dutch)
def _checksum(digits):
res = 97 - (digits % 97)
return res
# Generate a date (random)
mydate = self.generator.date()
# Convert it to an int
elms = mydate.split("-")
# Adjust for year 2000 if necessary
if elms[0][0] == "2":
above = True
else:
above = False
# Only keep the last 2 digits of the year
elms[0] = elms[0][2:4]
# Simulate the gender/sequence - should be 3 digits
seq = self.generator.random_int(1, 998)
# Right justify sequence and append to list
seq_str = f"{seq:0>3}"
elms.append(seq_str)
# Now convert list to an integer so the checksum can be calculated
date_as_int = int("".join(elms))
if above:
date_as_int += 2000000000
# Generate checksum
s = _checksum(date_as_int)
s_rjust = f"{s:0>2}"
# return result as a string
elms.append(s_rjust)
return "".join(elms)
vat_id_formats = ("BE##########",)
def vat_id(self) -> str:
vat_id_random_section = "#######"
vat_id_possible_initial_numbers = ("0", "1")
"""
http://ec.europa.eu/taxation_customs/vies/faq.html#item_11
https://en.wikipedia.org/wiki/VAT_identification_number
:return: A random Belgian VAT ID starting with 0 or 1 and has a correct checksum with a modulo 97 check
"""
generated_initial_number: str = self.random_element(vat_id_possible_initial_numbers)
vat_without_check = self.bothify(f"{generated_initial_number}{vat_id_random_section}")
vat_as_int = int(vat_without_check)
vat_check = 97 - (vat_as_int % 97)
vat_check_str = f"{vat_check:0>2}"
return f"BE{vat_without_check}{vat_check_str}"
| Provider |
python | gevent__gevent | src/gevent/tests/test__server.py | {
"start": 563,
"end": 1965
} | class ____(StreamServer):
def handle(self, client_socket, _address): # pylint:disable=method-hidden
fd = client_socket.makefile()
try:
request_line = fd.readline()
if not request_line:
return
try:
_method, path, _rest = request_line.split(' ', 3)
except Exception:
print('Failed to parse request line: %r' % (request_line, ))
raise
if path == '/ping':
client_socket.sendall(b'HTTP/1.0 200 OK\r\n\r\nPONG')
elif path in ['/long', '/short']:
client_socket.sendall(b'hello')
while True:
data = client_socket.recv(1)
if not data:
break
else:
client_socket.sendall(b'HTTP/1.0 404 WTF?\r\n\r\n')
finally:
fd.close()
def sleep_to_clear_old_sockets(*_args):
try:
# Allow any queued callbacks needed to close sockets
# to run. On Windows, this needs to spin the event loop to
# allow proper FD cleanup. Otherwise we risk getting an
# old FD that's being closed and then get spurious connection
# errors.
gevent.sleep(0 if not WIN else SMALLEST_RELIABLE_DELAY)
except Exception: # pylint:disable=broad-except
pass
| SimpleStreamServer |
python | pennersr__django-allauth | allauth/app_settings.py | {
"start": 31,
"end": 1324
} | class ____:
def __init__(self, prefix):
self.prefix = prefix
def _setting(self, name, dflt):
from allauth.utils import get_setting
return get_setting(self.prefix + name, dflt)
@property
def SITES_ENABLED(self):
return apps.is_installed("django.contrib.sites")
@property
def SOCIALACCOUNT_ENABLED(self):
return apps.is_installed("allauth.socialaccount")
@property
def SOCIALACCOUNT_ONLY(self) -> bool:
from allauth.utils import get_setting
return get_setting("SOCIALACCOUNT_ONLY", False)
@property
def MFA_ENABLED(self):
return apps.is_installed("allauth.mfa")
@property
def USERSESSIONS_ENABLED(self):
return apps.is_installed("allauth.usersessions")
@property
def HEADLESS_ENABLED(self):
return apps.is_installed("allauth.headless")
@property
def HEADLESS_ONLY(self) -> bool:
from allauth.utils import get_setting
return get_setting("HEADLESS_ONLY", False)
@property
def DEFAULT_AUTO_FIELD(self):
return self._setting("DEFAULT_AUTO_FIELD", None)
_app_settings = AppSettings("ALLAUTH_")
def __getattr__(name):
# See https://peps.python.org/pep-0562/
return getattr(_app_settings, name)
| AppSettings |
python | tensorflow__tensorflow | tensorflow/python/ops/functional_ops_test.py | {
"start": 1119,
"end": 2776
} | class ____(test.TestCase):
def testIfWithDefun(self):
# Defun should only be used in graph mode
with ops.Graph().as_default():
@function.Defun(dtypes.float32)
def Then(x):
return x + 1
@function.Defun(dtypes.float32)
def Else(x):
return x - 1
inputs = [10.]
result = self.evaluate(functional_ops.If(False, inputs, Then, Else))
self.assertEqual([9.0], result)
def testIfWithFunction(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec((), dtypes.float32)])
def Then(x):
return x + 1
@def_function.function(
input_signature=[tensor_spec.TensorSpec((), dtypes.float32)])
def Else(x):
return x - 1
inputs = [10.]
then_cf = Then.get_concrete_function()
else_cf = Else.get_concrete_function()
result = self.evaluate(functional_ops.If(False, inputs, then_cf, else_cf))
self.assertEqual([9.0], result)
def testIfWithFunctionComposite(self):
signature = [tensor_spec.TensorSpec([], dtypes.float32)]
@def_function.function(input_signature=signature)
def Then(x):
return sparse_tensor.SparseTensor([[0]], [x + 1], [1])
@def_function.function(input_signature=signature)
def Else(x):
return sparse_tensor.SparseTensor([[0]], [x - 1], [1])
inputs = [10.]
then_cf = Then.get_concrete_function()
else_cf = Else.get_concrete_function()
result = functional_ops.If(False, inputs, then_cf, else_cf)
self.assertIsInstance(result, sparse_tensor.SparseTensor)
self.assertAllEqual([9.0], result.values)
if __name__ == '__main__':
test.main()
| FunctionalOpsTest |
python | pytorch__pytorch | torch/distributed/elastic/rendezvous/dynamic_rendezvous.py | {
"start": 32610,
"end": 32956
} | class ____:
"""Represent a rendezvous close operation."""
def __call__(self, ctx: _RendezvousContext, deadline: float) -> _Action:
if ctx.state.closed:
return _Action.FINISH
if time.monotonic() > deadline:
return _Action.ERROR_TIMEOUT
return _Action.MARK_RENDEZVOUS_CLOSED
| _RendezvousCloseOp |
python | ApeWorX__ape | tests/functional/test_deploymentscache.py | {
"start": 93,
"end": 2464
} | class ____:
CONTRACT_NAME = "DeploymentTestContractName"
@pytest.fixture(scope="class")
def contract_name(self):
return self.CONTRACT_NAME
@pytest.fixture
def cache(self):
return DeploymentDiskCache()
def test_cache_deployment(self, zero_address, cache, contract_name):
cache.cache_deployment(zero_address, contract_name)
assert contract_name in cache
assert cache[contract_name][-1].address == zero_address
def test_cache_deployment_live_network(
self, zero_address, cache, contract_name, mock_sepolia, eth_tester_provider
):
local = eth_tester_provider.network
ecosystem_name = mock_sepolia.ecosystem.name
eth_tester_provider.network = mock_sepolia
cache.cache_deployment(zero_address, contract_name)
eth_tester_provider.network = local
assert contract_name in cache
assert cache[contract_name][-1].address == zero_address
# Show it is also cached on disk.
disk_data = json.loads(cache.cachefile.read_text())
assert (
disk_data["ecosystems"][ecosystem_name][mock_sepolia.name][contract_name][0]["address"]
== zero_address
)
def test_cache_deployment_live_network_new_ecosystem(
self, zero_address, cache, contract_name, mock_sepolia, eth_tester_provider
):
"""
Tests the case when caching a deployment in a new ecosystem.
"""
ecosystem_name = mock_sepolia.ecosystem.name
local = eth_tester_provider.network
eth_tester_provider.network = mock_sepolia
# Make the ecosystem key not exist.
deployments = cache._deployments.pop(ecosystem_name, None)
cache.cache_deployment(zero_address, contract_name)
eth_tester_provider.network = local
if deployments is not None:
cache._deployments[ecosystem_name] = deployments
cache.cachefile.unlink(missing_ok=True)
# In memory cached still work.
assert contract_name in cache
assert cache[contract_name][-1].address == zero_address
# Show it did NOT cache to disk.
if cache.cachefile.is_file():
disk_data = json.loads(cache.cachefile.read_text())
assert contract_name not in disk_data["ecosystems"][ecosystem_name]["sepolia"]
| TestDeploymentDiskCache |
python | python-markdown__markdown | markdown/core.py | {
"start": 1511,
"end": 21328
} | class ____:
"""
A parser which converts Markdown to HTML.
Attributes:
Markdown.tab_length (int): The number of spaces which correspond to a single tab. Default: `4`.
Markdown.ESCAPED_CHARS (list[str]): List of characters which get the backslash escape treatment.
Markdown.block_level_elements (list[str]): List of HTML tags which get treated as block-level elements.
See [`markdown.util.BLOCK_LEVEL_ELEMENTS`][] for the full list of elements.
Markdown.registeredExtensions (list[Extension]): List of extensions which have called
[`registerExtension`][markdown.Markdown.registerExtension] during setup.
Markdown.doc_tag (str): Element used to wrap document. Default: `div`.
Markdown.stripTopLevelTags (bool): Indicates whether the `doc_tag` should be removed. Default: 'True'.
Markdown.references (dict[str, tuple[str, str]]): A mapping of link references found in a parsed document
where the key is the reference name and the value is a tuple of the URL and title.
Markdown.htmlStash (util.HtmlStash): The instance of the `HtmlStash` used by an instance of this class.
Markdown.output_formats (dict[str, Callable[xml.etree.ElementTree.Element]]): A mapping of known output
formats by name and their respective serializers. Each serializer must be a callable which accepts an
[`Element`][xml.etree.ElementTree.Element] and returns a `str`.
Markdown.output_format (str): The output format set by
[`set_output_format`][markdown.Markdown.set_output_format].
Markdown.serializer (Callable[xml.etree.ElementTree.Element]): The serializer set by
[`set_output_format`][markdown.Markdown.set_output_format].
Markdown.preprocessors (util.Registry): A collection of [`preprocessors`][markdown.preprocessors].
Markdown.parser (blockparser.BlockParser): A collection of [`blockprocessors`][markdown.blockprocessors].
Markdown.inlinePatterns (util.Registry): A collection of [`inlinepatterns`][markdown.inlinepatterns].
Markdown.treeprocessors (util.Registry): A collection of [`treeprocessors`][markdown.treeprocessors].
Markdown.postprocessors (util.Registry): A collection of [`postprocessors`][markdown.postprocessors].
"""
doc_tag = "div" # Element used to wrap document - later removed
output_formats: ClassVar[dict[str, Callable[[Element], str]]] = {
'html': to_html_string,
'xhtml': to_xhtml_string,
}
"""
A mapping of known output formats by name and their respective serializers. Each serializer must be a
callable which accepts an [`Element`][xml.etree.ElementTree.Element] and returns a `str`.
"""
def __init__(self, **kwargs):
"""
Creates a new Markdown instance.
Keyword Arguments:
extensions (list[Extension | str]): A list of extensions.
If an item is an instance of a subclass of [`markdown.extensions.Extension`][],
the instance will be used as-is. If an item is of type `str`, it is passed
to [`build_extension`][markdown.Markdown.build_extension] with its corresponding
`extension_configs` and the returned instance of [`markdown.extensions.Extension`][]
is used.
extension_configs (dict[str, dict[str, Any]]): Configuration settings for extensions.
output_format (str): Format of output. Supported formats are:
* `xhtml`: Outputs XHTML style tags. Default.
* `html`: Outputs HTML style tags.
tab_length (int): Length of tabs in the source. Default: `4`
"""
self.tab_length: int = kwargs.get('tab_length', 4)
self.ESCAPED_CHARS: list[str] = [
'\\', '`', '*', '_', '{', '}', '[', ']', '(', ')', '>', '#', '+', '-', '.', '!'
]
""" List of characters which get the backslash escape treatment. """
self.block_level_elements: list[str] = BLOCK_LEVEL_ELEMENTS.copy()
self.registeredExtensions: list[Extension] = []
self.docType = "" # TODO: Maybe delete this. It does not appear to be used anymore.
self.stripTopLevelTags: bool = True
self.build_parser()
self.references: dict[str, tuple[str, str]] = {}
self.htmlStash: util.HtmlStash = util.HtmlStash()
self.registerExtensions(extensions=kwargs.get('extensions', []),
configs=kwargs.get('extension_configs', {}))
self.set_output_format(kwargs.get('output_format', 'xhtml'))
self.reset()
def build_parser(self) -> Markdown:
"""
Build the parser from the various parts.
Assigns a value to each of the following attributes on the class instance:
* **`Markdown.preprocessors`** ([`Registry`][markdown.util.Registry]) -- A collection of
[`preprocessors`][markdown.preprocessors].
* **`Markdown.parser`** ([`BlockParser`][markdown.blockparser.BlockParser]) -- A collection of
[`blockprocessors`][markdown.blockprocessors].
* **`Markdown.inlinePatterns`** ([`Registry`][markdown.util.Registry]) -- A collection of
[`inlinepatterns`][markdown.inlinepatterns].
* **`Markdown.treeprocessors`** ([`Registry`][markdown.util.Registry]) -- A collection of
[`treeprocessors`][markdown.treeprocessors].
* **`Markdown.postprocessors`** ([`Registry`][markdown.util.Registry]) -- A collection of
[`postprocessors`][markdown.postprocessors].
This method could be redefined in a subclass to build a custom parser which is made up of a different
combination of processors and patterns.
"""
self.preprocessors = build_preprocessors(self)
self.parser = build_block_parser(self)
self.inlinePatterns = build_inlinepatterns(self)
self.treeprocessors = build_treeprocessors(self)
self.postprocessors = build_postprocessors(self)
return self
def registerExtensions(
self,
extensions: Sequence[Extension | str],
configs: Mapping[str, dict[str, Any]]
) -> Markdown:
"""
Load a list of extensions into an instance of the `Markdown` class.
Arguments:
extensions (list[Extension | str]): A list of extensions.
If an item is an instance of a subclass of [`markdown.extensions.Extension`][],
the instance will be used as-is. If an item is of type `str`, it is passed
to [`build_extension`][markdown.Markdown.build_extension] with its corresponding `configs` and the
returned instance of [`markdown.extensions.Extension`][] is used.
configs (dict[str, dict[str, Any]]): Configuration settings for extensions.
"""
for ext in extensions:
if isinstance(ext, str):
ext = self.build_extension(ext, configs.get(ext, {}))
if isinstance(ext, Extension):
ext.extendMarkdown(self)
logger.debug(
'Successfully loaded extension "%s.%s".'
% (ext.__class__.__module__, ext.__class__.__name__)
)
elif ext is not None:
raise TypeError(
'Extension "{}.{}" must be of type: "{}.{}"'.format(
ext.__class__.__module__, ext.__class__.__name__,
Extension.__module__, Extension.__name__
)
)
return self
def build_extension(self, ext_name: str, configs: Mapping[str, Any]) -> Extension:
"""
Build extension from a string name, then return an instance using the given `configs`.
Arguments:
ext_name: Name of extension as a string.
configs: Configuration settings for extension.
Returns:
An instance of the extension with the given configuration settings.
First attempt to load an entry point. The string name must be registered as an entry point in the
`markdown.extensions` group which points to a subclass of the [`markdown.extensions.Extension`][] class.
If multiple distributions have registered the same name, the first one found is returned.
If no entry point is found, assume dot notation (`path.to.module:ClassName`). Load the specified class and
return an instance. If no class is specified, import the module and call a `makeExtension` function and return
the [`markdown.extensions.Extension`][] instance returned by that function.
"""
configs = dict(configs)
entry_points = [ep for ep in util.get_installed_extensions() if ep.name == ext_name]
if entry_points:
ext = entry_points[0].load()
return ext(**configs)
# Get class name (if provided): `path.to.module:ClassName`
ext_name, class_name = ext_name.split(':', 1) if ':' in ext_name else (ext_name, '')
try:
module = importlib.import_module(ext_name)
logger.debug(
'Successfully imported extension module "%s".' % ext_name
)
except ImportError as e:
message = 'Failed loading extension "%s".' % ext_name
e.args = (message,) + e.args[1:]
raise
if class_name:
# Load given class name from module.
return getattr(module, class_name)(**configs)
else:
# Expect `makeExtension()` function to return a class.
try:
return module.makeExtension(**configs)
except AttributeError as e:
message = e.args[0]
message = "Failed to initiate extension " \
"'%s': %s" % (ext_name, message)
e.args = (message,) + e.args[1:]
raise
def registerExtension(self, extension: Extension) -> Markdown:
"""
Register an extension as having a resettable state.
Arguments:
extension: An instance of the extension to register.
This should get called once by an extension during setup. A "registered" extension's
`reset` method is called by [`Markdown.reset()`][markdown.Markdown.reset]. Not all extensions have or need a
resettable state, and so it should not be assumed that all extensions are "registered."
"""
self.registeredExtensions.append(extension)
return self
def reset(self) -> Markdown:
    """
    Clear all per-document state so the instance can parse fresh input.

    Runs once during construction and should also be called manually
    between successive [`Markdown.convert`][markdown.Markdown.convert]
    calls.
    """
    self.htmlStash.reset()
    self.references.clear()
    # Give every registered extension a chance to wipe its own state.
    _missing = object()
    for ext in self.registeredExtensions:
        reset_hook = getattr(ext, 'reset', _missing)
        if reset_hook is not _missing:
            reset_hook()
    return self
def set_output_format(self, format: str) -> Markdown:
    """
    Select the serialization format for rendered output.

    Arguments:
        format: Must be a known value in `Markdown.output_formats`.

    Raises:
        KeyError: If the normalized format name is unknown (with a
            message listing the valid choices).
    """
    # Trailing version digits are ignored, so e.g. "xhtml1", "html4"
    # and "html5" all map onto the "xhtml"/"html" serializers.
    self.output_format = format.lower().rstrip('145')
    try:
        self.serializer = self.output_formats[self.output_format]
    except KeyError as err:
        choices = '", "'.join(sorted(self.output_formats.keys()))
        err.args = (
            'Invalid Output Format: "%s". Use one of "%s".'
            % (self.output_format, choices),
        ) + err.args[1:]
        raise
    return self
# Note: `tag` is annotated `Any` because ElementTree uses assorted objects
# as tags (e.g. the Comment/PI factory callables); their type is unpredictable.
def is_block_level(self, tag: Any) -> bool:
    """
    Report whether `tag` names a block level HTML element.

    Only strings can match: the comparison is case-insensitive and a
    trailing slash (self-closing form) is ignored. Non-string tags are
    never block level.
    """
    if not isinstance(tag, str):
        return False
    normalized = tag.lower().rstrip('/')
    return normalized in self.block_level_elements
def convert(self, source: str) -> str:
    """
    Convert a Markdown string into the configured output format.

    Arguments:
        source: Markdown formatted text as Unicode or ASCII string.

    Returns:
        A string in the specified output format.

    The pipeline runs in five steps: preprocessors munge the raw text,
    the block parser builds an `ElementTree`, treeprocessors transform
    the tree (one of them applies the inline patterns), the tree is
    serialized, and postprocessors touch up the serialized text.
    """
    # Whitespace-only input short-circuits to an empty document.
    if not source.strip():
        return ''
    try:
        source = str(source)
    except UnicodeDecodeError as err:  # pragma: no cover
        # Keep the original traceback but point at the real cause.
        err.reason += '. -- Note: Markdown only accepts Unicode input!'
        raise

    # Step 1: line-oriented preprocessing.
    self.lines = source.split("\n")
    for preprocessor in self.preprocessors:
        self.lines = preprocessor.run(self.lines)

    # Step 2: structural parse into an ElementTree.
    root = self.parser.parseDocument(self.lines).getroot()

    # Step 3: tree transformations; a processor may replace the root.
    for treeprocessor in self.treeprocessors:
        replacement = treeprocessor.run(root)
        root = replacement if replacement is not None else root

    # Step 4: serialize and strip the artificial top-level wrapper tag.
    output = self.serializer(root)
    if self.stripTopLevelTags:
        opening = '<%s>' % self.doc_tag
        closing = '</%s>' % self.doc_tag
        try:
            start = output.index(opening) + len(opening)
            end = output.rindex(closing)
            output = output[start:end].strip()
        except ValueError as err:  # pragma: no cover
            if output.strip().endswith('<%s />' % self.doc_tag):
                # An empty document serializes as a self-closing tag.
                output = ''
            else:
                # We have a serious problem
                raise ValueError('Markdown failed to strip top-level '
                                 'tags. Document=%r' % output.strip()) from err

    # Step 5: text postprocessing on the serialized output.
    for postprocessor in self.postprocessors:
        output = postprocessor.run(output)
    return output.strip()
def convertFile(
    self,
    input: str | BinaryIO | None = None,
    output: str | BinaryIO | None = None,
    encoding: str | None = None,
) -> Markdown:
    """
    Convert a Markdown file and write the rendered HTML out.

    This is the only place Python-Markdown performs byte decoding and
    encoding; everything else is Unicode-in / Unicode-out. The source is
    decoded with `encoding` (defaults to `utf-8`) and the output is
    encoded the same way, using the
    [`xmlcharrefreplace`](https://docs.python.org/3/library/codecs.html#error-handlers)
    error handler.

    Arguments:
        input: File object or path. Reads from `stdin` if `None`.
        output: File object or path. Writes to `stdout` if `None`.
        encoding: Encoding of input and output files. Defaults to `utf-8`.
    """
    encoding = encoding or "utf-8"

    # Read the source.
    if input:
        if isinstance(input, str):
            reader = open(input, mode="r", encoding=encoding)
        else:
            reader = codecs.getreader(encoding)(input)
        text = reader.read()
        reader.close()
    else:
        text = sys.stdin.read()
    # Drop a leading byte-order mark, if present.
    text = text.lstrip('\ufeff')

    html = self.convert(text)

    # Write to file or stdout.
    if not output:
        # No target given: encode manually and write bytes to stdout.
        sys.stdout.buffer.write(html.encode(encoding, "xmlcharrefreplace"))
    elif isinstance(output, str):
        writer = codecs.open(output, "w",
                             encoding=encoding,
                             errors="xmlcharrefreplace")
        writer.write(html)
        writer.close()
    else:
        stream = codecs.getwriter(encoding)(output,
                                            errors="xmlcharrefreplace")
        stream.write(html)
        # Deliberately left open: the caller may want to write more.
    return self
"""
EXPORTED FUNCTIONS
=============================================================================
Those are the two functions we really mean to export: `markdown()` and
`markdownFromFile()`.
"""
def markdown(text: str, **kwargs: Any) -> str:
    """
    Render a Markdown string to HTML and return it as a Unicode string.

    A convenience wrapper around the [`Markdown`][markdown.Markdown]
    class for the most common case: build an instance (loading any
    requested extensions) and run it on `text` once.

    Arguments:
        text: Markdown formatted text as Unicode or ASCII string.

    Keyword arguments:
        **kwargs: Any arguments accepted by the Markdown class.

    Returns:
        A string in the specified output format.
    """
    return Markdown(**kwargs).convert(text)
def markdownFromFile(**kwargs: Any):
    """
    Read Markdown text from a file and write output to a file or stream.

    A convenience wrapper that builds a [`Markdown`][markdown.Markdown]
    instance and delegates to
    [`convertFile`][markdown.Markdown.convertFile] rather than
    [`convert`][markdown.Markdown.convert].

    Keyword arguments:
        input (str | BinaryIO): A file name or readable object.
        output (str | BinaryIO): A file name or writable object.
        encoding (str): Encoding of input and output.
        **kwargs: Any arguments accepted by the `Markdown` class.
    """
    converter = Markdown(**kwargs)
    converter.convertFile(
        kwargs.get('input'),
        kwargs.get('output'),
        kwargs.get('encoding'),
    )
| Markdown |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/overloadOverlap1.py | {
"start": 1864,
"end": 2321
} | class ____(Generic[_T1, _T2]):
@overload
def method1(self, a: _T1, b: tuple[_T2, ...]) -> int: ...
@overload
def method1(self, a: _T1, b: tuple[Any, ...]) -> int: ...
def method1(self, *args: Any, **kwargs: Any) -> Any: ...
@overload
def method2(self, a: _T2, b: int) -> int: ...
@overload
def method2(self, a: _T1, b: _T2) -> int: ...
def method2(self, *args: Any, **kwargs: Any) -> Any:
pass
| GenericClass |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_self/SLF001_1.py | {
"start": 280,
"end": 393
} | class ____:
def __new__(cls):
instance = super().__new__(cls)
instance._value = 1 # fine
| Ipsum |
python | scrapy__scrapy | scrapy/http/cookies.py | {
"start": 4370,
"end": 4479
} | class ____:
def acquire(self) -> None:
pass
def release(self) -> None:
pass
| _DummyLock |
python | conda__conda | conda/common/_logic.py | {
"start": 3615,
"end": 5143
} | class ____:
"""Simple wrapper to call a SAT solver given a _ClauseList/_ClauseArray instance."""
def __init__(self, **run_kwargs):
self._run_kwargs = run_kwargs or {}
self._clauses = _ClauseList()
# Bind some methods of _clauses to reduce lookups and call overhead.
self.add_clause = self._clauses.append
self.add_clauses = self._clauses.extend
def get_clause_count(self):
return self._clauses.get_clause_count()
def as_list(self):
return self._clauses.as_list()
def save_state(self):
return self._clauses.save_state()
def restore_state(self, saved_state):
return self._clauses.restore_state(saved_state)
def run(self, m, **kwargs):
run_kwargs = self._run_kwargs.copy()
run_kwargs.update(kwargs)
solver = self.setup(m, **run_kwargs)
sat_solution = self.invoke(solver)
solution = self.process_solution(sat_solution)
return solution
def setup(self, m, **kwargs):
"""Create a solver instance, add the clauses to it, and return it."""
raise NotImplementedError()
def invoke(self, solver):
"""Start the actual SAT solving and return the calculated solution."""
raise NotImplementedError()
def process_solution(self, sat_solution):
"""
Process the solution returned by self.invoke.
Returns a list of satisfied variables or None if no solution is found.
"""
raise NotImplementedError()
| _SatSolver |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_nevada_zip.py | {
"start": 737,
"end": 1735
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_nevada_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_nevada_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidNevadaZip |
python | getsentry__sentry | fixtures/safe_migrations_apps/bad_flow_rename_field_app/migrations/0002_rename_field.py | {
"start": 145,
"end": 432
} | class ____(CheckedMigration):
dependencies = [
("bad_flow_rename_field_app", "0001_initial"),
]
operations = [
migrations.RenameField(
model_name="TestTable",
old_name="field",
new_name="new_field",
),
]
| Migration |
python | sympy__sympy | sympy/simplify/hyperexpand.py | {
"start": 43238,
"end": 44743
} | class ____(Operator):
""" Decrement a lower b index. """
# XXX this is "essentially" the same as MeijerUnShiftA. This "essentially"
# can be made rigorous using the functional equation G(1/z) = G'(z),
# where G' denotes a G function of slightly altered parameters.
# However, sorting out the details seems harder than just coding it
# again.
def __init__(self, an, ap, bm, bq, i, z):
""" Note: i counts from zero! """
an, ap, bm, bq, i = list(map(sympify, [an, ap, bm, bq, i]))
self._an = an
self._ap = ap
self._bm = bm
self._bq = bq
self._i = i
an = list(an)
ap = list(ap)
bm = list(bm)
bq = list(bq)
bi = bq.pop(i) - 1
m = Poly(1, _x)
for b in bm:
m *= Poly(b - _x, _x)
for b in bq:
m *= Poly(_x - b, _x)
C = Dummy('C')
D = Poly(bi + C, C)
n = Poly(z, C)
for a in an:
n *= (D + 1 - a)
for a in ap:
n *= (-D + a - 1)
b0 = n.nth(0)
if b0 == 0:
raise ValueError('Cannot decrement lower b index (cancels)')
n = Poly(Poly(n.all_coeffs()[:-1], C).as_expr().subs(C, _x - bi), _x)
self._poly = Poly((m - n)/b0, _x)
def __str__(self):
return '<Decrement lower b index #%s of %s, %s, %s, %s.>' % (self._i,
self._an, self._ap, self._bm, self._bq)
| MeijerUnShiftC |
python | django__django | django/contrib/gis/geos/prototypes/io.py | {
"start": 4088,
"end": 4610
} | class ____(GEOSBase):
"Base class for GEOS I/O objects."
def __init__(self):
# Getting the pointer with the constructor.
self.ptr = self._constructor()
# Loading the real destructor function at this point as doing it in
# __del__ is too late (import error).
self.destructor.func
# ### Base WKB/WKT Reading and Writing objects ###
# Non-public WKB/WKT reader classes for internal use because
# their `read` methods return _pointers_ instead of GEOSGeometry
# objects.
| IOBase |
python | spyder-ide__spyder | spyder/utils/workers.py | {
"start": 6732,
"end": 11730
} | class ____(QObject):
"""Manager for generic workers."""
def __init__(self, parent=None, max_threads=10):
super().__init__(parent=parent)
self.parent = parent
self._queue = deque()
self._queue_workers = deque()
self._threads = []
self._workers = []
self._timer = QTimer(self)
self._timer_worker_delete = QTimer(self)
self._running_threads = 0
self._max_threads = max_threads
# Keeps references to old workers
# Needed to avoid C++/python object errors
self._bag_collector = deque()
self._timer.setInterval(333)
self._timer.timeout.connect(self._start)
self._timer_worker_delete.setInterval(5000)
self._timer_worker_delete.timeout.connect(self._clean_workers)
def _clean_workers(self):
"""Delete periodically workers in workers bag."""
while self._bag_collector:
self._bag_collector.popleft()
self._timer_worker_delete.stop()
def _start(self, worker=None):
"""Start threads and check for inactive workers."""
if worker:
self._queue_workers.append(worker)
if self._queue_workers and self._running_threads < self._max_threads:
if self.parent is not None:
logger.debug(
f"Workers managed in {self.parent} -- "
f"In queue: {len(self._queue_workers)} -- "
f"Running threads: {self._running_threads} -- "
f"Workers: {len(self._workers)} -- "
f"Threads: {len(self._threads)}"
)
worker = self._queue_workers.popleft()
if isinstance(worker, PythonWorker):
self._running_threads += 1
thread = QThread(None)
self._threads.append(thread)
worker.moveToThread(thread)
worker.sig_finished.connect(thread.quit)
thread.started.connect(worker._start)
thread.start()
elif isinstance(worker, ProcessWorker):
worker._start()
else:
self._timer.start()
if self._workers:
for w in self._workers:
if w.is_finished():
self._bag_collector.append(w)
self._workers.remove(w)
if self._threads:
for t in self._threads:
if t.isFinished():
self._threads.remove(t)
self._running_threads -= 1
if len(self._threads) == 0 and len(self._workers) == 0:
self._timer.stop()
self._timer_worker_delete.start()
def create_python_worker(self, func, *args, **kwargs):
"""Create a new python worker instance."""
worker = PythonWorker(func, args, kwargs)
self._create_worker(worker)
return worker
def create_process_worker(self, cmd_list, environ=None):
"""Create a new process worker instance."""
worker = ProcessWorker(self, cmd_list, environ=environ)
self._create_worker(worker)
return worker
def terminate_all(self):
"""Terminate all worker processes."""
for worker in self._workers:
worker.terminate()
for thread in self._threads:
try:
thread.quit()
thread.wait()
except Exception:
pass
self._queue_workers = deque()
def _create_worker(self, worker):
"""Common worker setup."""
worker.sig_started.connect(self._start)
self._workers.append(worker)
# --- Local testing
# -----------------------------------------------------------------------------
def ready_print(worker, output, error): # pragma: no cover
"""Print worker output for tests."""
print(worker, output, error) # spyder: test-skip
def sleeping_func(arg, secs=10, result_queue=None):
"""This methods illustrates how the workers can be used."""
import time
time.sleep(secs)
if result_queue is not None:
result_queue.put(arg)
else:
return arg
def local_test(): # pragma: no cover
"""Main local test."""
from spyder.utils.qthelpers import qapplication
app = qapplication()
wm = WorkerManager(max_threads=3)
for i in range(7):
worker = wm.create_python_worker(sleeping_func, 'BOOM! {}'.format(i),
secs=5)
worker.sig_finished.connect(ready_print)
worker.start()
worker = wm.create_python_worker(sleeping_func, 'BOOM!', secs=5)
worker.sig_finished.connect(ready_print)
worker.start()
worker = wm.create_process_worker(['conda', 'info', '--json'])
worker.sig_finished.connect(ready_print)
worker.start()
# wm.terminate_all()
# wm.terminate_all()
sys.exit(app.exec_())
if __name__ == '__main__': # pragma: no cover
local_test()
| WorkerManager |
python | scikit-image__scikit-image | tests/skimage/feature/test_canny.py | {
"start": 230,
"end": 6388
} | class ____(unittest.TestCase):
def test_00_00_zeros(self):
'''Test that the Canny filter finds no points for a blank field'''
result = feature.canny(np.zeros((20, 20)), 4, 0, 0, np.ones((20, 20), bool))
self.assertFalse(np.any(result))
def test_00_01_zeros_mask(self):
'''Test that the Canny filter finds no points in a masked image'''
result = feature.canny(
np.random.uniform(size=(20, 20)), 4, 0, 0, np.zeros((20, 20), bool)
)
self.assertFalse(np.any(result))
def test_01_01_circle(self):
'''Test that the Canny filter finds the outlines of a circle'''
i, j = np.mgrid[-200:200, -200:200].astype(float) / 200
c = np.abs(np.sqrt(i * i + j * j) - 0.5) < 0.02
result = feature.canny(c.astype(float), 4, 0, 0, np.ones(c.shape, bool))
#
# erode and dilate the circle to get rings that should contain the
# outlines
#
cd = binary_dilation(c, iterations=3)
ce = binary_erosion(c, iterations=3)
cde = np.logical_and(cd, np.logical_not(ce))
self.assertTrue(np.all(cde[result]))
#
# The circle has a radius of 100. There are two rings here, one
# for the inside edge and one for the outside. So that's
# 100 * 2 * 2 * 3 for those places where pi is still 3.
# The edge contains both pixels if there's a tie, so we
# bump the count a little.
point_count = np.sum(result)
self.assertTrue(point_count > 1200)
self.assertTrue(point_count < 1600)
def test_01_02_circle_with_noise(self):
'''Test that the Canny filter finds the circle outlines
in a noisy image'''
np.random.seed(0)
i, j = np.mgrid[-200:200, -200:200].astype(float) / 200
c = np.abs(np.sqrt(i * i + j * j) - 0.5) < 0.02
cf = c.astype(float) * 0.5 + np.random.uniform(size=c.shape) * 0.5
result = feature.canny(cf, 4, 0.1, 0.2, np.ones(c.shape, bool))
#
# erode and dilate the circle to get rings that should contain the
# outlines
#
cd = binary_dilation(c, iterations=4)
ce = binary_erosion(c, iterations=4)
cde = np.logical_and(cd, np.logical_not(ce))
self.assertTrue(np.all(cde[result]))
point_count = np.sum(result)
self.assertTrue(point_count > 1200)
self.assertTrue(point_count < 1600)
def test_image_shape(self):
self.assertRaises(ValueError, feature.canny, np.zeros((20, 20, 20)), 4, 0, 0)
def test_mask_none(self):
result1 = feature.canny(np.zeros((20, 20)), 4, 0, 0, np.ones((20, 20), bool))
result2 = feature.canny(np.zeros((20, 20)), 4, 0, 0)
self.assertTrue(np.all(result1 == result2))
def test_use_quantiles(self):
image = img_as_float(data.camera()[::100, ::100])
# Correct output produced manually with quantiles
# of 0.8 and 0.6 for high and low respectively
correct_output = np.array(
[
[False, False, False, False, False, False],
[False, True, True, True, False, False],
[False, False, False, True, False, False],
[False, False, False, True, False, False],
[False, False, True, True, False, False],
[False, False, False, False, False, False],
]
)
result = feature.canny(
image, low_threshold=0.6, high_threshold=0.8, use_quantiles=True
)
assert_equal(result, correct_output)
def test_img_all_ones(self):
image = np.ones((10, 10))
assert np.all(feature.canny(image) == 0)
def test_invalid_use_quantiles(self):
image = img_as_float(data.camera()[::50, ::50])
self.assertRaises(
ValueError,
feature.canny,
image,
use_quantiles=True,
low_threshold=0.5,
high_threshold=3.6,
)
self.assertRaises(
ValueError,
feature.canny,
image,
use_quantiles=True,
low_threshold=-5,
high_threshold=0.5,
)
self.assertRaises(
ValueError,
feature.canny,
image,
use_quantiles=True,
low_threshold=99,
high_threshold=0.9,
)
self.assertRaises(
ValueError,
feature.canny,
image,
use_quantiles=True,
low_threshold=0.5,
high_threshold=-100,
)
# Example from issue #4282
image = data.camera()
self.assertRaises(
ValueError,
feature.canny,
image,
use_quantiles=True,
low_threshold=50,
high_threshold=150,
)
def test_dtype(self):
"""Check that the same output is produced regardless of image dtype."""
image_uint8 = data.camera()
image_float = img_as_float(image_uint8)
result_uint8 = feature.canny(image_uint8)
result_float = feature.canny(image_float)
assert_equal(result_uint8, result_float)
low = 0.1
high = 0.2
assert_equal(
feature.canny(image_float, 1.0, low, high),
feature.canny(image_uint8, 1.0, 255 * low, 255 * high),
)
def test_full_mask_matches_no_mask(self):
"""The masked and unmasked algorithms should return the same result."""
image = data.camera()
for mode in ('constant', 'nearest', 'reflect'):
assert_equal(
feature.canny(image, mode=mode),
feature.canny(image, mode=mode, mask=np.ones_like(image, dtype=bool)),
)
def test_unsupported_int64(self):
for dtype in (np.int64, np.uint64):
image = np.zeros((10, 10), dtype=dtype)
image[3, 3] = np.iinfo(dtype).max
with pytest.raises(
ValueError, match="64-bit integer images are not supported"
):
feature.canny(image)
| TestCanny |
python | qdrant__qdrant-client | qdrant_client/embed/embed_inspector.py | {
"start": 362,
"end": 6751
} | class ____:
"""Inspector which collects paths to objects requiring inference in the received models
Attributes:
parser: ModelSchemaParser instance
"""
def __init__(self, parser: Optional[ModelSchemaParser] = None) -> None:
self.parser = ModelSchemaParser() if parser is None else parser
def inspect(self, points: Union[Iterable[BaseModel], BaseModel]) -> list[FieldPath]:
"""Looks for all the paths to objects requiring inference in the received models
Args:
points: models to inspect
Returns:
list of FieldPath objects
"""
paths = []
if isinstance(points, BaseModel):
self.parser.parse_model(points.__class__)
paths.extend(self._inspect_model(points))
elif isinstance(points, dict):
for value in points.values():
paths.extend(self.inspect(value))
elif isinstance(points, Iterable):
for point in points:
if isinstance(point, BaseModel):
self.parser.parse_model(point.__class__)
paths.extend(self._inspect_model(point))
paths = sorted(set(paths))
return convert_paths(paths)
def _inspect_model(
self, mod: BaseModel, paths: Optional[list[FieldPath]] = None, accum: Optional[str] = None
) -> list[str]:
"""Looks for all the paths to objects requiring inference in the received model
Args:
mod: model to inspect
paths: list of paths to the fields possibly containing objects for inference
accum: accumulator for the path. Path is a dot separated string of field names which we assemble recursively
Returns:
list of paths to the model fields containing objects for inference
"""
paths = self.parser.path_cache.get(mod.__class__.__name__, []) if paths is None else paths
found_paths = []
for path in paths:
found_paths.extend(
self._inspect_inner_models(
mod, path.current, path.tail if path.tail else [], accum
)
)
return found_paths
def _inspect_inner_models(
self,
original_model: BaseModel,
current_path: str,
tail: list[FieldPath],
accum: Optional[str] = None,
) -> list[str]:
"""Looks for all the paths to objects requiring inference in the received model
Args:
original_model: model to inspect
current_path: the field to inspect on the current iteration
tail: list of FieldPath objects to the fields possibly containing objects for inference
accum: accumulator for the path. Path is a dot separated string of field names which we assemble recursively
Returns:
list of paths to the model fields containing objects for inference
"""
found_paths = []
if accum is None:
accum = current_path
else:
accum += f".{current_path}"
def inspect_recursive(member: BaseModel, accumulator: str) -> list[str]:
"""Iterates over the set model fields, expand recursive ones and find paths to objects requiring inference
Args:
member: currently inspected model, which may or may not contain recursive fields
accumulator: accumulator for the path, which is a dot separated string assembled recursively
"""
recursive_paths = []
for field in model_fields_set(member):
if field in self.parser.name_recursive_ref_mapping:
mapped_field = self.parser.name_recursive_ref_mapping[field]
recursive_paths.extend(self.parser.path_cache[mapped_field])
return self._inspect_model(member, copy(recursive_paths), accumulator)
model = getattr(original_model, current_path, None)
if model is None:
return []
if isinstance(model, get_args(INFERENCE_OBJECT_TYPES)):
return [accum]
if isinstance(model, BaseModel):
found_paths.extend(inspect_recursive(model, accum))
for next_path in tail:
found_paths.extend(
self._inspect_inner_models(
model, next_path.current, next_path.tail if next_path.tail else [], accum
)
)
return found_paths
elif isinstance(model, list):
for current_model in model:
if not isinstance(current_model, BaseModel):
continue
if isinstance(current_model, get_args(INFERENCE_OBJECT_TYPES)):
found_paths.append(accum)
found_paths.extend(inspect_recursive(current_model, accum))
for next_path in tail:
for current_model in model:
found_paths.extend(
self._inspect_inner_models(
current_model,
next_path.current,
next_path.tail if next_path.tail else [],
accum,
)
)
return found_paths
elif isinstance(model, dict):
found_paths = []
for key, values in model.items():
values = [values] if not isinstance(values, list) else values
for current_model in values:
if not isinstance(current_model, BaseModel):
continue
if isinstance(current_model, get_args(INFERENCE_OBJECT_TYPES)):
found_paths.append(accum)
found_paths.extend(inspect_recursive(current_model, accum))
for next_path in tail:
for current_model in values:
found_paths.extend(
self._inspect_inner_models(
current_model,
next_path.current,
next_path.tail if next_path.tail else [],
accum,
)
)
return found_paths
| InspectorEmbed |
python | kamyu104__LeetCode-Solutions | Python/string-transformation.py | {
"start": 1311,
"end": 3071
} | class ____(object):
def numberOfWays(self, s, t, k):
"""
:type s: str
:type t: str
:type k: int
:rtype: int
"""
MOD = 10**9+7
def matrix_mult(A, B):
ZB = zip(*B)
return [[sum(a*b % MOD for a, b in itertools.izip(row, col)) % MOD for col in ZB] for row in A]
def matrix_expo(A, K):
result = [[int(i == j) for j in xrange(len(A))] for i in xrange(len(A))]
while K:
if K % 2:
result = matrix_mult(result, A)
A = matrix_mult(A, A)
K /= 2
return result
def getPrefix(pattern):
prefix = [-1]*len(pattern)
j = -1
for i in xrange(1, len(pattern)):
while j+1 > 0 and pattern[j+1] != pattern[i]:
j = prefix[j]
if pattern[j+1] == pattern[i]:
j += 1
prefix[i] = j
return prefix
def KMP(text, pattern):
prefix = getPrefix(pattern)
j = -1
for i in xrange(len(text)):
while j+1 > 0 and pattern[j+1] != text[i]:
j = prefix[j]
if pattern[j+1] == text[i]:
j += 1
if j+1 == len(pattern):
yield i-j
j = prefix[j]
n = len(s)
T = [[0, 1],
[n-1, (n-1)-1]]
dp = [1, 0]
dp = matrix_mult([dp], matrix_expo(T, k))[0] # [dp[0], dp[1]] * T^k
return reduce(lambda a, b: (a+b)%MOD, (dp[int(i != 0)] for i in KMP(s+s[:-1], t)), 0)
# Time: O(n + logk)
# Space: O(n)
# dp, matrix exponentiation, z-function
| Solution2 |
python | mozilla__bleach | bleach/_vendor/parse.py | {
"start": 5051,
"end": 5322
} | class ____(object):
"""Standard approach to decoding parsed results from bytes to str"""
__slots__ = ()
def decode(self, encoding='ascii', errors='strict'):
return self._decoded_counterpart(*(x.decode(encoding, errors) for x in self))
| _ResultMixinBytes |
python | huggingface__transformers | src/transformers/models/dab_detr/modeling_dab_detr.py | {
"start": 1976,
"end": 3796
} | class ____(BaseModelOutputWithCrossAttentions):
r"""
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax,
used to compute the weighted average in the cross-attention heads.
intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`):
Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a
layernorm.
reference_points (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, 2 (anchor points))`):
Reference points (reference points of each layer of the decoder).
"""
intermediate_hidden_states: Optional[torch.FloatTensor] = None
reference_points: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for outputs of the Conditional DETR encoder-decoder model. This class adds one attribute to
Seq2SeqModelOutput, namely an optional stack of intermediate decoder activations, i.e. the output of each decoder
layer, each of them gone through a layernorm. This is useful when training the model with auxiliary decoding
losses.
"""
)
# Copied from transformers.models.conditional_detr.modeling_conditional_detr.ConditionalDetrModelOutput with ConditionalDetr->DabDetr,Conditional DETR->DAB-DETR,2 (anchor points)->4 (anchor points)
| DabDetrDecoderOutput |
python | numba__numba | numba/core/typing/arraydecl.py | {
"start": 20779,
"end": 21270
} | class ____(AttributeTemplate):
key = types.DType
def resolve_type(self, ary):
# Wrap the numeric type in NumberClass
return types.NumberClass(ary.dtype)
def resolve_kind(self, ary):
if isinstance(ary.key, types.scalars.Float):
val = 'f'
elif isinstance(ary.key, types.scalars.Integer):
val = 'i'
else:
return None # other types not supported yet
return types.StringLiteral(val)
@infer
| DTypeAttr |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 84579,
"end": 85728
} | class ____:
xlPatternAutomatic = -4105 # from enum XlPattern
xlPatternChecker = 9 # from enum XlPattern
xlPatternCrissCross = 16 # from enum XlPattern
xlPatternDown = -4121 # from enum XlPattern
xlPatternGray16 = 17 # from enum XlPattern
xlPatternGray25 = -4124 # from enum XlPattern
xlPatternGray50 = -4125 # from enum XlPattern
xlPatternGray75 = -4126 # from enum XlPattern
xlPatternGray8 = 18 # from enum XlPattern
xlPatternGrid = 15 # from enum XlPattern
xlPatternHorizontal = -4128 # from enum XlPattern
xlPatternLightDown = 13 # from enum XlPattern
xlPatternLightHorizontal = 11 # from enum XlPattern
xlPatternLightUp = 14 # from enum XlPattern
xlPatternLightVertical = 12 # from enum XlPattern
xlPatternLinearGradient = 4000 # from enum XlPattern
xlPatternNone = -4142 # from enum XlPattern
xlPatternRectangularGradient = 4001 # from enum XlPattern
xlPatternSemiGray75 = 10 # from enum XlPattern
xlPatternSolid = 1 # from enum XlPattern
xlPatternUp = -4162 # from enum XlPattern
xlPatternVertical = -4166 # from enum XlPattern
| Pattern |
python | catalyst-team__catalyst | catalyst/metrics/_accuracy.py | {
"start": 4948,
"end": 10302
} | class ____(AdditiveMetric, ICallbackBatchMetric):
"""
This metric computes accuracy for multilabel classification case.
It computes mean value of accuracy and it's approximate std value
(note that it's not a real accuracy std but std of accuracy over batch mean values).
Args:
compute_on_call: if True, computes and returns metric value during metric call
prefix: metric prefix
suffix: metric suffix
threshold: thresholds for model scores
Examples:
.. code-block:: python
import torch
from catalyst import metrics
outputs = torch.tensor([
[0.1, 0.9, 0.0, 0.8],
[0.96, 0.01, 0.85, 0.2],
[0.98, 0.4, 0.2, 0.1],
[0.1, 0.89, 0.2, 0.0],
])
targets = torch.tensor([
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 0, 0],
])
metric = metrics.MultilabelAccuracyMetric(threshold=0.6)
metric.reset()
metric.update(outputs, targets)
metric.compute()
# (0.75, 0.0) # mean, std
metric.compute_key_value()
# {
# 'accuracy': 0.75,
# 'accuracy/std': 0.0,
# }
metric.reset()
metric(outputs, targets)
# (0.75, 0.0) # mean, std
.. code-block:: python
import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst import dl
# sample data
num_samples, num_features, num_classes = int(1e4), int(1e1), 4
X = torch.rand(num_samples, num_features)
y = (torch.rand(num_samples, num_classes) > 0.5).to(torch.float32)
# pytorch loaders
dataset = TensorDataset(X, y)
loader = DataLoader(dataset, batch_size=32, num_workers=1)
loaders = {"train": loader, "valid": loader}
# model, criterion, optimizer, scheduler
model = torch.nn.Linear(num_features, num_classes)
criterion = torch.nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(model.parameters())
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, [2])
# model training
runner = dl.SupervisedRunner(
input_key="features",
output_key="logits",
target_key="targets",
loss_key="loss"
)
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
scheduler=scheduler,
loaders=loaders,
logdir="./logdir",
num_epochs=3,
valid_loader="valid",
valid_metric="accuracy",
minimize_valid_metric=False,
verbose=True,
callbacks=[
dl.AUCCallback(input_key="logits", target_key="targets"),
dl.MultilabelAccuracyCallback(
input_key="logits", target_key="targets", threshold=0.5
)
]
)
.. note::
Please follow the `minimal examples`_ sections for more use cases.
.. _`minimal examples`: https://github.com/catalyst-team/catalyst#minimal-examples # noqa: E501, W505
"""
def __init__(
self,
threshold: Union[float, torch.Tensor] = 0.5,
compute_on_call: bool = True,
prefix: Optional[str] = None,
suffix: Optional[str] = None,
):
"""Init MultilabelAccuracyMetric"""
super().__init__(compute_on_call=compute_on_call)
self.prefix = prefix or ""
self.suffix = suffix or ""
self.metric_name_mean = f"{self.prefix}accuracy{self.suffix}"
self.metric_name_std = f"{self.prefix}accuracy{self.suffix}/std"
self.threshold = threshold
def update(self, outputs: torch.Tensor, targets: torch.Tensor) -> float:
"""
Update metric value with accuracy for new data
and return intermediate metric value.
Args:
outputs: tensor of outputs
targets: tensor of true answers
Returns:
accuracy metric for outputs and targets
"""
metric = multilabel_accuracy(
outputs=outputs, targets=targets, threshold=self.threshold
).item()
super().update(value=metric, num_samples=np.prod(targets.shape))
return metric
def update_key_value(
self, outputs: torch.Tensor, targets: torch.Tensor
) -> Dict[str, float]:
"""
Update metric value with accuracy for new data and return intermediate metric
value in key-value format.
Args:
outputs: tensor of outputs
targets: tensor of true answers
Returns:
accuracy metric for outputs and targets
"""
metric = self.update(outputs=outputs, targets=targets)
return {self.metric_name_mean: metric}
def compute_key_value(self) -> Dict[str, float]:
"""
Compute accuracy for all data and return results in key-value format
Returns:
dict of metrics
"""
metric_mean, metric_std = self.compute()
return {
self.metric_name_mean: metric_mean,
self.metric_name_std: metric_std,
}
__all__ = ["AccuracyMetric", "MultilabelAccuracyMetric"]
| MultilabelAccuracyMetric |
python | sympy__sympy | sympy/utilities/codegen.py | {
"start": 54844,
"end": 64114
} | class ____(CodeGen):
"""Generator for Octave code.
The .write() method inherited from CodeGen will output a code file
<prefix>.m.
Octave .m files usually contain one function. That function name should
match the filename (``prefix``). If you pass multiple ``name_expr`` pairs,
the latter ones are presumed to be private functions accessed by the
primary function.
You should only pass inputs to ``argument_sequence``: outputs are ordered
according to their order in ``name_expr``.
"""
code_extension = "m"
def __init__(self, project='project', printer=None):
super().__init__(project)
self.printer = printer or OctaveCodePrinter()
def routine(self, name, expr, argument_sequence, global_vars):
"""Specialized Routine creation for Octave."""
# FIXME: this is probably general enough for other high-level
# languages, perhaps its the C/Fortran one that is specialized!
if is_sequence(expr) and not isinstance(expr, (MatrixBase, MatrixExpr)):
if not expr:
raise ValueError("No expression given")
expressions = Tuple(*expr)
else:
expressions = Tuple(expr)
# local variables
local_vars = {i.label for i in expressions.atoms(Idx)}
# global variables
global_vars = set() if global_vars is None else set(global_vars)
# symbols that should be arguments
old_symbols = expressions.free_symbols - local_vars - global_vars
symbols = set()
for s in old_symbols:
if isinstance(s, Idx):
symbols.update(s.args[1].free_symbols)
elif not isinstance(s, Indexed):
symbols.add(s)
# Octave supports multiple return values
return_vals = []
for (i, expr) in enumerate(expressions):
if isinstance(expr, Equality):
out_arg = expr.lhs
expr = expr.rhs
symbol = out_arg
if isinstance(out_arg, Indexed):
symbol = out_arg.base.label
if not isinstance(out_arg, (Indexed, Symbol, MatrixSymbol)):
raise CodeGenError("Only Indexed, Symbol, or MatrixSymbol "
"can define output arguments.")
return_vals.append(Result(expr, name=symbol, result_var=out_arg))
if not expr.has(symbol):
# this is a pure output: remove from the symbols list, so
# it doesn't become an input.
symbols.remove(symbol)
else:
# we have no name for this output
return_vals.append(Result(expr, name='out%d' % (i+1)))
# setup input argument list
arg_list = []
array_symbols = {}
for array in expressions.atoms(Indexed):
array_symbols[array.base.label] = array
for array in expressions.atoms(MatrixSymbol):
array_symbols[array] = array
for symbol in sorted(symbols, key=str):
arg_list.append(InputArgument(symbol))
if argument_sequence is not None:
# if the user has supplied IndexedBase instances, we'll accept that
new_sequence = []
for arg in argument_sequence:
if isinstance(arg, IndexedBase):
new_sequence.append(arg.label)
else:
new_sequence.append(arg)
argument_sequence = new_sequence
missing = [x for x in arg_list if x.name not in argument_sequence]
if missing:
msg = "Argument list didn't specify: {0} "
msg = msg.format(", ".join([str(m.name) for m in missing]))
raise CodeGenArgumentListError(msg, missing)
# create redundant arguments to produce the requested sequence
name_arg_dict = {x.name: x for x in arg_list}
new_args = []
for symbol in argument_sequence:
try:
new_args.append(name_arg_dict[symbol])
except KeyError:
new_args.append(InputArgument(symbol))
arg_list = new_args
return Routine(name, arg_list, return_vals, local_vars, global_vars)
def _get_header(self):
"""Writes a common header for the generated files."""
code_lines = []
tmp = header_comment % {"version": sympy_version,
"project": self.project}
for line in tmp.splitlines():
if line == '':
code_lines.append("%\n")
else:
code_lines.append("%% %s\n" % line)
return code_lines
def _preprocessor_statements(self, prefix):
return []
def _get_routine_opening(self, routine):
"""Returns the opening statements of the routine."""
code_list = []
code_list.append("function ")
# Outputs
outs = []
for result in routine.results:
if isinstance(result, Result):
# Note: name not result_var; want `y` not `y(i)` for Indexed
s = self._get_symbol(result.name)
else:
raise CodeGenError("unexpected object in Routine results")
outs.append(s)
if len(outs) > 1:
code_list.append("[" + (", ".join(outs)) + "]")
else:
code_list.append("".join(outs))
code_list.append(" = ")
# Inputs
args = []
for arg in routine.arguments:
if isinstance(arg, (OutputArgument, InOutArgument)):
raise CodeGenError("Octave: invalid argument of type %s" %
str(type(arg)))
if isinstance(arg, InputArgument):
args.append("%s" % self._get_symbol(arg.name))
args = ", ".join(args)
code_list.append("%s(%s)\n" % (routine.name, args))
code_list = [ "".join(code_list) ]
return code_list
def _declare_arguments(self, routine):
return []
def _declare_globals(self, routine):
if not routine.global_vars:
return []
s = " ".join(sorted([self._get_symbol(g) for g in routine.global_vars]))
return ["global " + s + "\n"]
def _declare_locals(self, routine):
return []
def _get_routine_ending(self, routine):
return ["end\n"]
def _call_printer(self, routine):
declarations = []
code_lines = []
for result in routine.results:
if isinstance(result, Result):
assign_to = result.result_var
else:
raise CodeGenError("unexpected object in Routine results")
constants, not_supported, oct_expr = self._printer_method_with_settings(
'doprint', {"human": False, "strict": False}, result.expr, assign_to=assign_to)
for obj, v in sorted(constants, key=str):
declarations.append(
" %s = %s; %% constant\n" % (obj, v))
for obj in sorted(not_supported, key=str):
if isinstance(obj, Function):
name = obj.func
else:
name = obj
declarations.append(
" %% unsupported: %s\n" % (name))
code_lines.append("%s\n" % (oct_expr))
return declarations + code_lines
def _indent_code(self, codelines):
return self._printer_method_with_settings(
'indent_code', {"human": False, "strict": False}, codelines)
def dump_m(self, routines, f, prefix, header=True, empty=True, inline=True):
# Note used to call self.dump_code() but we need more control for header
code_lines = self._preprocessor_statements(prefix)
for i, routine in enumerate(routines):
if i > 0:
if empty:
code_lines.append("\n")
code_lines.extend(self._get_routine_opening(routine))
if i == 0:
if routine.name != prefix:
raise ValueError('Octave function name should match prefix')
if header:
code_lines.append("%" + prefix.upper() +
" Autogenerated by SymPy\n")
code_lines.append(''.join(self._get_header()))
code_lines.extend(self._declare_arguments(routine))
code_lines.extend(self._declare_globals(routine))
code_lines.extend(self._declare_locals(routine))
if empty:
code_lines.append("\n")
code_lines.extend(self._call_printer(routine))
if empty:
code_lines.append("\n")
code_lines.extend(self._get_routine_ending(routine))
code_lines = self._indent_code(''.join(code_lines))
if code_lines:
f.write(code_lines)
dump_m.extension = code_extension # type: ignore
dump_m.__doc__ = CodeGen.dump_code.__doc__
# This list of dump functions is used by CodeGen.write to know which dump
# functions it has to call.
dump_fns = [dump_m]
| OctaveCodeGen |
python | pytest-dev__pytest | src/_pytest/capture.py | {
"start": 9017,
"end": 9807
} | class ____(abc.ABC, Generic[AnyStr]):
EMPTY_BUFFER: AnyStr
@abc.abstractmethod
def __init__(self, fd: int) -> None:
raise NotImplementedError()
@abc.abstractmethod
def start(self) -> None:
raise NotImplementedError()
@abc.abstractmethod
def done(self) -> None:
raise NotImplementedError()
@abc.abstractmethod
def suspend(self) -> None:
raise NotImplementedError()
@abc.abstractmethod
def resume(self) -> None:
raise NotImplementedError()
@abc.abstractmethod
def writeorg(self, data: AnyStr) -> None:
raise NotImplementedError()
@abc.abstractmethod
def snap(self) -> AnyStr:
raise NotImplementedError()
patchsysdict = {0: "stdin", 1: "stdout", 2: "stderr"}
| CaptureBase |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_macro02.py | {
"start": 315,
"end": 953
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("macro02.xlsm")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
workbook.add_vba_project(self.vba_dir + "vbaProject03.bin")
workbook.set_vba_name("MyWorkbook")
worksheet.set_vba_name("MySheet1")
worksheet.write("A1", 123)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pypa__pip | src/pip/_vendor/rich/containers.py | {
"start": 461,
"end": 1678
} | class ____:
"""A list subclass which renders its contents to the console."""
def __init__(
self, renderables: Optional[Iterable["RenderableType"]] = None
) -> None:
self._renderables: List["RenderableType"] = (
list(renderables) if renderables is not None else []
)
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
"""Console render method to insert line-breaks."""
yield from self._renderables
def __rich_measure__(
self, console: "Console", options: "ConsoleOptions"
) -> "Measurement":
dimensions = [
Measurement.get(console, options, renderable)
for renderable in self._renderables
]
if not dimensions:
return Measurement(1, 1)
_min = max(dimension.minimum for dimension in dimensions)
_max = max(dimension.maximum for dimension in dimensions)
return Measurement(_min, _max)
def append(self, renderable: "RenderableType") -> None:
self._renderables.append(renderable)
def __iter__(self) -> Iterable["RenderableType"]:
return iter(self._renderables)
| Renderables |
python | scipy__scipy | scipy/linalg/tests/test_basic.py | {
"start": 59966,
"end": 74529
} | class ____:
lapack_drivers = ('gelsd', 'gelss', 'gelsy', None)
def test_simple_exact(self):
for dtype in REAL_DTYPES:
a = np.array([[1, 20], [-30, 4]], dtype=dtype)
for lapack_driver in TestLstsq.lapack_drivers:
for overwrite in (True, False):
for bt in (((1, 0), (0, 1)), (1, 0),
((2, 1), (-30, 4))):
# Store values in case they are overwritten
# later
a1 = a.copy()
b = np.array(bt, dtype=dtype)
b1 = b.copy()
out = lstsq(a1, b1,
lapack_driver=lapack_driver,
overwrite_a=overwrite,
overwrite_b=overwrite)
x = out[0]
r = out[2]
assert_(r == 2,
f'expected efficient rank 2, got {r}')
assert_allclose(dot(a, x), b,
atol=25 * _eps_cast(a1.dtype),
rtol=25 * _eps_cast(a1.dtype),
err_msg=f"driver: {lapack_driver}")
def test_simple_overdet(self):
for dtype in REAL_DTYPES:
a = np.array([[1, 2], [4, 5], [3, 4]], dtype=dtype)
b = np.array([1, 2, 3], dtype=dtype)
for lapack_driver in TestLstsq.lapack_drivers:
for overwrite in (True, False):
# Store values in case they are overwritten later
a1 = a.copy()
b1 = b.copy()
out = lstsq(a1, b1, lapack_driver=lapack_driver,
overwrite_a=overwrite,
overwrite_b=overwrite)
x = out[0]
if lapack_driver == 'gelsy':
residuals = np.sum((b - a.dot(x))**2)
else:
residuals = out[1]
r = out[2]
assert_(r == 2, f'expected efficient rank 2, got {r}')
assert_allclose(abs((dot(a, x) - b)**2).sum(axis=0),
residuals,
rtol=25 * _eps_cast(a1.dtype),
atol=25 * _eps_cast(a1.dtype),
err_msg=f"driver: {lapack_driver}")
assert_allclose(x, (-0.428571428571429, 0.85714285714285),
rtol=25 * _eps_cast(a1.dtype),
atol=25 * _eps_cast(a1.dtype),
err_msg=f"driver: {lapack_driver}")
def test_simple_overdet_complex(self):
for dtype in COMPLEX_DTYPES:
a = np.array([[1+2j, 2], [4, 5], [3, 4]], dtype=dtype)
b = np.array([1, 2+4j, 3], dtype=dtype)
for lapack_driver in TestLstsq.lapack_drivers:
for overwrite in (True, False):
# Store values in case they are overwritten later
a1 = a.copy()
b1 = b.copy()
out = lstsq(a1, b1, lapack_driver=lapack_driver,
overwrite_a=overwrite,
overwrite_b=overwrite)
x = out[0]
if lapack_driver == 'gelsy':
res = b - a.dot(x)
residuals = np.sum(res * res.conj())
else:
residuals = out[1]
r = out[2]
assert_(r == 2, f'expected efficient rank 2, got {r}')
assert_allclose(abs((dot(a, x) - b)**2).sum(axis=0),
residuals,
rtol=25 * _eps_cast(a1.dtype),
atol=25 * _eps_cast(a1.dtype),
err_msg=f"driver: {lapack_driver}")
assert_allclose(
x, (-0.4831460674157303 + 0.258426966292135j,
0.921348314606741 + 0.292134831460674j),
rtol=25 * _eps_cast(a1.dtype),
atol=25 * _eps_cast(a1.dtype),
err_msg=f"driver: {lapack_driver}")
def test_simple_underdet(self):
for dtype in REAL_DTYPES:
a = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
b = np.array([1, 2], dtype=dtype)
for lapack_driver in TestLstsq.lapack_drivers:
for overwrite in (True, False):
# Store values in case they are overwritten later
a1 = a.copy()
b1 = b.copy()
out = lstsq(a1, b1, lapack_driver=lapack_driver,
overwrite_a=overwrite,
overwrite_b=overwrite)
x = out[0]
r = out[2]
assert_(r == 2, f'expected efficient rank 2, got {r}')
assert_allclose(x, (-0.055555555555555, 0.111111111111111,
0.277777777777777),
rtol=25 * _eps_cast(a1.dtype),
atol=25 * _eps_cast(a1.dtype),
err_msg=f"driver: {lapack_driver}")
@pytest.mark.parametrize("dtype", REAL_DTYPES)
@pytest.mark.parametrize("n", (20, 200))
@pytest.mark.parametrize("lapack_driver", lapack_drivers)
@pytest.mark.parametrize("overwrite", (True, False))
def test_random_exact(self, dtype, n, lapack_driver, overwrite):
rng = np.random.RandomState(1234)
a = np.asarray(rng.random([n, n]), dtype=dtype)
for i in range(n):
a[i, i] = 20 * (0.1 + a[i, i])
for i in range(4):
b = np.asarray(rng.random([n, 3]), dtype=dtype)
# Store values in case they are overwritten later
a1 = a.copy()
b1 = b.copy()
out = lstsq(a1, b1,
lapack_driver=lapack_driver,
overwrite_a=overwrite,
overwrite_b=overwrite)
x = out[0]
r = out[2]
assert_(r == n, f'expected efficient rank {n}, '
f'got {r}')
if dtype is np.float32:
assert_allclose(
dot(a, x), b,
rtol=500 * _eps_cast(a1.dtype),
atol=500 * _eps_cast(a1.dtype),
err_msg=f"driver: {lapack_driver}")
else:
assert_allclose(
dot(a, x), b,
rtol=1000 * _eps_cast(a1.dtype),
atol=1000 * _eps_cast(a1.dtype),
err_msg=f"driver: {lapack_driver}")
@pytest.mark.skipif(IS_MUSL, reason="may segfault on Alpine, see gh-17630")
@pytest.mark.parametrize("dtype", COMPLEX_DTYPES)
@pytest.mark.parametrize("n", (20, 200))
@pytest.mark.parametrize("lapack_driver", lapack_drivers)
@pytest.mark.parametrize("overwrite", (True, False))
def test_random_complex_exact(self, dtype, n, lapack_driver, overwrite):
rng = np.random.RandomState(1234)
a = np.asarray(rng.random([n, n]) + 1j*rng.random([n, n]),
dtype=dtype)
for i in range(n):
a[i, i] = 20 * (0.1 + a[i, i])
for i in range(2):
b = np.asarray(rng.random([n, 3]), dtype=dtype)
# Store values in case they are overwritten later
a1 = a.copy()
b1 = b.copy()
out = lstsq(a1, b1, lapack_driver=lapack_driver,
overwrite_a=overwrite,
overwrite_b=overwrite)
x = out[0]
r = out[2]
assert_(r == n, f'expected efficient rank {n}, '
f'got {r}')
if dtype is np.complex64:
assert_allclose(
dot(a, x), b,
rtol=400 * _eps_cast(a1.dtype),
atol=400 * _eps_cast(a1.dtype),
err_msg=f"driver: {lapack_driver}")
else:
assert_allclose(
dot(a, x), b,
rtol=1000 * _eps_cast(a1.dtype),
atol=1000 * _eps_cast(a1.dtype),
err_msg=f"driver: {lapack_driver}")
def test_random_overdet(self):
rng = np.random.RandomState(1234)
for dtype in REAL_DTYPES:
for (n, m) in ((20, 15), (200, 2)):
for lapack_driver in TestLstsq.lapack_drivers:
for overwrite in (True, False):
a = np.asarray(rng.random([n, m]), dtype=dtype)
for i in range(m):
a[i, i] = 20 * (0.1 + a[i, i])
for i in range(4):
b = np.asarray(rng.random([n, 3]), dtype=dtype)
# Store values in case they are overwritten later
a1 = a.copy()
b1 = b.copy()
out = lstsq(a1, b1,
lapack_driver=lapack_driver,
overwrite_a=overwrite,
overwrite_b=overwrite)
x = out[0]
r = out[2]
assert_(r == m, f'expected efficient rank {m}, '
f'got {r}')
assert_allclose(
x, direct_lstsq(a, b, cmplx=0),
rtol=25 * _eps_cast(a1.dtype),
atol=25 * _eps_cast(a1.dtype),
err_msg=f"driver: {lapack_driver}")
def test_random_complex_overdet(self):
rng = np.random.RandomState(1234)
for dtype in COMPLEX_DTYPES:
for (n, m) in ((20, 15), (200, 2)):
for lapack_driver in TestLstsq.lapack_drivers:
for overwrite in (True, False):
a = np.asarray(rng.random([n, m]) + 1j*rng.random([n, m]),
dtype=dtype)
for i in range(m):
a[i, i] = 20 * (0.1 + a[i, i])
for i in range(2):
b = np.asarray(rng.random([n, 3]), dtype=dtype)
# Store values in case they are overwritten
# later
a1 = a.copy()
b1 = b.copy()
out = lstsq(a1, b1,
lapack_driver=lapack_driver,
overwrite_a=overwrite,
overwrite_b=overwrite)
x = out[0]
r = out[2]
assert_(r == m, f'expected efficient rank {m}, '
f'got {r}')
assert_allclose(
x, direct_lstsq(a, b, cmplx=1),
rtol=25 * _eps_cast(a1.dtype),
atol=25 * _eps_cast(a1.dtype),
err_msg=f"driver: {lapack_driver}")
def test_check_finite(self):
with warnings.catch_warnings():
# On (some) OSX this tests triggers a warning (gh-7538)
warnings.filterwarnings("ignore",
"internal gelsd driver lwork query error,.*"
"Falling back to 'gelss' driver.", RuntimeWarning)
at = np.array(((1, 20), (-30, 4)))
for dtype, bt, lapack_driver, overwrite, check_finite in \
itertools.product(REAL_DTYPES,
(((1, 0), (0, 1)), (1, 0), ((2, 1), (-30, 4))),
TestLstsq.lapack_drivers,
(True, False),
(True, False)):
a = at.astype(dtype)
b = np.array(bt, dtype=dtype)
# Store values in case they are overwritten
# later
a1 = a.copy()
b1 = b.copy()
out = lstsq(a1, b1, lapack_driver=lapack_driver,
check_finite=check_finite, overwrite_a=overwrite,
overwrite_b=overwrite)
x = out[0]
r = out[2]
assert_(r == 2, f'expected efficient rank 2, got {r}')
assert_allclose(dot(a, x), b,
rtol=25 * _eps_cast(a.dtype),
atol=25 * _eps_cast(a.dtype),
err_msg=f"driver: {lapack_driver}")
def test_empty(self):
for a_shape, b_shape in (((0, 2), (0,)),
((0, 4), (0, 2)),
((4, 0), (4,)),
((4, 0), (4, 2))):
b = np.ones(b_shape)
x, residues, rank, s = lstsq(np.zeros(a_shape), b)
assert_equal(x, np.zeros((a_shape[1],) + b_shape[1:]))
residues_should_be = (np.empty((0,)) if a_shape[1]
else np.linalg.norm(b, axis=0)**2)
assert_equal(residues, residues_should_be)
assert_(rank == 0, 'expected rank 0')
assert_equal(s, np.empty((0,)))
@pytest.mark.parametrize('dt_a', [int, float, np.float32, complex, np.complex64])
@pytest.mark.parametrize('dt_b', [int, float, np.float32, complex, np.complex64])
def test_empty_dtype(self, dt_a, dt_b):
a = np.empty((0, 0), dtype=dt_a)
b = np.empty(0, dtype=dt_b)
x, residues, rank, s = lstsq(a, b)
assert x.size == 0
dt_nonempty = lstsq(np.eye(2, dtype=dt_a), np.ones(2, dtype=dt_b))[0].dtype
assert x.dtype == dt_nonempty
| TestLstsq |
python | huggingface__transformers | tests/models/longt5/test_modeling_longt5.py | {
"start": 49581,
"end": 66717
} | class ____(unittest.TestCase):
@cached_property
def model(self):
return LongT5ForConditionalGeneration.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps").to(
torch_device
)
@cached_property
def tokenizer(self):
return AutoTokenizer.from_pretrained("Stancld/longt5-tglobal-large-16384-pubmed-3k_steps")
def expected_summary(self):
return [
"background : coronary artery disease ( cad ) is the emerging cause of morbidity and mortality in"
" developing world . it provides an excellent resolution for visualization of the coronaryarteries for"
" catheter - based or operating interventions . although the association of this technique with major"
" complications such as mortality is highly uncommon , it is frequently associated with various cardiac"
" and noncardiac complications.materials and methods : in aortic stenosis , we aimed to report the"
" diagnostic performance of 128-slice computed tomography coronary angiogram in 50 patients undergoing for"
" major noncoron ary cardiac surgery referred"
]
@slow
def test_summarization(self):
model = self.model
tok = self.tokenizer
ARTICLE = """coronary artery disease ( cad ) is the emerging cause of morbidity and mortality in developing world . \n it provides an excellent resolution for visualization of the coronary arteries for catheter - based or operating interventions . \n
although the association of this technique with major complications such as mortality is highly uncommon , it is frequently associated with various cardiac and noncardiac complications . computed tomography ( ct ) coronary angiography is
a promising technique for the evaluation of cad noninvasively . \n it assesses disease within the coronary artery and provides qualitative and quantitative information about nonobstructive atherosclerotic plaque burden within the vessel
wall . \n thus , ct angiography - based disease evaluation may provide clinically more significant information than conventional angiography . the introduction of multi - slice computed tomography ( msct ) technology such as 64-slice , 12
8-slice , 256-slice , and now 320-slice msct has produced a high diagnostic accuracy of ct coronary angiography . \n it has consistently showed to have a very high negative predictive value ( well above 90% ) in ruling out patients with s
ignificant cad defined as coronary luminal stenosis of > 50% . \n the american college of cardiology / american heart association recommends that coronary angiography should be performed before valve surgery in men aged > 40 years , women
aged > 35 years with coronary risk factors and in postmenopausal women . \n the prevalence of cad in patients undergoing valve replacement is 2040% in developed countries . in the previous studies , \n the incidence of angiographically p
roven cad in acquired valvular diseases has been shown to vary widely from 9% to 41% . in aortic stenosis , \n we aimed to report the diagnostic performance of 128-slice ct coronary angiography in 50 patients undergoing for major noncoron
ary cardiac surgery referred for diagnostic invasive coronary angiography to assess the extent and severity of coronary stenosis . \n during january 2013 to december 2014 , we enrolled fifty major noncoronary cardiac surgery patients sche
duled for invasive coronary angiography who fulfilled the following inclusion criteria of age 40 years , having low or intermediate probability of cad , left ventricular ejection fraction ( lvef ) > 35% , and patient giving informed conse
nt for undergoing msct and conventional coronary angiography . \n those having any contraindication for contrast injection , lvef < 35% , high pretest probability of cad , and hemodynamic instability were excluded from the study . \n pati
ents with heart rates of > 70 bpm received ( unless they had known overt heart failure or electrocardiogram ( ecg ) atrioventricular conduction abnormalities ) a single oral dose of 100 mg metoprolol 45 min before the scan . \n patients w
ith heart rates of > 80 bpm received an additional oral dose of metoprolol if not contraindicated . \n all patients were scanned with a 128-slice ct scanner ( siemens , somatom definition as ) equipped with a new feature in msct technolog
y , so - called z - axis flying - focus technology . \n the central 32 detector rows acquire 0.6-mm slices , and the flying - focus spot switches back and forth between 2 z positions between each reading . \n two slices per detector row a
re acquired , which results in a higher oversampling rate in the z - axis , thereby reducing artifacts related to the spiral acquisition and improving spatial resolution down to 0.4 mm . \n a bolus of 6580 ml contrast material ( omnipaque
) was injected through an arm vein at a flow rate of 5 ml / s . \n a bolus tracking technique was used to synchronize the arrival of contrast in the coronary arteries with the initiation of the scan . to monitor the arrival of contrast m
aterial , \n axial scans were obtained at the level of the ascending aorta with a delay of 10 s after the start of the contrast injection . \n the scan was automatically started when a threshold of 150 hounsfield units was reached in a re
gion of interest positioned in the ascending aorta . \n images were reconstructed with ecg gating to obtain optimal , motion - free image quality . \n all scans were performed within 2 weeks of the msct coronary diagnostic angiogram . a s
ingle observer unaware of the multi - slice ct results identified coronary lesion as a single vessel , double vessel , or triple vessel disease . \n all lesion , regardless of size , were included for comparison with ct coronary angiograp
hy . \n lesions were classified as having nonsignificant disease ( luminal irregularities or < 50% stenosis ) or as having significant stenosis . \n stenosis was evaluated in two orthogonal views and classified as significant if the mean
lumen diameter reduction was 50% using a validated quantitative coronary angiography ( qca ) . \n all scans were analyzed independently by a radiologist and a cardiologist who were unaware of the results of conventional coronary angiograp
hy . \n total calcium scores of all patients were calculated with dedicated software and expressed as agatston scores . \n the agatston score is a commonly used scoring method that calculates the total amount of calcium on the basis of th
e number , areas , and peak hounsfield units of the detected calcified lesions . \n all available coronary segments were visually scored for the presence of > 50% considered as significant stenosis . \n maximum intensity projections were
used to identify coronary lesions and ( curved ) multiplanar reconstructions to classify lesions as significant or nonsignificant . \n data were analyzed using statistical system spss version 20 software ( chicago , il , usa ) . \n the di
agnostic performance of ct coronary angiography for the detection of significant lesions in coronary arteries with qca as the standard of reference is presented as sensitivity , specificity , positive and negative predictive values , and
positive and negative likelihood ratios with the corresponding exact 95% of confidence interval ( cis ) . \n comparison between ct and conventional coronary angiography was performed on the two level vessel by vessel ( no or any disease p
er vessel ) , and patient by patient ( no or any disease per patient ) . \n all scans were performed within 2 weeks of the msct coronary diagnostic angiogram . a single observer unaware of the multi - slice ct results identified coronary
lesion as a single vessel , double vessel , or triple vessel disease . \n all lesion , regardless of size , were included for comparison with ct coronary angiography . \n lesions were classified as having nonsignificant disease ( luminal
irregularities or < 50% stenosis ) or as having significant stenosis . \n stenosis was evaluated in two orthogonal views and classified as significant if the mean lumen diameter reduction was 50% using a validated quantitative coronary an
giography ( qca ) . \n all scans were analyzed independently by a radiologist and a cardiologist who were unaware of the results of conventional coronary angiography . \n total calcium scores of all patients were calculated with dedicated
software and expressed as agatston scores . \n the agatston score is a commonly used scoring method that calculates the total amount of calcium on the basis of the number , areas , and peak hounsfield units of the detected calcified lesi
ons . \n all available coronary segments were visually scored for the presence of > 50% considered as significant stenosis . \n maximum intensity projections were used to identify coronary lesions and ( curved ) multiplanar reconstruction
s to classify lesions as significant or nonsignificant . \n data were analyzed using statistical system spss version 20 software ( chicago , il , usa ) . \n the diagnostic performance of ct coronary angiography for the detection of signif
icant lesions in coronary arteries with qca as the standard of reference is presented as sensitivity , specificity , positive and negative predictive values , and positive and negative likelihood ratios with the corresponding exact 95% of
confidence interval ( cis ) . \n comparison between ct and conventional coronary angiography was performed on the two level vessel by vessel ( no or any disease per vessel ) , and patient by patient ( no or any disease per patient ) . \n
in this study , 29 ( 58% ) subjects were female , and 21 ( 42% ) were male showing an average age of 50.36 8.39 years . \n of fifty patients 24 ( 48% ) , 13 ( 26% ) , eight ( 16% ) , and five ( 10% ) underwent mitral valve replacement ,
double valve replacement ( dvr ) , aortic valve replacement , and other surgeries , respectively . \n high distribution of cad risk factors such as hypertension ( 24% ) , smoking ( 22% ) , and dyslipidemia ( 18% ) was observed in the stu
dy group . \n the mean creatinine level was 0.766 0.17 and average dye used in conventional angiography was 48.5 26.6 whereas for ct angiography it was 72.8 6.32 . \n average radiation dose in conventional coronary angiography and msct
coronary angiography was 5.2 msv and 9.2 msv , respectively . \n the majority of the patients had sinus rhythm ( 68% ) , whereas atrial fibrillation was found in 32% of the subjects . \n patients included in the study had low to intermed
iate probability of cad . in this study , three patients had complications after conventional angiography . \n complications were of local site hematoma , acute kidney injury managed conservatively , and acute heart failure . \n a patient
who developed hematoma was obese female patients with body mass index > 30 kg / m . \n the patient suffered from pseudoaneurysm , had hospitalized for 9 days , which leads to increased morbidity and cost of hospital stay . \n the diagnos
tic accuracy of ct coronary angiography was evaluated regarding true positive , true negative values and is presented in table 1 . the overall sensitivity and \n specificity of ct angiography technique was 100% ( 95% ci : 39.76%100% ) and
91.30% ( 95% ci : 79.21%97.58% ) , respectively [ table 2 ] . \n the positive predictive value ( 50% ; 95% ci : 15.70%84.30% ) and negative predictive value ( 100% ; 95% ci : 91.59%100% ) of ct angiography were also fairly high in these
patients . \n recent reports from multiple studies demonstrated that recent - generation msct scanners showed promise for noninvasive detection of coronary stenosis however , until now no studies were found regarding the clinical efficacy
or prognostic value of 128-slice ct coronary angiography versus conventional invasive coronary angiography in the diagnosis of patients planned for major noncoronary surgeries such as dvr , bentall , atrial septal defect closure , etc .
in our study , we reported 8% cad prevalence in patients planned for major noncoronary cardiac surgery . \n we performed conventional and msct coronary angiography in all patients and the results showed that ct coronary angiography with i
nvasive coronary angiography as the reference standard had a considerably high sensitivity ( 100% ) and specificity ( 95.65% ) . \n the health economic model using invasive coronary angiography as the reference standard showed that at a p
retest probability of cad of 70% or lower , ct coronary angiography resulted in lower cost per patient with a true positive diagnosis . at a pretest probability of cad of 70% or higher , invasive coronary angiography was associated with a
lower cost per patient with a true positive diagnosis . in our study population , \n two patients developed local site complications in the form of hematoma and pseudoaneurysm after conventional angiography . \n hence , msct coronary ang
iography will be more favorable in female obese patients with intermediate likelihood of cad . \n hence , msct coronary angiography will be cost - effective in patients of valvular heart diseases . \n however , ct angiography suffers from
a drawback that average amount of dye used in msct coronary angiography were 72.8 6.32 ml which is higher than average amount of dye required for conventional angiography ( 48.6 26.6 ml ) . \n hence , the use of ct coronary angiography
could not be used in patients with known renal dysfunction , where reduction of contrast dye load is highly advocated . \n our results show that 128-slice ct coronary angiography is a reliable technique to detect coronary stenosis in pat
ients planned for noncoronary cardiac surgery . \n although there has been important technological progress in the development of ct coronary angiography , its clinical application remains limited . \n a study wth large numbers of patient
s is required for the recommendation of only ct coronary angiography for the coronary evaluation in major non - cardiac surgeries . \n mehta institute of cardiology and research center ( affiliated to bj medical college , ahmedabad , guja
rat , india ) . \n u.n . mehta institute of cardiology and research center ( affiliated to bj medical college , ahmedabad , gujarat , india ) . \n """
dct = tok(
[ARTICLE],
max_length=1024,
padding="max_length",
truncation=True,
return_tensors="pt",
).to(torch_device)
hypotheses_batch = model.generate(
**dct,
num_beams=4,
length_penalty=2.0,
max_length=142,
min_length=56,
no_repeat_ngram_size=3,
do_sample=False,
early_stopping=True,
)
decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True, clean_up_tokenization_spaces=False)
self.assertListEqual(
self.expected_summary(),
decoded,
)
@slow
def test_inference_hidden_states(self):
model = self.model
input_ids = torch.tensor(
[[100, 19, 3, 9, 7142, 1200, 145, 8, 1252, 14145, 2034, 812, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
dtype=torch.long,
device=torch_device,
)
decoder_input_ids = torch.tensor(
[[100, 19, 3, 9, 7142, 1200, 145, 8, 1252, 14145, 2034, 812, 5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
dtype=torch.long,
device=torch_device,
)
attention_mask = torch.tensor(
[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
dtype=torch.long,
device=torch_device,
)
output = model(
input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, output_hidden_states=True
)
# check if encoder_outputs match
expected_output_slice = torch.tensor([0.0629, -0.1294, -0.0089, 0.0772, 0.0663], device=torch_device)
torch.testing.assert_close(
output.encoder_hidden_states[-1][0, 0, :5], expected_output_slice, rtol=1e-4, atol=1e-4
)
# check if logits match
expected_output_slice = torch.tensor([5.5231, 6.1058, 3.1766, 8.2391, -5.9453], device=torch_device)
torch.testing.assert_close(output.logits[0, 0, :5], expected_output_slice, rtol=1e-4, atol=1e-4)
| LongT5ModelIntegrationTests |
python | tensorflow__tensorflow | tensorflow/python/distribute/test_util.py | {
"start": 1622,
"end": 10622
} | class ____:
cluster: dict
max_num_worker: int
max_num_ps: int
def get_cluster_def(cluster_params, num_workers, num_ps):
if (num_workers > cluster_params.max_num_worker or
num_ps > cluster_params.max_num_ps):
raise ValueError("Requesting more servers than the maximum, adjust"
"cluster params' max_num_ps and max_num_worker")
if cluster_params.cluster is None:
cluster_params.cluster = multi_worker_test_base.create_in_process_cluster(
num_workers=cluster_params.max_num_worker,
num_ps=cluster_params.max_num_ps)
return {
"worker": cluster_params.cluster["worker"][:num_workers],
"ps": cluster_params.cluster["ps"][:num_ps],
}
def gather(strategy, value):
"""Gathers value from all workers.
This is intended for tests before we implement an official all-gather API.
Args:
strategy: a `tf.distribute.Strategy`.
value: a nested structure of n-dim `tf.distribute.DistributedValue` of
`tf.Tensor`, or of a `tf.Tensor` if the strategy only has one replica.
Cannot contain tf.sparse.SparseTensor.
Returns:
a (n+1)-dim `tf.Tensor`.
"""
return nest.map_structure(functools.partial(_gather, strategy), value)
def _gather(strategy, value):
"""Gathers a single value."""
# pylint: disable=protected-access
if not isinstance(value, values.DistributedValues):
value = values.PerReplica([ops.convert_to_tensor(value)])
if not isinstance(strategy.extended,
collective_all_reduce_strategy.CollectiveAllReduceExtended):
return array_ops_stack.stack(value._values)
assert len(strategy.extended.worker_devices) == len(value._values)
inputs = [array_ops.expand_dims_v2(v, axis=0) for v in value._values]
return strategy.gather(values.PerReplica(inputs), axis=0)
# pylint: enable=protected-access
def set_logical_devices_to_at_least(device, num):
"""Create logical devices of at least a given number."""
if num < 1:
raise ValueError("`num` must be at least 1 not %r" % (num,))
physical_devices = config.list_physical_devices(device)
if not physical_devices:
raise RuntimeError("No {} found".format(device))
if len(physical_devices) >= num:
return
# By default each physical device corresponds to one logical device. We create
# multiple logical devices for the last physical device so that we have `num`
# logical devices.
num = num - len(physical_devices) + 1
logical_devices = []
for _ in range(num):
if device.upper() == "GPU":
logical_devices.append(
context.LogicalDeviceConfiguration(memory_limit=2048))
else:
logical_devices.append(context.LogicalDeviceConfiguration())
# Create logical devices from the last device since sometimes the first GPU
# is the primary graphic card and may have less memory available.
config.set_logical_device_configuration(physical_devices[-1], logical_devices)
def _set_logical_devices():
if config.list_physical_devices("GPU"):
set_logical_devices_to_at_least("GPU", 2)
if config.list_physical_devices("CPU"):
set_logical_devices_to_at_least("CPU", 2)
def main(enable_v2_behavior=True, config_logical_devices=True):
"""All-in-one main function for tf.distribute tests."""
if config_logical_devices:
app.call_after_init(_set_logical_devices)
if enable_v2_behavior:
v2_compat.enable_v2_behavior()
else:
v2_compat.disable_v2_behavior()
multi_process_runner.test_main()
def _op_dependencies(op):
"""Returns the data and control dependencies of a tf.Operation combined."""
deps = []
for node in itertools.chain(op.inputs, op.control_inputs):
if isinstance(node, tensor.Tensor):
node = node.op
assert isinstance(node, ops.Operation)
deps.append(node)
return deps
def topological_sort_operations(operations):
"""Topological sorts a list of operations.
This does a topological sort of the operations in a graph. The edges include
both data dependencies and control dependencies. Note that the edge goes from
an operation to its dependencies.
The sort is intentionally unstable, reversing orders of operations and
dependencies on ties.
Args:
operations: a list of tf.Operation in the same graph.
Returns:
A map from a tf.Operation to its topological order.
"""
in_degrees = collections.OrderedDict()
for op in reversed(operations):
if op not in in_degrees:
in_degrees[op] = 0
for next_op in reversed(_op_dependencies(op)):
in_degrees[next_op] = in_degrees.get(next_op, 0) + 1
nexts = []
for op, in_degree in in_degrees.items():
if in_degree == 0:
nexts.append(op)
order = {}
next_order = 0
while nexts:
op, nexts = nexts[0], nexts[1:]
order[op] = next_order
next_order += 1
for next_op in reversed(_op_dependencies(op)):
in_degrees[next_op] -= 1
if in_degrees[next_op] == 0:
nexts.append(next_op)
assert len(order) == len(operations)
return order
def _exists_dependency(start, end):
"""Returns whether there exists a dependency chain from start to end."""
nexts = [start]
while nexts:
op, nexts = nexts[0], nexts[1:]
for next_op in _op_dependencies(op):
if next_op == end:
return True
nexts.append(next_op)
return False
def assert_sequential_execution(order, operations):
"""Asserts there's a deterministic execution order between the operations.
Args:
order: a map from a tf.Operation to its topological order.
operations: a list of operations that should be executed sequentially. It
can be given in any order.
"""
# Topological ordering guarantees that, if there's a dependency from N_a to
# N_b, then order[N_a] < order[N_b]. If there do exist a path of dependencies
# among the operations, it always goes from a operation with a smaller
# topological order to one with a larger topological order. Therefore, we only
# need to sort the operations by their topological orders, and verify that
# there's a path of dependency between adjacent pairs.
operations = sorted(operations, key=lambda op: order[op])
for i in range(len(operations) - 1):
if not _exists_dependency(operations[i], operations[i + 1]):
print(operations[i].graph.as_graph_def())
raise AssertionError(
"No dependency between {} and {}. Graph is dumped to stdout.".format(
operations[i].name, operations[i + 1].name))
def get_running_threads():
"""Returns a set of all running thread names."""
running_threads = set()
for thread in threading.enumerate():
if thread.name is not None:
running_threads.add(thread.name)
return running_threads
def has_thread(prefix, running_threads):
"""Returns whether any 'running_threads' is prefixed with 'prefix'.
Args:
prefix: The prefix of the expected thread name.
running_threads: A collection of the running thread names.
"""
for thread in running_threads:
if thread.startswith(prefix):
return True
return False
def show_backref(target, max_depth=3):
"""Returns a dot graph of all the objects that are referencing the target.
A object referencing graph is useful to debug memory leak like circular
reference. objgraph provides a good visualization of the memory graph than
most python built-in utilities like gc.get_referrers(), which are not
human-readable sometimes.
The dot graph will be written to a string IO object, and can be rendered with
graphviz in operating system.
E.g. dot -Tpng {$dot_graph} -o output.png
Args:
target: The target object for the memory graph.
max_depth: The maximum depth of the graph. By default 3 layers of references
are used. Increases this a lot may result in the graph growing too big.
Returns:
A string that contains the object reference graph.
Raises:
NotImplementedError: if objgraph is not installed.
"""
if objgraph is None:
raise NotImplementedError("objgraph is not installed.")
string_io = io.StringIO()
objgraph.show_backrefs(target, max_depth=max_depth, output=string_io)
graph = string_io.getvalue()
string_io.close()
return graph
def create_per_replica(strategy, value_list):
"""Creates a PerReplica of Tensors from the value_list."""
if len(strategy.extended.worker_devices) != len(value_list):
raise ValueError(
"the length of values must be the same as the number of worker devices")
tensors = []
for device, value in zip(strategy.extended.worker_devices, value_list):
with ops.device(device):
tensors.append(ops.convert_to_tensor(value))
return values.PerReplica(tensors)
def is_tpu_strategy(strategy):
"""Returns whether the strategy is a TPU strategy."""
return isinstance(strategy,
(tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1,
tpu_strategy.TPUStrategyV2))
def reset_context():
"""Resets eager context."""
context._reset_context() # pylint: disable=protected-access
| TestClusterParams |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/graphql/repo.py | {
"start": 46184,
"end": 60832
} | class ____(AutomationCondition):
@property
def name(self) -> str:
return "some_custom_name"
def evaluate(self): ... # pyright: ignore[reportIncompatibleMethodOverride]
@asset(automation_condition=MyAutomationCondition().since_last_handled())
def asset_with_custom_automation_condition() -> None: ...
fresh_diamond_assets_job = define_asset_job(
"fresh_diamond_assets_job", AssetSelection.assets(fresh_diamond_bottom).upstream()
)
multipartitions_def = MultiPartitionsDefinition(
{
"date": DailyPartitionsDefinition(start_date="2022-01-01"),
"ab": StaticPartitionsDefinition(["a", "b", "c"]),
}
)
@asset(partitions_def=multipartitions_def)
def multipartitions_1():
return 1
@asset(partitions_def=multipartitions_def)
def multipartitions_2(multipartitions_1):
return multipartitions_1
@asset(partitions_def=multipartitions_def)
def multipartitions_fail(context):
if context.run.tags.get("fail") == "true":
raise Exception("multipartitions_fail")
return 1
multi_partitions_job = define_asset_job(
"multipartitions_job", AssetSelection.assets(multipartitions_1, multipartitions_2)
)
no_partitions_multipartitions_def = MultiPartitionsDefinition(
{
"a": StaticPartitionsDefinition([]),
"b": StaticPartitionsDefinition([]),
}
)
@asset(partitions_def=no_partitions_multipartitions_def)
def no_multipartitions_1():
return 1
dynamic_in_multipartitions_def = MultiPartitionsDefinition(
{
"dynamic": DynamicPartitionsDefinition(name="dynamic"),
"static": StaticPartitionsDefinition(["a", "b", "c"]),
}
)
no_multi_partitions_job = define_asset_job(
"no_multipartitions_job", AssetSelection.assets(no_multipartitions_1)
)
multi_partitions_fail_job = define_asset_job(
"multipartitions_fail_job", AssetSelection.assets(multipartitions_fail)
)
@asset(partitions_def=dynamic_in_multipartitions_def)
def dynamic_in_multipartitions_success():
return 1
@asset(partitions_def=dynamic_in_multipartitions_def)
def dynamic_in_multipartitions_fail(context, dynamic_in_multipartitions_success):
raise Exception("oops")
dynamic_in_multipartitions_success_job = define_asset_job(
"dynamic_in_multipartitions_success_job",
AssetSelection.assets(dynamic_in_multipartitions_success, dynamic_in_multipartitions_fail),
)
@asset(
partitions_def=DailyPartitionsDefinition("2023-01-01"),
backfill_policy=BackfillPolicy.single_run(),
)
def single_run_backfill_policy_asset(context):
pass
@asset(
partitions_def=DailyPartitionsDefinition("2023-01-03"),
backfill_policy=BackfillPolicy.multi_run(10),
)
def multi_run_backfill_policy_asset(context):
pass
named_groups_job = define_asset_job(
"named_groups_job",
[
grouped_asset_1,
grouped_asset_2,
ungrouped_asset_3,
grouped_asset_4,
ungrouped_asset_5,
],
)
@repository
def empty_repo():
return []
typed_assets_job = define_asset_job(
"typed_assets",
AssetSelection.assets(typed_multi_asset, typed_asset, untyped_asset),
)
@schedule(cron_schedule="* * * * *", job=typed_assets_job)
def asset_job_schedule():
return {}
@asset_check(asset=asset_1, description="asset_1 check", blocking=True, additional_deps=[asset_two])
def my_check(asset_1):
return AssetCheckResult(
passed=True,
metadata={
"foo": "bar",
"baz": "quux",
},
)
@asset_check(asset=asset_3, description="asset_3 check", blocking=True)
def asset_3_check(asset_3):
return AssetCheckResult(
passed=True,
metadata={
"foo": "baz",
"baz": "bar",
},
)
@asset_check(asset=asset_3, description="asset_3 second check", blocking=True)
def asset_3_other_check(asset_3):
return AssetCheckResult(
passed=True,
metadata={
"foo": "baz",
"baz": "bar",
},
)
@asset(check_specs=[AssetCheckSpec(asset="check_in_op_asset", name="my_check")])
def check_in_op_asset():
yield Output(1)
yield AssetCheckResult(passed=True)
asset_check_job = define_asset_job("asset_check_job", [asset_1, check_in_op_asset])
@multi_asset(
outs={
"one": AssetOut(key="one", is_required=False),
"two": AssetOut(key="two", is_required=False),
},
check_specs=[
AssetCheckSpec("my_check", asset="one"),
AssetCheckSpec("my_other_check", asset="one"),
],
can_subset=True,
)
def subsettable_checked_multi_asset(context: OpExecutionContext):
if AssetKey("one") in context.selected_asset_keys:
yield Output(1, output_name="one")
if AssetKey("two") in context.selected_asset_keys:
yield Output(1, output_name="two")
if AssetCheckKey(AssetKey("one"), "my_check") in context.selected_asset_check_keys:
yield AssetCheckResult(check_name="my_check", passed=True)
if AssetCheckKey(AssetKey("one"), "my_other_check") in context.selected_asset_check_keys:
yield AssetCheckResult(check_name="my_other_check", passed=True)
checked_multi_asset_job = define_asset_job(
"checked_multi_asset_job", AssetSelection.assets(subsettable_checked_multi_asset)
)
@asset(pool="foo")
def concurrency_asset():
pass
@op(pool="bar")
def concurrency_op_1():
pass
@op(pool="baz")
def concurrency_op_2(input_1):
return input_1
@graph_asset
def concurrency_graph_asset():
return concurrency_op_2(concurrency_op_1())
@multi_asset(
specs=[
AssetSpec("concurrency_multi_asset_1"),
AssetSpec("concurrency_multi_asset_2"),
],
pool="buzz",
)
def concurrency_multi_asset():
pass
# These are defined separately because the dict repo does not handle unresolved asset jobs
def define_asset_jobs() -> Sequence[UnresolvedAssetJobDefinition]:
return [
asset_check_job,
checked_multi_asset_job,
dynamic_in_multipartitions_success_job,
dynamic_partitioned_assets_job,
executable_test_job,
fail_partition_materialization_job,
failure_assets_job,
foo_job,
fresh_diamond_assets_job,
hanging_graph_asset_job,
hanging_job,
hanging_partition_asset_job,
integers_asset_job,
output_then_hang_job,
multi_partitions_fail_job,
multi_partitions_job,
named_groups_job,
no_multi_partitions_job,
observation_job,
partition_materialization_job,
static_partitioned_assets_job,
time_partitioned_assets_job,
two_assets_job,
typed_assets_job,
]
def define_standard_jobs() -> Sequence[JobDefinition]:
return [
asset_tag_job,
basic_job,
chained_failure_job,
composed_graph.to_job(),
composites_job,
config_with_map,
csv_hello_world,
csv_hello_world_df_input,
csv_hello_world_two,
csv_hello_world_with_expectations,
daily_partitioned_job,
dynamic_job,
eventually_successful,
hard_failer,
hello_world_with_tags,
infinite_loop_job,
hanging_partitioned_job,
integers,
job_with_default_config,
job_with_enum_config,
job_with_expectations,
job_with_input_output_metadata,
job_with_invalid_definition_error,
job_with_list,
loggers_job,
materialization_job,
more_complicated_config,
more_complicated_nested_config,
multi_asset_job,
naughty_programmer_job,
nested_job,
no_config_chain_job,
no_config_job,
noop_job,
partitioned_asset_job,
req_config_job,
required_resource_config_job,
required_resource_job,
retry_multi_input_early_terminate_job,
retry_multi_output_job,
retry_resource_job,
scalar_output_job,
simple_graph.to_job("simple_job_a"),
simple_graph.to_job("simple_job_b"),
single_asset_job,
spew_job,
static_partitioned_job,
tagged_job,
two_ins_job,
some_external_job,
owned_job,
unowned_job,
owned_partitioned_job,
unowned_partitioned_job,
]
partitions_def_for_permissions = StaticPartitionsDefinition(["a", "b", "c"])
@asset(
owners=["test@elementl.com", "team:foo"],
)
def owned_asset():
return 1
@asset_check(asset=owned_asset, description="owned asset check", blocking=True)
def owned_asset_check(owned_asset):
return AssetCheckResult(passed=True)
@asset
def unowned_asset():
return 2
@asset_check(asset=unowned_asset, description="unowned asset check", blocking=True)
def unowned_asset_check(unowned_asset):
return AssetCheckResult(passed=True)
@asset(partitions_def=partitions_def_for_permissions, owners=["test@elementl.com", "team:foo"])
def owned_partitioned_asset():
return 1
@asset(partitions_def=partitions_def_for_permissions)
def unowned_partitioned_asset():
return 2
@op
def permission_test_op():
pass
@job(owners=["test@elementl.com", "team:foo"])
def owned_job():
permission_test_op()
@job
def unowned_job():
permission_test_op()
@op
def permission_partitioned_op(context):
context.log.info(f"Processing partition: {context.partition_key}")
return context.partition_key
@job(partitions_def=partitions_def_for_permissions, owners=["test@elementl.com", "team:foo"])
def owned_partitioned_job():
permission_partitioned_op()
@job(partitions_def=partitions_def_for_permissions)
def unowned_partitioned_job():
permission_partitioned_op()
@sensor(job=owned_job, owners=["test@elementl.com", "team:foo"])
def owned_sensor():
pass
@sensor(job=unowned_job)
def unowned_sensor():
pass
@schedule(job=owned_job, cron_schedule="* * * * *", owners=["test@elementl.com", "team:foo"])
def owned_schedule():
return {}
@schedule(job=unowned_job, cron_schedule="* * * * *")
def unowned_schedule():
return {}
def define_assets():
return [
asset_one,
asset_two,
untyped_asset,
typed_asset,
typed_multi_asset,
multipartitions_1,
multipartitions_2,
no_multipartitions_1,
multipartitions_fail,
dynamic_in_multipartitions_success,
dynamic_in_multipartitions_fail,
SourceAsset("diamond_source"),
fresh_diamond_top,
fresh_diamond_left,
fresh_diamond_right,
fresh_diamond_bottom,
integers_asset,
upstream_daily_partitioned_asset,
downstream_weekly_partitioned_asset,
unpartitioned_upstream_of_partitioned,
not_included_asset,
upstream_static_partitioned_asset,
middle_static_partitioned_asset_1,
middle_static_partitioned_asset_2,
downstream_static_partitioned_asset,
first_asset,
hanging_asset,
never_runs_asset,
dummy_source_asset,
hanging_partition_asset,
hanging_graph_asset,
output_then_hang_asset,
downstream_asset,
subsettable_checked_multi_asset,
check_in_op_asset,
single_run_backfill_policy_asset,
multi_run_backfill_policy_asset,
executable_asset,
unexecutable_asset,
upstream_dynamic_partitioned_asset,
downstream_dynamic_partitioned_asset,
upstream_time_partitioned_asset,
downstream_time_partitioned_asset,
yield_partition_materialization,
fail_partition_materialization,
asset_yields_observation,
asset_1,
asset_2,
asset_3,
foo,
bar,
foo_bar,
baz,
unconnected,
grouped_asset_1,
grouped_asset_2,
ungrouped_asset_3,
grouped_asset_4,
ungrouped_asset_5,
observable_asset_same_version,
multi_asset_with_kinds,
asset_with_compute_storage_kinds,
asset_with_automation_condition,
asset_with_custom_automation_condition,
concurrency_asset,
concurrency_graph_asset,
concurrency_multi_asset,
asset_with_prefix_1,
asset_with_prefix_2,
asset_with_prefix_3,
asset_with_prefix_4,
asset_with_prefix_5,
owned_asset,
unowned_asset,
owned_partitioned_asset,
unowned_partitioned_asset,
]
def define_resources():
return {
"dummy_io_manager": IOManagerDefinition.hardcoded_io_manager(DummyIOManager()),
"hanging_asset_resource": hanging_asset_resource,
}
def define_asset_checks():
return [
my_check,
asset_3_check,
asset_3_other_check,
owned_asset_check,
unowned_asset_check,
]
asset_jobs = define_asset_jobs()
asset_job_names = [job.name for job in asset_jobs]
test_repo = Definitions(
assets=define_assets(),
asset_checks=define_asset_checks(),
jobs=[*asset_jobs, *define_standard_jobs()],
schedules=define_schedules(),
sensors=define_sensors(),
resources=define_resources(),
executor=in_process_executor,
).get_repository_def()
# Many tests reference the "test_repo" name directly, so we override the default
# SINGLETON_REPOSITORY NAME. This should be removed in a followup PR when references to "test_repo"
# are removed.
test_repo._name = "test_repo" # noqa: SLF001
def _targets_asset_job(instigator: Union[ScheduleDefinition, SensorDefinition]) -> bool:
if isinstance(instigator, SensorDefinition) and instigator.sensor_type in (
# these rely on asset selections, which are invalid with the repos constructed
# using the legacy dictionary pattern
SensorType.AUTOMATION,
SensorType.AUTO_MATERIALIZE,
):
return True
try:
return instigator.job_name in asset_job_names or instigator.has_anonymous_job
except DagsterInvalidDefinitionError: # thrown when `job_name` is invalid
return False
# asset jobs are incompatible with dict repository so we exclude them and any schedules/sensors that target them,
# e.g. AutomationConditionSensorDefinitions
@repository(default_executor_def=in_process_executor)
def test_dict_repo():
return {
"jobs": {job.name: job for job in define_standard_jobs()},
"schedules": {
schedule.name: schedule
for schedule in define_schedules()
if not _targets_asset_job(schedule)
},
"sensors": {
sensor.name: sensor for sensor in define_sensors() if not _targets_asset_job(sensor)
},
}
| MyAutomationCondition |
python | walkccc__LeetCode | solutions/831. Masking Personal Information/831.py | {
"start": 0,
"end": 337
} | class ____:
def maskPII(self, s: str) -> str:
atIndex = s.find('@')
if atIndex != -1:
s = s.lower()
return s[0] + '*' * 5 + s[atIndex - 1:]
ans = ''.join(c for c in s if c.isdigit())
if len(ans) == 10:
return '***-***-' + ans[-4:]
return '+' + '*' * (len(ans) - 10) + '-***-***-' + ans[-4:]
| Solution |
python | numba__numba | numba/tests/test_struct_ref.py | {
"start": 10426,
"end": 11316
} | class ____(structref.StructRefProxy):
def __new__(cls, value, parent):
return structref.StructRefProxy.__new__(cls, value, parent)
@property
def value(self):
return PolygonStruct_get_value(self)
@property
def parent(self):
return PolygonStruct_get_parent(self)
@njit
def PolygonStruct_get_value(self):
return self.value
@njit
def PolygonStruct_get_parent(self):
return self.parent
structref.define_proxy(
PolygonStruct,
PolygonStructType,
["value", "parent"]
)
@overload_method(PolygonStructType, "flip")
def _ol_polygon_struct_flip(self):
def impl(self):
if self.value is not None:
self.value = -self.value
return impl
@overload_attribute(PolygonStructType, "prop")
def _ol_polygon_struct_prop(self):
def get(self):
return self.value, self.parent
return get
| PolygonStruct |
python | huggingface__transformers | src/transformers/models/perception_lm/processing_perception_lm.py | {
"start": 1379,
"end": 11052
} | class ____(ProcessorMixin):
r"""
Constructs a PerceptionLM processor which wraps a PerceptionLM image processor, a PerceptionLM video processor, and a tokenizer into a single processor.
[`PerceptionLMProcessor`] offers all the functionalities of [`PerceptionLMImageProcessorFast`], [`PerceptionLMVideoProcessor`], and the tokenizer (e.g. [`LlamaTokenizerFast`]). See the
[`~PerceptionLMProcessor.__call__`] and [`~PerceptionLMProcessor.decode`] for more information.
Args:
video_processor ([`PerceptionLMVideoProcessor`], *optional*):
The video processor to process video inputs.
image_processor ([`PerceptionLMImageProcessorFast`], *optional*):
The image processor to process image inputs.
tokenizer ([`LlamaTokenizerFast`] or similar, *optional*):
The tokenizer to process text inputs.
patch_size (`int`, *optional*):
Patch size from the vision tower.
chat_template (`str`, *optional*):
A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string.
pooling_ratio (`int`, *optional*, defaults to 2):
Pooling ratio for vision tokens. If not 1, 2D adaptive pooling is applied over projected vision tokens.
"""
def __init__(
self,
video_processor=None,
image_processor=None,
tokenizer=None,
patch_size=None,
chat_template=None,
pooling_ratio=2,
**kwargs,
):
self.patch_size = patch_size
self.pooling_ratio = pooling_ratio
self.image_token = tokenizer.image_token
self.video_token = tokenizer.video_token
self.image_token_id = tokenizer.image_token_id
self.video_token_id = tokenizer.video_token_id
super().__init__(video_processor, image_processor, tokenizer, chat_template=chat_template)
def __call__(
self,
images: Optional[ImageInput] = None,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
videos: Optional[VideoInput] = None,
**kwargs: Unpack[PerceptionLMProcessorKwargs],
) -> BatchFeature:
"""
Prepares a batch containing one or more sequences of text and/or images and/or videos.
If `text` is provided, it is tokenized using the tokenizer.
If `images` is provided, they are processed using the image processor.
If `videos` is provided, they are processed using the video processor.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`, *optional*):
The image or batch of images to be processed. Each image can be a PIL image, NumPy array, or PyTorch tensor.
Both channels-first and channels-last formats are supported.
text (`str`, `List[str]`, *optional*):
The sequence or batch of sequences to be tokenized. Each sequence can be a string.
videos (`Any`, *optional*):
The video or batch of videos to be processed.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is provided.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is provided).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is provided.
- **pixel_values_videos** -- Video pixel values to be fed to a model. Returned when `videos` is provided.
"""
if text is None:
raise ValueError(
"You have to specify at least `text` input. Optionally, you can also specify `images` or `videos`."
)
output_kwargs = self._merge_kwargs(
PerceptionLMProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if images is not None:
image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
else:
image_inputs = {}
if videos is not None:
videos_inputs = self.video_processor(videos, **output_kwargs["videos_kwargs"])
else:
videos_inputs = {}
if isinstance(text, str):
text = [text]
elif not isinstance(text, list) and not isinstance(text[0], str):
raise TypeError("Invalid input text. Please provide a string, or a list of strings")
# try to expand inputs in processing if we have the necessary parts
prompt_strings = []
pixel_values = iter(image_inputs.get("pixel_values", []))
pixel_values_videos = iter(videos_inputs.get("pixel_values_videos", []))
for sample in text:
# Replace the media token with the expanded media token sequence
sample = self._expand_media_tokens(sample, self.tokenizer.image_token, pixel_values)
sample = self._expand_media_tokens(sample, self.tokenizer.video_token, pixel_values_videos)
prompt_strings.append(sample)
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
text_inputs = self.tokenizer(prompt_strings, **output_kwargs["text_kwargs"], return_tensors=None)
self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=["image", "video"])
if return_mm_token_type_ids:
array_ids = np.array(text_inputs["input_ids"])
mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
mm_token_type_ids[array_ids == self.image_token_id] = 1
text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}, tensor_type=return_tensors)
def _expand_media_tokens(self, sample, media_token: str, media_iter: Iterable):
media_count = sample.count(media_token)
if media_count > 0:
media_list = [next(media_iter) for _ in range(media_count)]
sample_splits = sample.split(media_token)
media_token_list = []
for media in media_list:
height, width = get_image_size(to_numpy_array(media))
num_tiles = media.shape[0]
num_media_tokens = (
(height // self.patch_size // self.pooling_ratio)
* (width // self.patch_size // self.pooling_ratio)
* num_tiles
)
media_token_list.append(num_media_tokens)
sample = ""
for i, num_media_tokens in enumerate(media_token_list):
sample += sample_splits[i]
sample += media_token * num_media_tokens
sample += sample_splits[-1]
return sample
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
"""
vision_data = {}
if image_sizes is not None:
images_kwargs = PerceptionLMProcessorKwargs._defaults.get("images_kwargs", {})
images_kwargs.update(kwargs)
tile_size = images_kwargs.get("tile_size", None) or self.image_processor.tile_size
vision_input_type = images_kwargs.get("vision_input_type", None) or self.image_processor.vision_input_type
num_image_tokens = []
num_image_patches = []
for height, width in image_sizes:
if vision_input_type == "thumb+tile":
aspect_ratio = self.image_processor._fit_image_to_canvas(
img_width=width, img_height=height, tile_size=tile_size
)
if aspect_ratio is None:
aspect_ratio = self.image_processor._find_closest_aspect_ratio(
img_width=width, img_height=height, tile_size=tile_size
)
num_tiles = aspect_ratio[0] * aspect_ratio[1] + 1 # base image and tiles
else:
num_tiles = 1
num_image_tokens.append(
(tile_size // self.patch_size // self.pooling_ratio)
* (tile_size // self.patch_size // self.pooling_ratio)
* num_tiles
)
num_image_patches.append(num_tiles)
vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
return MultiModalData(**vision_data)
__all__ = ["PerceptionLMProcessor"]
| PerceptionLMProcessor |
python | mlflow__mlflow | mlflow/pyfunc/__init__.py | {
"start": 46676,
"end": 164117
} | class ____(PyFuncModel):
def __init__(self, model_meta: Model, client: Any, server_pid: int, env_manager="local"):
super().__init__(model_meta=model_meta, model_impl=client, predict_fn="invoke")
self._client = client
self._server_pid = server_pid
# We need to set `env_manager` attribute because it is used by Databricks runtime
# evaluate usage logging to log 'env_manager' tag in `_evaluate` function patching.
self._env_manager = env_manager
def predict(self, data, params=None):
"""
Args:
data: Model input data.
params: Additional parameters to pass to the model for inference.
Returns:
Model predictions.
"""
if "params" in inspect.signature(self._client.invoke).parameters:
result = self._client.invoke(data, params=params).get_predictions()
else:
_log_warning_if_params_not_in_predict_signature(_logger, params)
result = self._client.invoke(data).get_predictions()
if isinstance(result, pandas.DataFrame):
result = result[result.columns[0]]
return result
@property
def pid(self):
if self._server_pid is None:
raise MlflowException("Served PyFunc Model is missing server process ID.")
return self._server_pid
@property
def env_manager(self):
return self._env_manager
@env_manager.setter
def env_manager(self, value):
self._env_manager = value
def _load_model_or_server(
model_uri: str, env_manager: str, model_config: dict[str, Any] | None = None
):
"""
Load a model with env restoration. If a non-local ``env_manager`` is specified, prepare an
independent Python environment with the training time dependencies of the specified model
installed and start a MLflow Model Scoring Server process with that model in that environment.
Return a _ServedPyFuncModel that invokes the scoring server for prediction. Otherwise, load and
return the model locally as a PyFuncModel using :py:func:`mlflow.pyfunc.load_model`.
Args:
model_uri: The uri of the model.
env_manager: The environment manager to load the model.
model_config: The model configuration to use by the model, only if the model
accepts it.
Returns:
A _ServedPyFuncModel for non-local ``env_manager``s or a PyFuncModel otherwise.
"""
from mlflow.pyfunc.scoring_server.client import (
ScoringServerClient,
StdinScoringServerClient,
)
if env_manager == _EnvManager.LOCAL:
return load_model(model_uri, model_config=model_config)
_logger.info("Starting model server for model environment restoration.")
local_path = _download_artifact_from_uri(artifact_uri=model_uri)
model_meta = Model.load(os.path.join(local_path, MLMODEL_FILE_NAME))
is_port_connectable = check_port_connectivity()
pyfunc_backend = get_flavor_backend(
local_path,
env_manager=env_manager,
install_mlflow=os.environ.get("MLFLOW_HOME") is not None,
create_env_root_dir=not is_port_connectable,
)
_logger.info("Restoring model environment. This can take a few minutes.")
# Set capture_output to True in Databricks so that when environment preparation fails, the
# exception message of the notebook cell output will include child process command execution
# stdout/stderr output.
pyfunc_backend.prepare_env(model_uri=local_path, capture_output=is_in_databricks_runtime())
if is_port_connectable:
server_port = find_free_port()
scoring_server_proc = pyfunc_backend.serve(
model_uri=local_path,
port=server_port,
host="127.0.0.1",
timeout=MLFLOW_SCORING_SERVER_REQUEST_TIMEOUT.get(),
enable_mlserver=False,
synchronous=False,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
model_config=model_config,
)
client = ScoringServerClient("127.0.0.1", server_port)
else:
scoring_server_proc = pyfunc_backend.serve_stdin(local_path, model_config=model_config)
client = StdinScoringServerClient(scoring_server_proc)
_logger.info(f"Scoring server process started at PID: {scoring_server_proc.pid}")
try:
client.wait_server_ready(timeout=90, scoring_server_proc=scoring_server_proc)
except Exception as e:
if scoring_server_proc.poll() is None:
# the scoring server is still running but client can't connect to it.
# kill the server.
scoring_server_proc.kill()
server_output, _ = scoring_server_proc.communicate(timeout=15)
if isinstance(server_output, bytes):
server_output = server_output.decode("UTF-8")
raise MlflowException(
"MLflow model server failed to launch, server process stdout and stderr are:\n"
+ server_output
) from e
return _ServedPyFuncModel(
model_meta=model_meta,
client=client,
server_pid=scoring_server_proc.pid,
env_manager=env_manager,
)
def _get_model_dependencies(model_uri, format="pip"):
model_dir = _download_artifact_from_uri(model_uri)
def get_conda_yaml_path():
model_config = _get_flavor_configuration_from_ml_model_file(
os.path.join(model_dir, MLMODEL_FILE_NAME), flavor_name=FLAVOR_NAME
)
return os.path.join(model_dir, _extract_conda_env(model_config[ENV]))
if format == "pip":
requirements_file = os.path.join(model_dir, _REQUIREMENTS_FILE_NAME)
if os.path.exists(requirements_file):
return requirements_file
_logger.info(
f"{_REQUIREMENTS_FILE_NAME} is not found in the model directory. Falling back to"
f" extracting pip requirements from the model's 'conda.yaml' file. Conda"
" dependencies will be ignored."
)
with open(get_conda_yaml_path()) as yf:
conda_yaml = yaml.safe_load(yf)
conda_deps = conda_yaml.get("dependencies", [])
for index, dep in enumerate(conda_deps):
if isinstance(dep, dict) and "pip" in dep:
pip_deps_index = index
break
else:
raise MlflowException(
"No pip section found in conda.yaml file in the model directory.",
error_code=RESOURCE_DOES_NOT_EXIST,
)
pip_deps = conda_deps.pop(pip_deps_index)["pip"]
tmp_dir = tempfile.mkdtemp()
pip_file_path = os.path.join(tmp_dir, _REQUIREMENTS_FILE_NAME)
with open(pip_file_path, "w") as f:
f.write("\n".join(pip_deps) + "\n")
if len(conda_deps) > 0:
_logger.warning(
"The following conda dependencies have been excluded from the environment file:"
f" {', '.join(conda_deps)}."
)
return pip_file_path
elif format == "conda":
return get_conda_yaml_path()
else:
raise MlflowException(
f"Illegal format argument '{format}'.", error_code=INVALID_PARAMETER_VALUE
)
def get_model_dependencies(model_uri, format="pip"):
"""
Downloads the model dependencies and returns the path to requirements.txt or conda.yaml file.
.. warning::
This API downloads all the model artifacts to the local filesystem. This may take
a long time for large models. To avoid this overhead, use
``mlflow.artifacts.download_artifacts("<model_uri>/requirements.txt")`` or
``mlflow.artifacts.download_artifacts("<model_uri>/conda.yaml")`` instead.
Args:
model_uri: The uri of the model to get dependencies from.
format: The format of the returned dependency file. If the ``"pip"`` format is
specified, the path to a pip ``requirements.txt`` file is returned.
If the ``"conda"`` format is specified, the path to a ``"conda.yaml"``
file is returned . If the ``"pip"`` format is specified but the model
was not saved with a ``requirements.txt`` file, the ``pip`` section
of the model's ``conda.yaml`` file is extracted instead, and any
additional conda dependencies are ignored. Default value is ``"pip"``.
Returns:
The local filesystem path to either a pip ``requirements.txt`` file
(if ``format="pip"``) or a ``conda.yaml`` file (if ``format="conda"``)
specifying the model's dependencies.
"""
dep_file = _get_model_dependencies(model_uri, format)
if format == "pip":
prefix = "%" if _is_in_ipython_notebook() else ""
_logger.info(
"To install the dependencies that were used to train the model, run the "
f"following command: '{prefix}pip install -r {dep_file}'."
)
return dep_file
@deprecated("mlflow.pyfunc.load_model", 1.0)
def load_pyfunc(model_uri, suppress_warnings=False):
"""
Load a model stored in Python function format.
Args:
model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
- ``mlflow-artifacts:/path/to/model``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
suppress_warnings: If ``True``, non-fatal warning messages associated with the model
loading process will be suppressed. If ``False``, these warning messages will be
emitted.
"""
return load_model(model_uri, suppress_warnings)
def _warn_potentially_incompatible_py_version_if_necessary(model_py_version=None):
"""
Compares the version of Python that was used to save a given model with the version
of Python that is currently running. If a major or minor version difference is detected,
logs an appropriate warning.
"""
if model_py_version is None:
_logger.warning(
"The specified model does not have a specified Python version. It may be"
" incompatible with the version of Python that is currently running: Python %s",
PYTHON_VERSION,
)
elif get_major_minor_py_version(model_py_version) != get_major_minor_py_version(PYTHON_VERSION):
_logger.warning(
"The version of Python that the model was saved in, `Python %s`, differs"
" from the version of Python that is currently running, `Python %s`,"
" and may be incompatible",
model_py_version,
PYTHON_VERSION,
)
def _create_model_downloading_tmp_dir(should_use_nfs):
root_tmp_dir = get_or_create_nfs_tmp_dir() if should_use_nfs else get_or_create_tmp_dir()
root_model_cache_dir = os.path.join(root_tmp_dir, "models")
os.makedirs(root_model_cache_dir, exist_ok=True)
tmp_model_dir = tempfile.mkdtemp(dir=root_model_cache_dir)
# mkdtemp creates a directory with permission 0o700
# change it to be 0o770 to ensure it can be seen in spark UDF
os.chmod(tmp_model_dir, 0o770)
return tmp_model_dir
_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP = 200
def _is_variant_type(spark_type):
try:
from pyspark.sql.types import VariantType
return isinstance(spark_type, VariantType)
except ImportError:
return False
def _convert_spec_type_to_spark_type(spec_type):
from pyspark.sql.types import ArrayType, MapType, StringType, StructField, StructType
from mlflow.types.schema import AnyType, Array, DataType, Map, Object
if isinstance(spec_type, DataType):
return spec_type.to_spark()
if isinstance(spec_type, AnyType):
try:
from pyspark.sql.types import VariantType
return VariantType()
except ImportError:
raise MlflowException.invalid_parameter_value(
"`AnyType` is not supported in PySpark versions older than 4.0.0. "
"Upgrade your PySpark version to use this feature.",
)
if isinstance(spec_type, Array):
return ArrayType(_convert_spec_type_to_spark_type(spec_type.dtype))
if isinstance(spec_type, Object):
return StructType(
[
StructField(
property.name,
_convert_spec_type_to_spark_type(property.dtype),
# we set nullable to True for all properties
# to avoid some errors like java.lang.NullPointerException
# when the signature is not inferred based on correct data.
)
for property in spec_type.properties
]
)
# Map only supports string as key
if isinstance(spec_type, Map):
return MapType(
keyType=StringType(), valueType=_convert_spec_type_to_spark_type(spec_type.value_type)
)
raise MlflowException(f"Failed to convert schema type `{spec_type}` to spark type.")
def _cast_output_spec_to_spark_type(spec):
from pyspark.sql.types import ArrayType
from mlflow.types.schema import ColSpec, DataType, TensorSpec
# TODO: handle optional output columns.
if isinstance(spec, ColSpec):
return _convert_spec_type_to_spark_type(spec.type)
elif isinstance(spec, TensorSpec):
data_type = DataType.from_numpy_type(spec.type)
if data_type is None:
raise MlflowException(
f"Model output tensor spec type {spec.type} is not supported in spark_udf.",
error_code=INVALID_PARAMETER_VALUE,
)
if len(spec.shape) == 1:
return ArrayType(data_type.to_spark())
elif len(spec.shape) == 2:
return ArrayType(ArrayType(data_type.to_spark()))
else:
raise MlflowException(
"Only 1D or 2D tensors are supported as spark_udf "
f"return value, but model output '{spec.name}' has shape {spec.shape}.",
error_code=INVALID_PARAMETER_VALUE,
)
else:
raise MlflowException(
f"Unknown schema output spec {spec}.", error_code=INVALID_PARAMETER_VALUE
)
def _infer_spark_udf_return_type(model_output_schema):
from pyspark.sql.types import StructField, StructType
if len(model_output_schema.inputs) == 1:
return _cast_output_spec_to_spark_type(model_output_schema.inputs[0])
return StructType(
[
StructField(name=spec.name or str(i), dataType=_cast_output_spec_to_spark_type(spec))
for i, spec in enumerate(model_output_schema.inputs)
]
)
def _parse_spark_datatype(datatype: str):
from pyspark.sql.functions import udf
from pyspark.sql.session import SparkSession
return_type = "boolean" if datatype == "bool" else datatype
parsed_datatype = udf(lambda x: x, returnType=return_type).returnType
if parsed_datatype.typeName() == "unparseddata":
# For spark 3.5.x, `udf(lambda x: x, returnType=return_type).returnType`
# returns UnparsedDataType, which is not compatible with signature inference.
# Note: SparkSession.active only exists for spark >= 3.5.0
schema = (
SparkSession.active()
.range(0)
.select(udf(lambda x: x, returnType=return_type)("id"))
.schema
)
return schema[0].dataType
return parsed_datatype
def _is_none_or_nan(value):
# The condition `isinstance(value, float)` is needed to avoid error
# from `np.isnan(value)` if value is a non-numeric type.
return value is None or isinstance(value, float) and np.isnan(value)
def _convert_array_values(values, result_type):
"""
Convert list or numpy array values to spark dataframe column values.
"""
from pyspark.sql.types import ArrayType, StructType
if not isinstance(result_type, ArrayType):
raise MlflowException.invalid_parameter_value(
f"result_type must be ArrayType, got {result_type.simpleString()}",
)
spark_primitive_type_to_np_type = _get_spark_primitive_type_to_np_type()
if type(result_type.elementType) in spark_primitive_type_to_np_type:
np_type = spark_primitive_type_to_np_type[type(result_type.elementType)]
# For array type result values, if provided value is None or NaN, regard it as a null array.
# see https://github.com/mlflow/mlflow/issues/8986
return None if _is_none_or_nan(values) else np.array(values, dtype=np_type)
if isinstance(result_type.elementType, ArrayType):
return [_convert_array_values(v, result_type.elementType) for v in values]
if isinstance(result_type.elementType, StructType):
return [_convert_struct_values(v, result_type.elementType) for v in values]
if _is_variant_type(result_type.elementType):
return values
raise MlflowException.invalid_parameter_value(
"Unsupported array type field with element type "
f"{result_type.elementType.simpleString()} in Array type.",
)
def _get_spark_primitive_types():
from pyspark.sql import types
return (
types.IntegerType,
types.LongType,
types.FloatType,
types.DoubleType,
types.StringType,
types.BooleanType,
)
def _get_spark_primitive_type_to_np_type():
from pyspark.sql import types
return {
types.IntegerType: np.int32,
types.LongType: np.int64,
types.FloatType: np.float32,
types.DoubleType: np.float64,
types.BooleanType: np.bool_,
types.StringType: np.str_,
}
def _get_spark_primitive_type_to_python_type():
from pyspark.sql import types
return {
types.IntegerType: int,
types.LongType: int,
types.FloatType: float,
types.DoubleType: float,
types.BooleanType: bool,
types.StringType: str,
}
def _check_udf_return_type(data_type):
from pyspark.sql.types import ArrayType, MapType, StringType, StructType
primitive_types = _get_spark_primitive_types()
if isinstance(data_type, primitive_types):
return True
if isinstance(data_type, ArrayType):
return _check_udf_return_type(data_type.elementType)
if isinstance(data_type, StructType):
return all(_check_udf_return_type(field.dataType) for field in data_type.fields)
if isinstance(data_type, MapType):
return isinstance(data_type.keyType, StringType) and _check_udf_return_type(
data_type.valueType
)
return False
def _convert_struct_values(
result: pandas.DataFrame | dict[str, Any],
result_type,
):
"""
Convert spark StructType values to spark dataframe column values.
"""
from pyspark.sql.types import ArrayType, MapType, StructType
if not isinstance(result_type, StructType):
raise MlflowException.invalid_parameter_value(
f"result_type must be StructType, got {result_type.simpleString()}",
)
if not isinstance(result, (dict, pandas.DataFrame)):
raise MlflowException.invalid_parameter_value(
f"Unsupported result type {type(result)}, expected dict or pandas DataFrame",
)
spark_primitive_type_to_np_type = _get_spark_primitive_type_to_np_type()
is_pandas_df = isinstance(result, pandas.DataFrame)
result_dict = {}
for field_name in result_type.fieldNames():
field_type = result_type[field_name].dataType
field_values = result[field_name]
if type(field_type) in spark_primitive_type_to_np_type:
np_type = spark_primitive_type_to_np_type[type(field_type)]
if is_pandas_df:
# it's possible that field_values contain only Nones
# in this case, we don't need to cast the type
if not all(_is_none_or_nan(field_value) for field_value in field_values):
field_values = field_values.astype(np_type)
else:
field_values = (
None
if _is_none_or_nan(field_values)
else np.array(field_values, dtype=np_type).item()
)
elif isinstance(field_type, ArrayType):
if is_pandas_df:
field_values = pandas.Series(
_convert_array_values(field_value, field_type) for field_value in field_values
)
else:
field_values = _convert_array_values(field_values, field_type)
elif isinstance(field_type, StructType):
if is_pandas_df:
field_values = pandas.Series(
[
_convert_struct_values(field_value, field_type)
for field_value in field_values
]
)
else:
if isinstance(field_values, pydantic.BaseModel):
field_values = field_values.model_dump()
field_values = _convert_struct_values(field_values, field_type)
elif isinstance(field_type, MapType):
if is_pandas_df:
field_values = pandas.Series(
[
{
key: _convert_value_based_on_spark_type(value, field_type.valueType)
for key, value in field_value.items()
}
for field_value in field_values
]
).astype(object)
else:
field_values = {
key: _convert_value_based_on_spark_type(value, field_type.valueType)
for key, value in field_values.items()
}
elif _is_variant_type(field_type):
return field_values
else:
raise MlflowException.invalid_parameter_value(
f"Unsupported field type {field_type.simpleString()} in struct type.",
)
result_dict[field_name] = field_values
if is_pandas_df:
return pandas.DataFrame(result_dict)
return result_dict
def _convert_value_based_on_spark_type(value, spark_type):
"""
Convert value to python types based on the given spark type.
"""
from pyspark.sql.types import ArrayType, MapType, StructType
spark_primitive_type_to_python_type = _get_spark_primitive_type_to_python_type()
if type(spark_type) in spark_primitive_type_to_python_type:
python_type = spark_primitive_type_to_python_type[type(spark_type)]
return None if _is_none_or_nan(value) else python_type(value)
if isinstance(spark_type, StructType):
return _convert_struct_values(value, spark_type)
if isinstance(spark_type, ArrayType):
return [_convert_value_based_on_spark_type(v, spark_type.elementType) for v in value]
if isinstance(spark_type, MapType):
return {
key: _convert_value_based_on_spark_type(value[key], spark_type.valueType)
for key in value
}
if _is_variant_type(spark_type):
return value
raise MlflowException.invalid_parameter_value(
f"Unsupported type {spark_type} for value {value}"
)
# This location is used to prebuild python environment in Databricks runtime.
# The location for prebuilding env should be located under /local_disk0
# because the python env will be uploaded to NFS and mounted to Serverless UDF sandbox,
# for serverless client image case, it doesn't have "/local_disk0" directory
_PREBUILD_ENV_ROOT_LOCATION = "/tmp"
def _gen_prebuilt_env_archive_name(spark, local_model_path):
"""
Generate prebuilt env archive file name.
The format is:
'mlflow-{sha of python env config and dependencies}-{runtime version}-{platform machine}'
Note: The runtime version and platform machine information are included in the
archive name because the prebuilt env might not be compatible across different
runtime versions or platform machines.
"""
python_env = _get_python_env(Path(local_model_path))
env_name = _get_virtualenv_name(python_env, local_model_path)
dbconnect_udf_sandbox_info = get_dbconnect_udf_sandbox_info(spark)
return (
f"{env_name}-{dbconnect_udf_sandbox_info.image_version}-"
f"{dbconnect_udf_sandbox_info.platform_machine}"
)
def _verify_prebuilt_env(spark, local_model_path, env_archive_path):
# Use `[:-7]` to truncate ".tar.gz" in the end
archive_name = os.path.basename(env_archive_path)[:-7]
prebuilt_env_sha, prebuilt_runtime_version, prebuilt_platform_machine = archive_name.split("-")[
-3:
]
python_env = _get_python_env(Path(local_model_path))
env_sha = _get_virtualenv_name(python_env, local_model_path).split("-")[-1]
dbconnect_udf_sandbox_info = get_dbconnect_udf_sandbox_info(spark)
runtime_version = dbconnect_udf_sandbox_info.image_version
platform_machine = dbconnect_udf_sandbox_info.platform_machine
if prebuilt_env_sha != env_sha:
raise MlflowException(
f"The prebuilt env '{env_archive_path}' does not match the model required environment."
)
if prebuilt_runtime_version != runtime_version:
raise MlflowException(
f"The prebuilt env '{env_archive_path}' runtime version '{prebuilt_runtime_version}' "
f"does not match UDF sandbox runtime version {runtime_version}."
)
if prebuilt_platform_machine != platform_machine:
raise MlflowException(
f"The prebuilt env '{env_archive_path}' platform machine '{prebuilt_platform_machine}' "
f"does not match UDF sandbox platform machine {platform_machine}."
)
def _prebuild_env_internal(local_model_path, archive_name, save_path, env_manager):
env_root_dir = os.path.join(_PREBUILD_ENV_ROOT_LOCATION, archive_name)
archive_path = os.path.join(save_path, archive_name + ".tar.gz")
if os.path.exists(env_root_dir):
shutil.rmtree(env_root_dir)
if os.path.exists(archive_path):
os.remove(archive_path)
try:
pyfunc_backend = get_flavor_backend(
local_model_path,
env_manager=env_manager,
install_mlflow=False,
create_env_root_dir=False,
env_root_dir=env_root_dir,
)
pyfunc_backend.prepare_env(model_uri=local_model_path, capture_output=False)
# exclude pip cache from the archive file.
cache_path = os.path.join(env_root_dir, "pip_cache_pkgs")
if os.path.exists(cache_path):
shutil.rmtree(cache_path)
return archive_directory(env_root_dir, archive_path)
finally:
shutil.rmtree(env_root_dir, ignore_errors=True)
def _download_prebuilt_env_if_needed(prebuilt_env_uri):
from mlflow.utils.file_utils import get_or_create_tmp_dir
parsed_url = urlparse(prebuilt_env_uri)
if parsed_url.scheme in {"", "file"}:
# local path
return parsed_url.path
if parsed_url.scheme == "dbfs":
tmp_dir = MLFLOW_MODEL_ENV_DOWNLOADING_TEMP_DIR.get() or get_or_create_tmp_dir()
model_env_uc_path = parsed_url.path
# download file from DBFS.
local_model_env_path = os.path.join(tmp_dir, os.path.basename(model_env_uc_path))
if os.path.exists(local_model_env_path):
# file is already downloaded.
return local_model_env_path
try:
from databricks.sdk import WorkspaceClient
ws = WorkspaceClient()
# Download model env file from UC volume.
with (
ws.files.download(model_env_uc_path).contents as rf,
open(local_model_env_path, "wb") as wf,
):
while chunk := rf.read(4096 * 1024):
wf.write(chunk)
return local_model_env_path
except (Exception, KeyboardInterrupt):
if os.path.exists(local_model_env_path):
# clean the partially saved file if downloading fails.
os.remove(local_model_env_path)
raise
raise MlflowException(
f"Unsupported prebuilt env file path '{prebuilt_env_uri}', "
f"invalid scheme: '{parsed_url.scheme}'."
)
def build_model_env(model_uri, save_path, env_manager=_EnvManager.VIRTUALENV):
    """
    Prebuild model python environment and generate an archive file saved to provided
    `save_path`.

    Typical usages:

    - Pre-build a model's environment in Databricks Runtime and then download the prebuilt
      python environment archive file. This pre-built environment archive can then be used
      in `mlflow.pyfunc.spark_udf` for remote inference execution when using Databricks Connect
      to remotely connect to a Databricks environment for code execution.

    .. note::
        The `build_model_env` API is intended to only work when executed within Databricks runtime,
        serving the purpose of capturing the required execution environment that is needed for
        remote code execution when using DBConnect. The environment archive is designed to be used
        when performing remote execution using `mlflow.pyfunc.spark_udf` in
        Databricks runtime or Databricks Connect client and has no other purpose.
        The prebuilt env archive file cannot be used across different Databricks runtime
        versions or different platform machines. As such, if you connect to a different cluster
        that is running a different runtime version on Databricks, you will need to execute this
        API in a notebook and retrieve the generated archive to your local machine. Each
        environment snapshot is unique to the model, the runtime version of your remote
        Databricks cluster, and the specification of the udf execution environment.
        When using the prebuilt env in `mlflow.pyfunc.spark_udf`, MLflow will verify
        whether the spark UDF sandbox environment matches the prebuilt env requirements and will
        raise Exceptions if there are compatibility issues. If these occur, simply re-run this API
        in the cluster that you are attempting to attach to.

    .. code-block:: python
        :caption: Example

        from mlflow.pyfunc import build_model_env

        # Create a python environment archive file at the path `prebuilt_env_uri`
        prebuilt_env_uri = build_model_env(f"runs:/{run_id}/model", "/path/to/save_directory")

    Args:
        model_uri: URI to the model that is used to build the python environment.
        save_path: The directory path that is used to save the prebuilt model environment
            archive file path.
            The path can be either local directory path or
            mounted DBFS path such as '/dbfs/...' or
            mounted UC volume path such as '/Volumes/...'.
        env_manager: The environment manager to use in order to create the python environment
            for model inference, the value can be either 'virtualenv' or 'uv', the default
            value is 'virtualenv'.

    Returns:
        Return the path of an archive file containing the python environment data.
    """
    from mlflow.utils._spark_utils import _get_active_spark_session

    # This API captures the Databricks runtime environment, so it is meaningless (and
    # unsupported) outside of it.
    if not is_in_databricks_runtime():
        raise RuntimeError("'build_model_env' only support running in Databricks runtime.")
    if os.path.isfile(save_path):
        raise RuntimeError(f"The saving path '{save_path}' must be a directory.")
    os.makedirs(save_path, exist_ok=True)

    # Download the model locally first; the archive name below is derived from the model
    # contents and the active Spark session (runtime specifics).
    local_model_path = _download_artifact_from_uri(
        artifact_uri=model_uri, output_path=_create_model_downloading_tmp_dir(should_use_nfs=False)
    )
    archive_name = _gen_prebuilt_env_archive_name(_get_active_spark_session(), local_model_path)
    dest_path = os.path.join(save_path, archive_name + ".tar.gz")
    # Refuse to overwrite an existing archive so a stale env is never silently replaced.
    if os.path.exists(dest_path):
        raise RuntimeError(
            "A pre-built model python environment already exists "
            f"in '{dest_path}'. To rebuild it, please remove "
            "the existing one first."
        )

    # Archive the environment directory as a `tar.gz` format archive file,
    # and then move the archive file to the destination directory.
    # Note:
    #  - all symlink files in the input directory are kept as it is in the
    #    archive file.
    #  - the destination directory could be UC-volume fuse mounted directory
    #    which only supports limited filesystem operations, so to ensure it works,
    #    we generate the archive file under /tmp and then move it into the
    #    destination directory.
    tmp_archive_path = None
    try:
        tmp_archive_path = _prebuild_env_internal(
            local_model_path, archive_name, _PREBUILD_ENV_ROOT_LOCATION, env_manager
        )
        shutil.move(tmp_archive_path, save_path)
        return dest_path
    finally:
        # Always clean up the downloaded model; remove the tmp archive only if the move
        # above did not already consume it (e.g. the move failed).
        shutil.rmtree(local_model_path, ignore_errors=True)
        if tmp_archive_path and os.path.exists(tmp_archive_path):
            os.remove(tmp_archive_path)
def spark_udf(
    spark,
    model_uri,
    result_type=None,
    env_manager=None,
    params: dict[str, Any] | None = None,
    extra_env: dict[str, str] | None = None,
    prebuilt_env_uri: str | None = None,
    model_config: str | Path | dict[str, Any] | None = None,
):
    """
    A Spark UDF that can be used to invoke the Python function formatted model.

    Parameters passed to the UDF are forwarded to the model as a DataFrame where the column names
    are ordinals (0, 1, ...). On some versions of Spark (3.0 and above), it is also possible to
    wrap the input in a struct. In that case, the data will be passed as a DataFrame with column
    names given by the struct definition (e.g. when invoked as my_udf(struct('x', 'y')), the model
    will get the data as a pandas DataFrame with 2 columns 'x' and 'y').

    If a model contains a signature with tensor spec inputs, you will need to pass a column of
    array type as a corresponding UDF argument. The column values of which must be one dimensional
    arrays. The UDF will reshape the column values to the required shape with 'C' order
    (i.e. read / write the elements using C-like index order) and cast the values as the required
    tensor spec type.

    If a model contains a signature, the UDF can be called without specifying column name
    arguments. In this case, the UDF will be called with column names from signature, so the
    evaluation dataframe's column names must match the model signature's column names.

    The predictions are filtered to contain only the columns that can be represented as the
    ``result_type``. If the ``result_type`` is string or array of strings, all predictions are
    converted to string. If the result type is not an array type, the left most column with
    matching type is returned.

    .. note::
        Inputs of type ``pyspark.sql.types.DateType`` are not supported on earlier versions of
        Spark (2.4 and below).

    .. note::
        When using Databricks Connect to connect to a remote Databricks cluster,
        the Databricks cluster must use runtime version >= 15.4, and if the 'prebuilt_env_uri'
        parameter is set, the 'env_manager' parameter should not be set. If the runtime
        version is 15.4 and the cluster is standard access mode, the cluster needs to
        configure "spark.databricks.safespark.archive.artifact.unpack.disabled" to "false".

    .. note::
        Please be aware that when operating in Databricks Serverless,
        spark tasks run within the confines of the Databricks Serverless UDF sandbox.
        This environment has a total capacity limit of 1GB, combining both available
        memory and local disk capacity. Furthermore, there are no GPU devices available
        in this setup. Therefore, any deep-learning models that contain large weights
        or require a GPU are not suitable for deployment on Databricks Serverless.

    .. code-block:: python
        :caption: Example

        from pyspark.sql.functions import struct

        predict = mlflow.pyfunc.spark_udf(spark, "/my/local/model")
        df.withColumn("prediction", predict(struct("name", "age"))).show()

    Args:
        spark: A SparkSession object.
        model_uri: The location, in URI format, of the MLflow model with the
            :py:mod:`mlflow.pyfunc` flavor. For example:

            - ``/Users/me/path/to/local/model``
            - ``relative/path/to/local/model``
            - ``s3://my_bucket/path/to/model``
            - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
            - ``models:/<model_name>/<model_version>``
            - ``models:/<model_name>/<stage>``
            - ``mlflow-artifacts:/path/to/model``

            For more information about supported URI schemes, see
            `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
            artifact-locations>`_.
        result_type: the return type of the user-defined function. The value can be either a
            ``pyspark.sql.types.DataType`` object or a DDL-formatted type string. Only a primitive
            type, an array ``pyspark.sql.types.ArrayType`` of primitive type, or a struct type
            containing fields of above 2 kinds of types are allowed.
            If unspecified, it tries to infer result type from model signature
            output schema, if model output schema is not available, it fallbacks to use ``double``
            type.

            The following classes of result type are supported:

            - "int" or ``pyspark.sql.types.IntegerType``: The leftmost integer that can fit in an
              ``int32`` or an exception if there is none.
            - "long" or ``pyspark.sql.types.LongType``: The leftmost long integer that can fit in an
              ``int64`` or an exception if there is none.
            - ``ArrayType(IntegerType|LongType)``: All integer columns that can fit into the
              requested size.
            - "float" or ``pyspark.sql.types.FloatType``: The leftmost numeric result cast to
              ``float32`` or an exception if there is none.
            - "double" or ``pyspark.sql.types.DoubleType``: The leftmost numeric result cast to
              ``double`` or an exception if there is none.
            - ``ArrayType(FloatType|DoubleType)``: All numeric columns cast to the requested type or
              an exception if there are no numeric columns.
            - "string" or ``pyspark.sql.types.StringType``: The leftmost column converted to
              ``string``.
            - "boolean" or "bool" or ``pyspark.sql.types.BooleanType``: The leftmost column
              converted to ``bool`` or an exception if there is none.
            - ``ArrayType(StringType)``: All columns converted to ``string``.
            - "field1 FIELD1_TYPE, field2 FIELD2_TYPE, ...": A struct type containing multiple
              fields separated by comma, each field type must be one of types listed above.
        env_manager: The environment manager to use in order to create the python environment
            for model inference. Note that environment is only restored in the context
            of the PySpark UDF; the software environment outside of the UDF is
            unaffected. If `prebuilt_env_uri` parameter is not set, the default value
            is ``local``, and the following values are supported:

            - ``virtualenv``: Use virtualenv to restore the python environment that
              was used to train the model. This is the default option if ``env_manager``
              is not set.
            - ``uv`` : Use uv to restore the python environment that
              was used to train the model.
            - ``conda``: Use Conda to restore the software environment
              that was used to train the model.
            - ``local``: Use the current Python environment for model inference, which
              may differ from the environment used to train the model and may lead to
              errors or invalid predictions.

            If the `prebuilt_env_uri` parameter is set, `env_manager` parameter should not
            be set.
        params: Additional parameters to pass to the model for inference.
        extra_env: Extra environment variables to pass to the UDF executors.
            For overrides that need to propagate to the Spark workers (i.e.,
            overriding the scoring server timeout via `MLFLOW_SCORING_SERVER_REQUEST_TIMEOUT`).
        prebuilt_env_uri: The path of the prebuilt env archive file created by
            `mlflow.pyfunc.build_model_env` API.
            This parameter can only be used in Databricks Serverless notebook REPL,
            Databricks Shared cluster notebook REPL, and Databricks Connect client
            environment.
            The path can be either local file path or DBFS path such as
            'dbfs:/Volumes/...', in this case, MLflow automatically downloads it
            to local temporary directory, "MLFLOW_MODEL_ENV_DOWNLOADING_TEMP_DIR"
            environmental variable can be set to specify the temporary directory
            to use.
            If this parameter is set, `env_manager` parameter must not be set.
        model_config: The model configuration to set when loading the model.
            See 'model_config' argument in `mlflow.pyfunc.load_model` API for details.

    Returns:
        Spark UDF that applies the model's ``predict`` method to the data and returns a
        type specified by ``result_type``, which by default is a double.
    """
    # Scope Spark import to this method so users don't need pyspark to use non-Spark-related
    # functionality.
    from pyspark.sql.functions import pandas_udf
    from pyspark.sql.types import (
        ArrayType,
        BooleanType,
        DoubleType,
        FloatType,
        IntegerType,
        LongType,
        MapType,
        StringType,
    )
    from pyspark.sql.types import StructType as SparkStructType

    from mlflow.pyfunc.spark_model_cache import SparkModelCache
    from mlflow.utils._spark_utils import _SparkDirectoryDistributor

    is_spark_connect = is_spark_connect_mode()
    # Used in test to force install local version of mlflow when starting a model server
    mlflow_home = os.environ.get("MLFLOW_HOME")
    openai_env_vars = mlflow.openai.model._OpenAIEnvVar.read_environ()
    mlflow_testing = _MLFLOW_TESTING.get_raw()

    if prebuilt_env_uri:
        if env_manager not in (None, _EnvManager.VIRTUALENV, _EnvManager.UV):
            raise MlflowException(
                "If 'prebuilt_env_uri' parameter is set, 'env_manager' parameter must "
                "be either None, 'virtualenv', or 'uv'."
            )
        # NOTE(review): an explicit 'uv' is overridden to 'virtualenv' here — presumably the
        # prebuilt-env restore path only supports the virtualenv layout; confirm.
        env_manager = _EnvManager.VIRTUALENV
    else:
        env_manager = env_manager or _EnvManager.LOCAL
        _EnvManager.validate(env_manager)

    if is_spark_connect:
        is_spark_in_local_mode = False
    else:
        # Check whether spark is in local or local-cluster mode
        # this case all executors and driver share the same filesystem
        is_spark_in_local_mode = spark.conf.get("spark.master").startswith("local")

    is_dbconnect_mode = is_databricks_connect(spark)
    if prebuilt_env_uri is not None and not is_dbconnect_mode:
        raise RuntimeError(
            "'prebuilt_env_uri' parameter can only be used in Databricks Serverless "
            "notebook REPL, Databricks Shared cluster notebook REPL, and Databricks "
            "Connect client environment."
        )

    if prebuilt_env_uri is None and is_dbconnect_mode and not is_in_databricks_runtime():
        raise RuntimeError(
            "'prebuilt_env_uri' param is required if using Databricks Connect to connect "
            "to Databricks cluster from your own machine."
        )

    # Databricks connect can use `spark.addArtifact` to upload artifact to NFS.
    # But for Databricks shared cluster runtime, it can directly write to NFS, so exclude it
    # Note for Databricks Serverless runtime (notebook REPL), it runs on Servereless VM that
    # can't access NFS, so it needs to use `spark.addArtifact`.
    use_dbconnect_artifact = is_dbconnect_mode and not is_in_databricks_shared_cluster_runtime()

    if use_dbconnect_artifact:
        udf_sandbox_info = get_dbconnect_udf_sandbox_info(spark)

        if Version(udf_sandbox_info.mlflow_version) < Version("2.18.0"):
            raise MlflowException(
                "Using 'mlflow.pyfunc.spark_udf' in Databricks Serverless or in remote "
                "Databricks Connect requires UDF sandbox image installed with MLflow "
                "of version >= 2.18.0"
            )
        # `udf_sandbox_info.runtime_version` format is like '<major_version>.<minor_version>'.
        # It's safe to apply `Version`.
        dbr_runtime_version = Version(udf_sandbox_info.runtime_version)
        if dbr_runtime_version < Version("15.4"):
            raise MlflowException(
                "Using 'mlflow.pyfunc.spark_udf' in Databricks Serverless or in remote "
                "Databricks Connect requires Databricks runtime version >= 15.4."
            )
        if dbr_runtime_version == Version("15.4"):
            if spark.conf.get("spark.databricks.pyspark.udf.isolation.enabled").lower() == "true":
                # The connected cluster is standard (shared) mode.
                if (
                    spark.conf.get(
                        "spark.databricks.safespark.archive.artifact.unpack.disabled"
                    ).lower()
                    != "false"
                ):
                    raise MlflowException(
                        "Using 'mlflow.pyfunc.spark_udf' in remote Databricks Connect requires "
                        "Databricks cluster setting "
                        "'spark.databricks.safespark.archive.artifact.unpack.disabled' to 'false' "
                        "if Databricks runtime version is 15.4"
                    )

    nfs_root_dir = get_nfs_cache_root_dir()
    should_use_nfs = nfs_root_dir is not None
    should_use_spark_to_broadcast_file = not (
        is_spark_in_local_mode or should_use_nfs or is_spark_connect or use_dbconnect_artifact
    )

    # For spark connect mode,
    # If client code is executed in databricks runtime and NFS is available,
    # we save model to NFS temp directory in the driver
    # and load the model in the executor.
    should_spark_connect_use_nfs = is_in_databricks_runtime() and should_use_nfs

    if (
        is_spark_connect
        and not is_dbconnect_mode
        and env_manager in (_EnvManager.VIRTUALENV, _EnvManager.CONDA, _EnvManager.UV)
    ):
        raise MlflowException.invalid_parameter_value(
            f"Environment manager {env_manager!r} is not supported in Spark Connect "
            "client environment if it connects to non-Databricks Spark cluster.",
        )

    local_model_path = _download_artifact_from_uri(
        artifact_uri=model_uri,
        output_path=_create_model_downloading_tmp_dir(should_use_nfs),
    )

    if prebuilt_env_uri:
        prebuilt_env_uri = _download_prebuilt_env_if_needed(prebuilt_env_uri)
        _verify_prebuilt_env(spark, local_model_path, prebuilt_env_uri)
    if use_dbconnect_artifact and env_manager == _EnvManager.CONDA:
        raise MlflowException(
            "Databricks connect mode or Databricks Serverless python REPL doesn't "
            "support env_manager 'conda'."
        )

    if env_manager == _EnvManager.LOCAL:
        # Assume spark executor python environment is the same with spark driver side.
        model_requirements = _get_pip_requirements_from_model_path(local_model_path)
        warn_dependency_requirement_mismatches(model_requirements)
        _logger.warning(
            'Calling `spark_udf()` with `env_manager="local"` does not recreate the same '
            "environment that was used during training, which may lead to errors or inaccurate "
            'predictions. We recommend specifying `env_manager="conda"`, which automatically '
            "recreates the environment that was used to train the model and performs inference "
            "in the recreated environment."
        )
    else:
        _logger.info(
            f"This UDF will use {env_manager} to recreate the model's software environment for "
            "inference. This may take extra time during execution."
        )
        if not sys.platform.startswith("linux"):
            # TODO: support killing mlflow server launched in UDF task when spark job canceled
            #  for non-linux system.
            #  https://stackoverflow.com/questions/53208/how-do-i-automatically-destroy-child-processes-in-windows
            _logger.warning(
                "In order to run inference code in restored python environment, PySpark UDF "
                "processes spawn MLflow Model servers as child processes. Due to system "
                "limitations with handling SIGKILL signals, these MLflow Model server child "
                "processes cannot be cleaned up if the Spark Job is canceled."
            )

    # The env cache key identifies one prebuilt environment archive; it doubles as the
    # on-disk directory name under `_PREBUILD_ENV_ROOT_LOCATION` on executors.
    if prebuilt_env_uri:
        env_cache_key = os.path.basename(prebuilt_env_uri)[:-7]
    elif use_dbconnect_artifact:
        env_cache_key = _gen_prebuilt_env_archive_name(spark, local_model_path)
    else:
        env_cache_key = None

    if use_dbconnect_artifact or prebuilt_env_uri is not None:
        prebuilt_env_root_dir = os.path.join(_PREBUILD_ENV_ROOT_LOCATION, env_cache_key)
        pyfunc_backend_env_root_config = {
            "create_env_root_dir": False,
            "env_root_dir": prebuilt_env_root_dir,
        }
    else:
        pyfunc_backend_env_root_config = {"create_env_root_dir": True}
    pyfunc_backend = get_flavor_backend(
        local_model_path,
        env_manager=env_manager,
        install_mlflow=os.environ.get("MLFLOW_HOME") is not None,
        **pyfunc_backend_env_root_config,
    )
    dbconnect_artifact_cache = DBConnectArtifactCache.get_or_create(spark)

    if use_dbconnect_artifact:
        # Upload model artifacts and python environment to NFS as DBConnect artifacts.
        if env_manager in (_EnvManager.VIRTUALENV, _EnvManager.UV):
            if not dbconnect_artifact_cache.has_cache_key(env_cache_key):
                if prebuilt_env_uri:
                    env_archive_path = prebuilt_env_uri
                else:
                    env_archive_path = _prebuild_env_internal(
                        local_model_path, env_cache_key, get_or_create_tmp_dir(), env_manager
                    )
                dbconnect_artifact_cache.add_artifact_archive(env_cache_key, env_archive_path)

        if not dbconnect_artifact_cache.has_cache_key(model_uri):
            model_archive_path = os.path.join(
                os.path.dirname(local_model_path), f"model-{uuid.uuid4()}.tar.gz"
            )
            archive_directory(local_model_path, model_archive_path)
            dbconnect_artifact_cache.add_artifact_archive(model_uri, model_archive_path)
    elif not should_use_spark_to_broadcast_file:
        if prebuilt_env_uri:
            # Extract prebuilt env archive file to NFS directory.
            prebuilt_env_nfs_dir = os.path.join(
                get_or_create_nfs_tmp_dir(), "prebuilt_env", env_cache_key
            )
            if not os.path.exists(prebuilt_env_nfs_dir):
                extract_archive_to_dir(prebuilt_env_uri, prebuilt_env_nfs_dir)
        else:
            # Prepare restored environment in driver side if possible.
            # Note: In databricks runtime, because databricks notebook cell output cannot capture
            # child process output, so that set capture_output to be True so that when `conda
            # prepare env` command failed, the exception message will include command stdout/stderr
            # output. Otherwise user have to check cluster driver log to find command stdout/stderr
            # output.
            # In non-databricks runtime, set capture_output to be False, because the benefit of
            # "capture_output=False" is the output will be printed immediately, otherwise you have
            # to wait conda command fail and suddenly get all output printed (included in error
            # message).
            if env_manager != _EnvManager.LOCAL:
                pyfunc_backend.prepare_env(
                    model_uri=local_model_path, capture_output=is_in_databricks_runtime()
                )
    else:
        # Broadcast local model directory to remote worker if needed.
        archive_path = SparkModelCache.add_local_model(spark, local_model_path)

    model_metadata = Model.load(os.path.join(local_model_path, MLMODEL_FILE_NAME))

    # Resolve the Spark return type: explicit argument wins; otherwise infer from the model's
    # output schema; otherwise fall back to double.
    if result_type is None:
        if model_output_schema := model_metadata.get_output_schema():
            result_type = _infer_spark_udf_return_type(model_output_schema)
        else:
            _logger.warning(
                "No 'result_type' provided for spark_udf and the model does not "
                "have an output schema. 'result_type' is set to 'double' type."
            )
            result_type = DoubleType()
    else:
        if isinstance(result_type, str):
            result_type = _parse_spark_datatype(result_type)
        # if result type is inferred by MLflow, we don't need to check it
        if not _check_udf_return_type(result_type):
            raise MlflowException.invalid_parameter_value(
                f"""Invalid 'spark_udf' result type: {result_type}.
It must be one of the following types:
Primitive types:
- int
- long
- float
- double
- string
- boolean
Compound types:
- ND array of primitives / structs.
- struct<field: primitive | array<primitive> | array<array<primitive>>, ...>:
  A struct with primitive, ND array<primitive/structs>,
  e.g., struct<a:int, b:array<int>>.
"""
            )
    params = _validate_params(params, model_metadata)

    def _predict_row_batch(predict_fn, args):
        # Assemble the UDF arguments into a single pandas DataFrame, apply `predict_fn`,
        # then coerce the prediction into the declared Spark `result_type`.
        input_schema = model_metadata.get_input_schema()
        args = list(args)
        if len(args) == 1 and isinstance(args[0], pandas.DataFrame):
            pdf = args[0]
        else:
            if input_schema is None:
                names = [str(i) for i in range(len(args))]
            else:
                names = input_schema.input_names()
                required_names = input_schema.required_input_names()
                if len(args) > len(names):
                    args = args[: len(names)]
                if len(args) < len(required_names):
                    raise MlflowException(
                        f"Model input is missing required columns. Expected {len(names)} required"
                        f" input columns {names}, but the model received only {len(args)} "
                        "unnamed input columns (Since the columns were passed unnamed they are"
                        " expected to be in the order specified by the schema)."
                    )
            pdf = pandas.DataFrame(
                data={
                    names[i]: arg
                    if isinstance(arg, pandas.Series)
                    # pandas_udf receives a StructType column as a pandas DataFrame.
                    # We need to convert it back to a dict of pandas Series.
                    else arg.apply(lambda row: row.to_dict(), axis=1)
                    for i, arg in enumerate(args)
                },
                columns=names,
            )

        result = predict_fn(pdf, params)

        if isinstance(result, dict):
            result = {k: list(v) for k, v in result.items()}

        if isinstance(result_type, ArrayType) and isinstance(result_type.elementType, ArrayType):
            result_values = _convert_array_values(result, result_type)
            return pandas.Series(result_values)

        if isinstance(result_type, SparkStructType):
            if (
                isinstance(result, list)
                and len(result) > 0
                and isinstance(result[0], pydantic.BaseModel)
            ):
                result = pandas.DataFrame([r.model_dump() for r in result])
            else:
                result = pandas.DataFrame(result)
            return _convert_struct_values(result, result_type)

        if not isinstance(result, pandas.DataFrame):
            if isinstance(result_type, MapType):
                # list of dicts should be converted into a single column
                result = pandas.DataFrame([result])
            else:
                result = (
                    pandas.DataFrame([result]) if np.isscalar(result) else pandas.DataFrame(result)
                )

        elem_type = result_type.elementType if isinstance(result_type, ArrayType) else result_type

        if type(elem_type) == IntegerType:
            result = result.select_dtypes(
                [np.byte, np.ubyte, np.short, np.ushort, np.int32]
            ).astype(np.int32)
        elif type(elem_type) == LongType:
            result = result.select_dtypes([np.byte, np.ubyte, np.short, np.ushort, int]).astype(
                np.int64
            )
        elif type(elem_type) == FloatType:
            result = result.select_dtypes(include=(np.number,)).astype(np.float32)
        elif type(elem_type) == DoubleType:
            result = result.select_dtypes(include=(np.number,)).astype(np.float64)
        elif type(elem_type) == BooleanType:
            result = result.select_dtypes([bool, np.bool_]).astype(bool)

        if len(result.columns) == 0:
            raise MlflowException(
                message="The model did not produce any values compatible with the requested "
                f"type '{elem_type}'. Consider requesting udf with StringType or "
                "Arraytype(StringType).",
                error_code=INVALID_PARAMETER_VALUE,
            )

        if type(elem_type) == StringType:
            if Version(pandas.__version__) >= Version("2.1.0"):
                result = result.map(str)
            else:
                result = result.applymap(str)

        if type(result_type) == ArrayType:
            return pandas.Series(result.to_numpy().tolist())
        else:
            return result[result.columns[0]]

    result_type_hint = (
        pandas.DataFrame if isinstance(result_type, SparkStructType) else pandas.Series
    )

    tracking_uri = mlflow.get_tracking_uri()
    enforce_stdin_scoring_server = MLFLOW_ENFORCE_STDIN_SCORING_SERVER_FOR_SPARK_UDF.get()

    @pandas_udf(result_type)
    def udf(
        # `pandas_udf` does not support modern type annotations
        iterator: Iterator[Tuple[Union[pandas.Series, pandas.DataFrame], ...]],  # noqa: UP006,UP007
    ) -> Iterator[result_type_hint]:
        # importing here to prevent circular import
        from mlflow.pyfunc.scoring_server.client import (
            ScoringServerClient,
            StdinScoringServerClient,
        )

        # Note: this is a pandas udf function in iteration style, which takes an iterator of
        # tuple of pandas.Series and outputs an iterator of pandas.Series.
        update_envs = {}
        if mlflow_home is not None:
            update_envs["MLFLOW_HOME"] = mlflow_home
        if openai_env_vars:
            update_envs.update(openai_env_vars)
        if mlflow_testing:
            update_envs[_MLFLOW_TESTING.name] = mlflow_testing
        if extra_env:
            update_envs.update(extra_env)

        # use `modified_environ` to temporarily set the envs and restore them finally
        with modified_environ(update=update_envs):
            scoring_server_proc = None
            # set tracking_uri inside udf so that with spark_connect
            # we can load the model from correct path
            mlflow.set_tracking_uri(tracking_uri)

            if env_manager != _EnvManager.LOCAL:
                if use_dbconnect_artifact:
                    local_model_path_on_executor = (
                        dbconnect_artifact_cache.get_unpacked_artifact_dir(model_uri)
                    )
                    env_src_dir = dbconnect_artifact_cache.get_unpacked_artifact_dir(env_cache_key)
                    # Create symlink if it does not exist
                    if not os.path.exists(prebuilt_env_root_dir):
                        os.symlink(env_src_dir, prebuilt_env_root_dir)
                elif prebuilt_env_uri is not None:
                    # prebuilt env is extracted to `prebuilt_env_nfs_dir` directory,
                    # and model is downloaded to `local_model_path` which points to an NFS
                    # directory too.
                    local_model_path_on_executor = None
                    # Create symlink if it does not exist
                    if not os.path.exists(prebuilt_env_root_dir):
                        os.symlink(prebuilt_env_nfs_dir, prebuilt_env_root_dir)
                elif should_use_spark_to_broadcast_file:
                    local_model_path_on_executor = _SparkDirectoryDistributor.get_or_extract(
                        archive_path
                    )
                    # Call "prepare_env" in advance in order to reduce scoring server launch time.
                    # So that we can use a shorter timeout when call `client.wait_server_ready`,
                    # otherwise we have to set a long timeout for `client.wait_server_ready` time,
                    # this prevents spark UDF task failing fast if other exception raised
                    # when scoring server launching.
                    # Set "capture_output" so that if "conda env create" command failed, the command
                    # stdout/stderr output will be attached to the exception message and included in
                    # driver side exception.
                    pyfunc_backend.prepare_env(
                        model_uri=local_model_path_on_executor, capture_output=True
                    )
                else:
                    local_model_path_on_executor = None

                if not enforce_stdin_scoring_server and check_port_connectivity():
                    # launch scoring server
                    server_port = find_free_port()
                    host = "127.0.0.1"
                    scoring_server_proc = pyfunc_backend.serve(
                        model_uri=local_model_path_on_executor or local_model_path,
                        port=server_port,
                        host=host,
                        timeout=MLFLOW_SCORING_SERVER_REQUEST_TIMEOUT.get(),
                        enable_mlserver=False,
                        synchronous=False,
                        stdout=subprocess.PIPE,
                        stderr=subprocess.STDOUT,
                        model_config=model_config,
                    )

                    client = ScoringServerClient(host, server_port)
                else:
                    scoring_server_proc = pyfunc_backend.serve_stdin(
                        model_uri=local_model_path_on_executor or local_model_path,
                        stdout=subprocess.PIPE,
                        stderr=subprocess.STDOUT,
                        model_config=model_config,
                    )
                    client = StdinScoringServerClient(scoring_server_proc)

                _logger.info("Using %s", client.__class__.__name__)
                server_tail_logs = collections.deque(
                    maxlen=_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP
                )

                def server_redirect_log_thread_func(child_stdout):
                    # Mirror the model server's output to this worker's stdout while keeping
                    # a bounded tail for error reporting.
                    for line in child_stdout:
                        decoded = line.decode() if isinstance(line, bytes) else line
                        server_tail_logs.append(decoded)
                        sys.stdout.write("[model server] " + decoded)

                server_redirect_log_thread = threading.Thread(
                    target=server_redirect_log_thread_func,
                    args=(scoring_server_proc.stdout,),
                    daemon=True,
                    name=f"mlflow_pyfunc_model_server_log_redirector_{uuid.uuid4().hex[:8]}",
                )
                server_redirect_log_thread.start()

                try:
                    client.wait_server_ready(timeout=90, scoring_server_proc=scoring_server_proc)
                except Exception as e:
                    err_msg = (
                        "During spark UDF task execution, mlflow model server failed to launch. "
                    )
                    if len(server_tail_logs) == _MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP:
                        err_msg += (
                            f"Last {_MLFLOW_SERVER_OUTPUT_TAIL_LINES_TO_KEEP} "
                            "lines of MLflow model server output:\n"
                        )
                    else:
                        err_msg += "MLflow model server output:\n"
                    err_msg += "".join(server_tail_logs)
                    raise MlflowException(err_msg) from e

                def batch_predict_fn(pdf, params=None):
                    if "params" in inspect.signature(client.invoke).parameters:
                        return client.invoke(pdf, params=params).get_predictions()
                    _log_warning_if_params_not_in_predict_signature(_logger, params)
                    return client.invoke(pdf).get_predictions()

            elif env_manager == _EnvManager.LOCAL:
                if use_dbconnect_artifact:
                    model_path = dbconnect_artifact_cache.get_unpacked_artifact_dir(model_uri)
                    loaded_model = mlflow.pyfunc.load_model(model_path, model_config=model_config)
                elif is_spark_connect and not should_spark_connect_use_nfs:
                    model_path = os.path.join(
                        tempfile.gettempdir(),
                        "mlflow",
                        hashlib.sha1(model_uri.encode(), usedforsecurity=False).hexdigest(),
                        # Use pid to avoid conflict when multiple spark UDF tasks
                        str(os.getpid()),
                    )
                    try:
                        loaded_model = mlflow.pyfunc.load_model(
                            model_path, model_config=model_config
                        )
                    except Exception:
                        os.makedirs(model_path, exist_ok=True)
                        loaded_model = mlflow.pyfunc.load_model(
                            model_uri, dst_path=model_path, model_config=model_config
                        )
                elif should_use_spark_to_broadcast_file:
                    loaded_model, _ = SparkModelCache.get_or_load(archive_path)
                else:
                    loaded_model = mlflow.pyfunc.load_model(
                        local_model_path, model_config=model_config
                    )

                def batch_predict_fn(pdf, params=None):
                    if "params" in inspect.signature(loaded_model.predict).parameters:
                        return loaded_model.predict(pdf, params=params)
                    _log_warning_if_params_not_in_predict_signature(_logger, params)
                    return loaded_model.predict(pdf)

            try:
                for input_batch in iterator:
                    # If the UDF is called with only multiple arguments,
                    # the `input_batch` is a tuple which composes of several pd.Series/pd.DataFrame
                    # objects.
                    # If the UDF is called with only one argument,
                    # the `input_batch` instance will be an instance of `pd.Series`/`pd.DataFrame`,
                    if isinstance(input_batch, (pandas.Series, pandas.DataFrame)):
                        # UDF is called with only one argument
                        row_batch_args = (input_batch,)
                    else:
                        row_batch_args = input_batch

                    if len(row_batch_args[0]) > 0:
                        yield _predict_row_batch(batch_predict_fn, row_batch_args)
            except SystemError as e:
                if "error return without exception set" in str(e):
                    raise MlflowException(
                        "A system error related to the Python C extension has occurred. "
                        "This is usually caused by an incompatible Python library that uses the "
                        "C extension. To address this, we recommend you to log the model "
                        "with fixed version python libraries that use the C extension "
                        "(such as 'numpy' library), and set spark_udf `env_manager` argument "
                        "to 'virtualenv' or 'uv' so that spark_udf can restore the original "
                        "python library version before running model inference."
                    ) from e
            finally:
                if scoring_server_proc is not None:
                    os.kill(scoring_server_proc.pid, signal.SIGTERM)

    udf.metadata = model_metadata

    @functools.wraps(udf)
    def udf_with_default_cols(*args):
        # When called with no columns, derive the column list from the model signature so
        # `predict(df)`-style usage works; otherwise delegate straight to the pandas UDF.
        if len(args) == 0:
            input_schema = model_metadata.get_input_schema()
            if input_schema and len(input_schema.optional_input_names()) > 0:
                raise MlflowException(
                    message="Cannot apply UDF without column names specified when"
                    " model signature contains optional columns.",
                    error_code=INVALID_PARAMETER_VALUE,
                )
            if input_schema and len(input_schema.inputs) > 0:
                if input_schema.has_input_names():
                    input_names = input_schema.input_names()
                    return udf(*input_names)
                else:
                    raise MlflowException(
                        message="Cannot apply udf because no column names specified. The udf "
                        f"expects {len(input_schema.inputs)} columns with types: "
                        # BUGFIX: this fragment previously lacked the f-prefix, so the literal
                        # text "{input_schema.inputs}" was emitted instead of the actual types.
                        f"{input_schema.inputs}. Input column names could not be inferred from the"
                        " model signature (column names not found).",
                        error_code=INVALID_PARAMETER_VALUE,
                    )
            else:
                raise MlflowException(
                    "Attempting to apply udf on zero columns because no column names were "
                    "specified as arguments or inferred from the model signature.",
                    error_code=INVALID_PARAMETER_VALUE,
                )
        else:
            return udf(*args)

    return udf_with_default_cols
def _validate_function_python_model(python_model):
    """
    Validate that ``python_model`` is usable as a pyfunc model definition.

    Accepts either a :class:`~PythonModel` instance or a callable taking exactly one
    argument; raises ``MlflowException`` with ``INVALID_PARAMETER_VALUE`` otherwise.
    """
    is_model_instance = isinstance(python_model, PythonModel)
    if not is_model_instance and not callable(python_model):
        raise MlflowException(
            "`python_model` must be a PythonModel instance, callable object, or path to a script "
            "that uses set_model() to set a PythonModel instance or callable object.",
            error_code=INVALID_PARAMETER_VALUE,
        )

    # A plain callable must be unary: it receives a single model-input argument.
    if callable(python_model):
        num_args = len(inspect.signature(python_model).parameters)
        if num_args != 1:
            raise MlflowException(
                "When `python_model` is a callable object, it must accept exactly one argument. "
                f"Found {num_args} arguments.",
                error_code=INVALID_PARAMETER_VALUE,
            )
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name="scikit-learn"))
@trace_disabled  # Suppress traces for internal predict calls while saving model
def save_model(
    path,
    loader_module=None,
    data_path=None,
    code_paths=None,
    infer_code_paths=False,
    conda_env=None,
    mlflow_model=None,
    python_model=None,
    artifacts=None,
    signature: ModelSignature = None,
    input_example: ModelInputExample = None,
    pip_requirements=None,
    extra_pip_requirements=None,
    metadata=None,
    model_config=None,
    streamable=None,
    resources: str | list[Resource] | None = None,
    auth_policy: AuthPolicy | None = None,
    **kwargs,
):
    """
    Save a Pyfunc model with custom inference logic and optional data dependencies to a path on the
    local filesystem.

    For information about the workflows that this method supports, please see :ref:`"workflows for
    creating custom pyfunc models" <pyfunc-create-custom-workflows>` and
    :ref:`"which workflow is right for my use case?" <pyfunc-create-custom-selecting-workflow>`.
    Note that the parameters for the second workflow: ``loader_module``, ``data_path`` and the
    parameters for the first workflow: ``python_model``, ``artifacts``, cannot be
    specified together.

    Args:
        path: The path to which to save the Python model.
        loader_module: The name of the Python module that is used to load the model
            from ``data_path``. This module must define a method with the prototype
            ``_load_pyfunc(data_path)``. If not ``None``, this module and its
            dependencies must be included in one of the following locations:

            - The MLflow library.
            - Package(s) listed in the model's Conda environment, specified by
              the ``conda_env`` parameter.
            - One or more of the files specified by the ``code_paths`` parameter.

        data_path: Path to a file or directory containing model data.
        code_paths: {{ code_paths_pyfunc }}
        infer_code_paths: {{ infer_code_paths }}
        conda_env: {{ conda_env }}
        mlflow_model: :py:mod:`mlflow.models.Model` configuration to which to add the
            **python_function** flavor.
        python_model:
            An instance of a subclass of :class:`~PythonModel` or a callable object with a single
            argument (see the examples below). The passed-in object is serialized using the
            CloudPickle library. The python_model can also be a file path to the PythonModel
            which defines the model from code artifact rather than serializing the model object.
            Any dependencies of the class should be included in one of the
            following locations:

            - The MLflow library.
            - Package(s) listed in the model's Conda environment, specified by the ``conda_env``
              parameter.
            - One or more of the files specified by the ``code_paths`` parameter.

            Note: If the class is imported from another module, as opposed to being defined in the
            ``__main__`` scope, the defining module should also be included in one of the listed
            locations.

            **Examples**

            Class model

            .. code-block:: python

                from typing import List, Dict
                import mlflow


                class MyModel(mlflow.pyfunc.PythonModel):
                    def predict(self, context, model_input: List[str], params=None) -> List[str]:
                        return [i.upper() for i in model_input]


                mlflow.pyfunc.save_model("model", python_model=MyModel(), input_example=["a"])
                model = mlflow.pyfunc.load_model("model")
                print(model.predict(["a", "b", "c"]))  # -> ["A", "B", "C"]

            Functional model

            .. note::
                Experimental: Functional model support is experimental and may change or be removed
                in a future release without warning.

            .. code-block:: python

                from typing import List
                import mlflow


                def predict(model_input: List[str]) -> List[str]:
                    return [i.upper() for i in model_input]


                mlflow.pyfunc.save_model("model", python_model=predict, input_example=["a"])
                model = mlflow.pyfunc.load_model("model")
                print(model.predict(["a", "b", "c"]))  # -> ["A", "B", "C"]

            Model from code

            .. note::
                Experimental: Model from code model support is experimental and may change or
                be removed in a future release without warning.

            .. code-block:: python

                # code.py
                from typing import List
                import mlflow


                class MyModel(mlflow.pyfunc.PythonModel):
                    def predict(self, context, model_input: List[str], params=None) -> List[str]:
                        return [i.upper() for i in model_input]


                mlflow.models.set_model(MyModel())

                # log_model.py
                import mlflow

                with mlflow.start_run():
                    model_info = mlflow.pyfunc.log_model(
                        name="model",
                        python_model="code.py",
                    )

            If the `predict` method or function has type annotations, MLflow automatically
            constructs a model signature based on the type annotations (unless the ``signature``
            argument is explicitly specified), and converts the input value to the specified type
            before passing it to the function. Currently, the following type annotations are
            supported:

            - ``List[str]``
            - ``List[Dict[str, str]]``

        artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs
            are resolved to absolute filesystem paths, producing a dictionary of
            ``<name, absolute_path>`` entries. ``python_model`` can reference these
            resolved entries as the ``artifacts`` property of the ``context`` parameter
            in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`
            and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.
            For example, consider the following ``artifacts`` dictionary::

                {"my_file": "s3://my-bucket/path/to/my/file"}

            In this case, the ``"my_file"`` artifact is downloaded from S3. The
            ``python_model`` can then refer to ``"my_file"`` as an absolute filesystem
            path via ``context.artifacts["my_file"]``.

            If ``None``, no artifacts are added to the model.
        signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
            describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
            The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
            from datasets with valid model input (e.g. the training dataset with target
            column omitted) and valid model output (e.g. model predictions generated on
            the training dataset), for example:

            .. code-block:: python

                from mlflow.models import infer_signature

                train = df.drop_column("target_label")
                predictions = ...  # compute model predictions
                signature = infer_signature(train, predictions)

        input_example: {{ input_example }}
        pip_requirements: {{ pip_requirements }}
        extra_pip_requirements: {{ extra_pip_requirements }}
        metadata: {{ metadata }}
        model_config: The model configuration to apply to the model. The configuration will
            be available as the ``model_config`` property of the ``context`` parameter
            in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`
            and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.
            The configuration can be passed as a file path, or a dict with string keys.

            .. Note:: Experimental: This parameter may change or be removed in a future
                release without warning.
        streamable: A boolean value indicating if the model supports streaming prediction,
            If None, MLflow will try to inspect if the model supports streaming
            by checking if `predict_stream` method exists. Default None.
        resources: A list of model resources or a resources.yaml file containing a list of
            resources required to serve the model.

            .. Note:: Experimental: This parameter may change or be removed in a future
                release without warning.
        auth_policy: {{ auth_policy }}
        kwargs: Extra keyword arguments.
    """
    # --- up-front argument validation -------------------------------------
    _validate_env_arguments(conda_env, pip_requirements, extra_pip_requirements)
    _validate_pyfunc_model_config(model_config)
    _validate_and_prepare_target_save_path(path)

    # The temp dir hosts any "model from code" script while it is validated and
    # loaded; the remainder of the save flow runs inside this context.
    with tempfile.TemporaryDirectory() as temp_dir:
        model_code_path = None
        if python_model:
            # Normalize model_config: a Path/str is treated as a config file.
            if isinstance(model_config, Path):
                model_config = os.fspath(model_config)
            if isinstance(model_config, str):
                model_config = _validate_and_get_model_config_from_file(model_config)
            # Normalize python_model: a Path/str means "model from code".
            if isinstance(python_model, Path):
                python_model = os.fspath(python_model)
            if isinstance(python_model, str):
                model_code_path = _validate_and_get_model_code_path(python_model, temp_dir)
                _validate_and_copy_file_to_directory(model_code_path, path, "code")
                python_model = _load_model_code_path(model_code_path, model_config)
            _validate_function_python_model(python_model)
            # A bare callable carries no env/schema info, so at least one of
            # these must be given to produce a usable model.
            if callable(python_model) and all(
                a is None for a in (input_example, pip_requirements, extra_pip_requirements)
            ):
                raise MlflowException(
                    "If `python_model` is a callable object, at least one of `input_example`, "
                    "`pip_requirements`, or `extra_pip_requirements` must be specified."
                )
        # Accept `model=` as an alias for `mlflow_model`; reject anything else.
        mlflow_model = kwargs.pop("model", mlflow_model)
        if len(kwargs) > 0:
            raise TypeError(f"save_model() got unexpected keyword arguments: {kwargs}")
        if code_paths is not None:
            if not isinstance(code_paths, list):
                raise TypeError(f"Argument code_paths should be a list, not {type(code_paths)}")
        # Exactly one of the two workflows may be used:
        #   (1) loader_module/data_path, or (2) python_model/artifacts.
        first_argument_set = {
            "loader_module": loader_module,
            "data_path": data_path,
        }
        second_argument_set = {
            "artifacts": artifacts,
            "python_model": python_model,
        }
        first_argument_set_specified = any(item is not None for item in first_argument_set.values())
        second_argument_set_specified = any(item is not None for item in second_argument_set.values())
        if first_argument_set_specified and second_argument_set_specified:
            raise MlflowException(
                message=(
                    f"The following sets of parameters cannot be specified together:"
                    f" {first_argument_set.keys()} and {second_argument_set.keys()}."
                    " All parameters in one set must be `None`. Instead, found"
                    f" the following values: {first_argument_set} and {second_argument_set}"
                ),
                error_code=INVALID_PARAMETER_VALUE,
            )
        elif (loader_module is None) and (python_model is None):
            msg = (
                "Either `loader_module` or `python_model` must be specified. A `loader_module` "
                "should be a python module. A `python_model` should be a subclass of PythonModel"
            )
            raise MlflowException(message=msg, error_code=INVALID_PARAMETER_VALUE)

        if mlflow_model is None:
            mlflow_model = Model()
        saved_example = None
        signature_from_type_hints = None
        type_hint_from_example = None

        # --- per-model-type handling: signature, metadata, input example ---
        if isinstance(python_model, ChatModel):
            # ChatModel has a fixed, standard signature; a user-supplied one
            # is rejected rather than silently ignored.
            if signature is not None:
                raise MlflowException(
                    "ChatModel subclasses have a standard signature that is set "
                    "automatically. Please remove the `signature` parameter from "
                    "the call to log_model() or save_model().",
                    error_code=INVALID_PARAMETER_VALUE,
                )
            mlflow_model.signature = ModelSignature(
                CHAT_MODEL_INPUT_SCHEMA,
                CHAT_MODEL_OUTPUT_SCHEMA,
            )
            # For ChatModel we set default metadata to indicate its task
            default_metadata = {TASK: _DEFAULT_CHAT_MODEL_METADATA_TASK}
            mlflow_model.metadata = default_metadata | (mlflow_model.metadata or {})
            if input_example:
                input_example, input_params = _split_input_data_and_params(input_example)
                valid_params = {}
                if isinstance(input_example, list):
                    # A bare list is interpreted as a list of chat messages.
                    messages = [
                        message if isinstance(message, ChatMessage) else ChatMessage.from_dict(message)
                        for message in input_example
                    ]
                else:
                    # If the input example is a dictionary, convert it to ChatMessage format
                    messages = [
                        ChatMessage.from_dict(m) if isinstance(m, dict) else m
                        for m in input_example["messages"]
                    ]
                    valid_params = {
                        k: v
                        for k, v in input_example.items()
                        if k != "messages" and k in ChatParams.keys()
                    }
                if valid_params or input_params:
                    _logger.warning(_CHAT_PARAMS_WARNING_MESSAGE)
                input_example = {
                    "messages": [m.to_dict() for m in messages],
                    **valid_params,
                    **(input_params or {}),
                }
            else:
                input_example = CHAT_MODEL_INPUT_EXAMPLE
                _logger.warning(_CHAT_PARAMS_WARNING_MESSAGE)
                messages = [ChatMessage.from_dict(m) for m in input_example["messages"]]
            # extra params introduced by ChatParams will not be included in the
            # logged input example file to avoid confusion
            _save_example(mlflow_model, input_example, path)
            params = ChatParams.from_dict(input_example)
            # call load_context() first, as predict may depend on it
            _logger.info("Predicting on input example to validate output")
            context = PythonModelContext(artifacts, model_config)
            python_model.load_context(context)
            if "context" in inspect.signature(python_model.predict).parameters:
                output = python_model.predict(context, messages, params)
            else:
                output = python_model.predict(messages, params)
            if not isinstance(output, ChatCompletionResponse):
                raise MlflowException(
                    "Failed to save ChatModel. Please ensure that the model's predict() method "
                    "returns a ChatCompletionResponse object. If your predict() method currently "
                    "returns a dict, you can instantiate a ChatCompletionResponse using "
                    "`from_dict()`, e.g. `ChatCompletionResponse.from_dict(output)`",
                )
        elif isinstance(python_model, ChatAgent):
            input_example = _save_model_chat_agent_helper(
                python_model, mlflow_model, signature, input_example
            )
        elif IS_RESPONSES_AGENT_AVAILABLE and isinstance(python_model, ResponsesAgent):
            input_example = _save_model_responses_agent_helper(
                python_model, mlflow_model, signature, input_example, artifacts, model_config
            )
        elif callable(python_model) or isinstance(python_model, PythonModel):
            # Generic PythonModel / functional model: try to infer a signature
            # from type hints first, then fall back to the input example.
            model_for_signature_inference = None
            if callable(python_model):
                # first argument is the model input
                type_hints = _extract_type_hints(python_model, input_arg_index=0)
                pyfunc_decorator_used = getattr(python_model, "_is_pyfunc", False)
                # only show the warning here if @pyfunc is not applied on the function
                # since @pyfunc will trigger the warning instead
                if type_hints.input is None and not pyfunc_decorator_used:
                    color_warning(
                        "Add type hints to the `predict` method to enable "
                        "data validation and automatic signature inference. Check "
                        "https://mlflow.org/docs/latest/model/python_model.html#type-hint-usage-in-pythonmodel"
                        " for more details.",
                        stacklevel=1,
                        color="yellow",
                    )
                model_for_signature_inference = _FunctionPythonModel(python_model)
            elif isinstance(python_model, PythonModel):
                type_hints = python_model.predict_type_hints
                model_for_signature_inference = python_model
            context = PythonModelContext(artifacts, model_config)
            type_hint_from_example = _is_type_hint_from_example(type_hints.input)
            if type_hint_from_example:
                should_infer_signature_from_type_hints = False
            else:
                should_infer_signature_from_type_hints = (
                    not _signature_cannot_be_inferred_from_type_hint(type_hints.input)
                )
            if should_infer_signature_from_type_hints:
                # context is only loaded when input_example exists
                signature_from_type_hints = _infer_signature_from_type_hints(
                    python_model=python_model,
                    context=context,
                    type_hints=type_hints,
                    input_example=input_example,
                )
            # only infer signature based on input example when signature
            # and type hints are not provided
            if signature is None and signature_from_type_hints is None:
                saved_example = _save_example(mlflow_model, input_example, path)
                if saved_example is not None:
                    _logger.info("Inferring model signature from input example")
                    try:
                        model_for_signature_inference.load_context(context)
                        mlflow_model.signature = _infer_signature_from_input_example(
                            saved_example,
                            _PythonModelPyfuncWrapper(model_for_signature_inference, context, None),
                        )
                    except Exception as e:
                        _logger.warning(
                            f"Failed to infer model signature from input example, error: {e}",
                        )
                    else:
                        if type_hint_from_example and mlflow_model.signature:
                            update_signature_for_type_hint_from_example(
                                input_example, mlflow_model.signature
                            )
                else:
                    if type_hint_from_example:
                        _logger.warning(
                            _TYPE_FROM_EXAMPLE_ERROR_MESSAGE,
                            extra={"color": "red"},
                        )
                    # if signature is inferred from type hints, warnings are emitted
                    # in _infer_signature_from_type_hints
                    elif not should_infer_signature_from_type_hints:
                        _logger.warning(
                            "Failed to infer model signature: "
                            f"Type hint {type_hints} cannot be used to infer model signature and "
                            "input example is not provided, model signature cannot be inferred."
                        )

        # --- finalize metadata, signature, and input example ---------------
        if metadata is not None:
            mlflow_model.metadata = metadata
        if saved_example is None:
            saved_example = _save_example(mlflow_model, input_example, path)
        if signature_from_type_hints:
            # A type-hint-derived signature always wins over an explicit one.
            if signature and signature_from_type_hints != signature:
                # TODO: drop this support and raise exception in the next minor release since this
                # is a behavior change
                _logger.warning(
                    "Provided signature does not match the signature inferred from the Python model's "
                    "`predict` function type hint. Signature inferred from type hint will be used:\n"
                    f"{signature_from_type_hints}\nRemove the `signature` parameter or ensure it "
                    "matches the inferred signature. In a future release, this warning will become an "
                    "exception, and the signature must align with the type hint.",
                    extra={"color": "red"},
                )
            mlflow_model.signature = signature_from_type_hints
        elif signature:
            mlflow_model.signature = signature
            if type_hint_from_example:
                if saved_example is None:
                    _logger.warning(
                        _TYPE_FROM_EXAMPLE_ERROR_MESSAGE,
                        extra={"color": "red"},
                    )
                else:
                    # TODO: validate input example against signature
                    update_signature_for_type_hint_from_example(input_example, mlflow_model.signature)
            else:
                if saved_example is None:
                    color_warning(
                        message="An input example was not provided when logging the model. To ensure "
                        "the model signature functions correctly, specify the `input_example` "
                        "parameter. See "
                        "https://mlflow.org/docs/latest/model/signatures.html#model-input-example "
                        "for more details about the benefits of using input_example.",
                        stacklevel=1,
                        color="yellow_bold",
                    )
                else:
                    _logger.info("Validating input example against model signature")
                    try:
                        _validate_prediction_input(
                            data=saved_example.inference_data,
                            params=saved_example.inference_params,
                            input_schema=signature.inputs,
                            params_schema=signature.params,
                        )
                    except Exception as e:
                        raise MlflowException.invalid_parameter_value(
                            f"Input example does not match the model signature. {e}"
                        )
        # Record any dependency schemas (e.g. retriever schemas) in metadata.
        with _get_dependencies_schemas() as dependencies_schemas:
            schema = dependencies_schemas.to_dict()
            if schema is not None:
                if mlflow_model.metadata is None:
                    mlflow_model.metadata = {}
                mlflow_model.metadata.update(schema)
        if resources is not None:
            if isinstance(resources, (Path, str)):
                serialized_resource = _ResourceBuilder.from_yaml_file(resources)
            else:
                serialized_resource = _ResourceBuilder.from_resources(resources)
            mlflow_model.resources = serialized_resource
        if auth_policy is not None:
            mlflow_model.auth_policy = auth_policy
        # --- dispatch to the workflow-specific serializer -------------------
        # Exactly one of these is guaranteed true by the validation above.
        if first_argument_set_specified:
            return _save_model_with_loader_module_and_data_path(
                path=path,
                loader_module=loader_module,
                data_path=data_path,
                code_paths=code_paths,
                conda_env=conda_env,
                mlflow_model=mlflow_model,
                pip_requirements=pip_requirements,
                extra_pip_requirements=extra_pip_requirements,
                model_config=model_config,
                streamable=streamable,
                infer_code_paths=infer_code_paths,
            )
        elif second_argument_set_specified:
            return mlflow.pyfunc.model._save_model_with_class_artifacts_params(
                path=path,
                signature=signature,
                python_model=python_model,
                artifacts=artifacts,
                conda_env=conda_env,
                code_paths=code_paths,
                mlflow_model=mlflow_model,
                pip_requirements=pip_requirements,
                extra_pip_requirements=extra_pip_requirements,
                model_config=model_config,
                streamable=streamable,
                model_code_path=model_code_path,
                infer_code_paths=infer_code_paths,
            )
def update_signature_for_type_hint_from_example(input_example: Any, signature: ModelSignature):
    """Flag ``signature`` as derived from a TypeFromExample hint when the example is usable.

    If the input example is not one of the supported container types, only a
    warning is emitted and the signature is left untouched.
    """
    if not _is_example_valid_for_type_from_example(input_example):
        _logger.warning(
            "Input example must be one of pandas.DataFrame, pandas.Series "
            f"or list when using TypeFromExample as type hint, got {type(input_example)}. "
            "Check https://mlflow.org/docs/latest/model/python_model.html#typefromexample-type-hint-usage"
            " for more details.",
        )
        return
    signature._is_type_hint_from_example = True
@format_docstring(LOG_MODEL_PARAM_DOCS.format(package_name="scikit-learn"))
@trace_disabled  # Suppress traces for internal predict calls while logging model
def log_model(
    artifact_path=None,
    loader_module=None,
    data_path=None,
    code_paths=None,
    infer_code_paths=False,
    conda_env=None,
    python_model=None,
    artifacts=None,
    registered_model_name=None,
    signature: ModelSignature = None,
    input_example: ModelInputExample = None,
    await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,
    pip_requirements=None,
    extra_pip_requirements=None,
    metadata=None,
    model_config=None,
    streamable=None,
    resources: str | list[Resource] | None = None,
    auth_policy: AuthPolicy | None = None,
    prompts: list[str | Prompt] | None = None,
    name=None,
    params: dict[str, Any] | None = None,
    tags: dict[str, Any] | None = None,
    model_type: str | None = None,
    step: int = 0,
    model_id: str | None = None,
):
    """
    Log a Pyfunc model with custom inference logic and optional data dependencies as an MLflow
    artifact for the current run.

    For information about the workflows that this method supports, see :ref:`Workflows for
    creating custom pyfunc models <pyfunc-create-custom-workflows>` and
    :ref:`Which workflow is right for my use case? <pyfunc-create-custom-selecting-workflow>`.
    You cannot specify the parameters for the second workflow: ``loader_module``, ``data_path``
    and the parameters for the first workflow: ``python_model``, ``artifacts`` together.

    Args:
        artifact_path: Deprecated. Use `name` instead.
        loader_module: The name of the Python module that is used to load the model
            from ``data_path``. This module must define a method with the prototype
            ``_load_pyfunc(data_path)``. If not ``None``, this module and its
            dependencies must be included in one of the following locations:

            - The MLflow library.
            - Package(s) listed in the model's Conda environment, specified by
              the ``conda_env`` parameter.
            - One or more of the files specified by the ``code_paths`` parameter.

        data_path: Path to a file or directory containing model data.
        code_paths: {{ code_paths_pyfunc }}
        infer_code_paths: {{ infer_code_paths }}
        conda_env: {{ conda_env }}
        python_model:
            An instance of a subclass of :class:`~PythonModel` or a callable object with a single
            argument (see the examples below). The passed-in object is serialized using the
            CloudPickle library. The python_model can also be a file path to the PythonModel
            which defines the model from code artifact rather than serializing the model object.
            Any dependencies of the class should be included in one of the
            following locations:

            - The MLflow library.
            - Package(s) listed in the model's Conda environment, specified by the ``conda_env``
              parameter.
            - One or more of the files specified by the ``code_paths`` parameter.

            Note: If the class is imported from another module, as opposed to being defined in the
            ``__main__`` scope, the defining module should also be included in one of the listed
            locations.

            **Examples**

            Class model

            .. code-block:: python

                from typing import List
                import mlflow


                class MyModel(mlflow.pyfunc.PythonModel):
                    def predict(self, context, model_input: List[str], params=None) -> List[str]:
                        return [i.upper() for i in model_input]


                with mlflow.start_run():
                    model_info = mlflow.pyfunc.log_model(
                        name="model",
                        python_model=MyModel(),
                    )

                loaded_model = mlflow.pyfunc.load_model(model_uri=model_info.model_uri)
                print(loaded_model.predict(["a", "b", "c"]))  # -> ["A", "B", "C"]

            Functional model

            .. note::
                Experimental: Functional model support is experimental and may change or be removed
                in a future release without warning.

            .. code-block:: python

                from typing import List
                import mlflow


                def predict(model_input: List[str]) -> List[str]:
                    return [i.upper() for i in model_input]


                with mlflow.start_run():
                    model_info = mlflow.pyfunc.log_model(
                        name="model", python_model=predict, input_example=["a"]
                    )

                loaded_model = mlflow.pyfunc.load_model(model_uri=model_info.model_uri)
                print(loaded_model.predict(["a", "b", "c"]))  # -> ["A", "B", "C"]

            Model from code

            .. note::
                Experimental: Model from code model support is experimental and may change or
                be removed in a future release without warning.

            .. code-block:: python

                # code.py
                from typing import List
                import mlflow


                class MyModel(mlflow.pyfunc.PythonModel):
                    def predict(self, context, model_input: List[str], params=None) -> List[str]:
                        return [i.upper() for i in model_input]


                mlflow.models.set_model(MyModel())

                # log_model.py
                import mlflow

                with mlflow.start_run():
                    model_info = mlflow.pyfunc.log_model(
                        name="model",
                        python_model="code.py",
                    )

            If the `predict` method or function has type annotations, MLflow automatically
            constructs a model signature based on the type annotations (unless the ``signature``
            argument is explicitly specified), and converts the input value to the specified type
            before passing it to the function. Currently, the following type annotations are
            supported:

            - ``List[str]``
            - ``List[Dict[str, str]]``

        artifacts: A dictionary containing ``<name, artifact_uri>`` entries. Remote artifact URIs
            are resolved to absolute filesystem paths, producing a dictionary of
            ``<name, absolute_path>`` entries. ``python_model`` can reference these
            resolved entries as the ``artifacts`` property of the ``context`` parameter
            in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`
            and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.
            For example, consider the following ``artifacts`` dictionary::

                {"my_file": "s3://my-bucket/path/to/my/file"}

            In this case, the ``"my_file"`` artifact is downloaded from S3. The
            ``python_model`` can then refer to ``"my_file"`` as an absolute filesystem
            path via ``context.artifacts["my_file"]``.

            If ``None``, no artifacts are added to the model.
        registered_model_name: If given, create a model
            version under ``registered_model_name``, also creating a
            registered model if one with the given name does not exist.
        signature: :py:class:`ModelSignature <mlflow.models.ModelSignature>`
            describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
            The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
            from datasets with valid model input (e.g. the training dataset with target
            column omitted) and valid model output (e.g. model predictions generated on
            the training dataset), for example:

            .. code-block:: python

                from mlflow.models import infer_signature

                train = df.drop_column("target_label")
                predictions = ...  # compute model predictions
                signature = infer_signature(train, predictions)

        input_example: {{ input_example }}
        await_registration_for: Number of seconds to wait for the model version to finish
            being created and is in ``READY`` status. By default, the function
            waits for five minutes. Specify 0 or None to skip waiting.
        pip_requirements: {{ pip_requirements }}
        extra_pip_requirements: {{ extra_pip_requirements }}
        metadata: {{ metadata }}
        model_config: The model configuration to apply to the model. The configuration will
            be available as the ``model_config`` property of the ``context`` parameter
            in :func:`PythonModel.load_context() <mlflow.pyfunc.PythonModel.load_context>`
            and :func:`PythonModel.predict() <mlflow.pyfunc.PythonModel.predict>`.
            The configuration can be passed as a file path, or a dict with string keys.

            .. Note:: Experimental: This parameter may change or be removed in a future
                release without warning.
        streamable: A boolean value indicating if the model supports streaming prediction,
            If None, MLflow will try to inspect if the model supports streaming
            by checking if `predict_stream` method exists. Default None.
        resources: A list of model resources or a resources.yaml file containing a list of
            resources required to serve the model.

            .. Note:: Experimental: This parameter may change or be removed in a future
                release without warning.
        auth_policy: {{ auth_policy }}
        prompts: {{ prompts }}
        name: {{ name }}
        params: {{ params }}
        tags: {{ tags }}
        model_type: {{ model_type }}
        step: {{ step }}
        model_id: {{ model_id }}

    Returns:
        A :py:class:`ModelInfo <mlflow.models.model.ModelInfo>` instance that contains the
        metadata of the logged model.
    """
    # Derive a descriptive flavor name (e.g. "pyfunc.ChatModel") from the
    # python_model's type; Model.log uses it only for telemetry/type checks.
    flavor_name = _get_pyfunc_model_flavor_name(python_model)
    # Delegate the full save/register workflow to Model.log, which calls back
    # into this module's save_model() via the `flavor` argument.
    return Model.log(
        artifact_path=artifact_path,
        name=name,
        flavor=mlflow.pyfunc,
        loader_module=loader_module,
        data_path=data_path,
        code_paths=code_paths,
        python_model=python_model,
        artifacts=artifacts,
        conda_env=conda_env,
        registered_model_name=registered_model_name,
        signature=signature,
        input_example=input_example,
        await_registration_for=await_registration_for,
        pip_requirements=pip_requirements,
        extra_pip_requirements=extra_pip_requirements,
        metadata=metadata,
        prompts=prompts,
        model_config=model_config,
        streamable=streamable,
        resources=resources,
        infer_code_paths=infer_code_paths,
        auth_policy=auth_policy,
        params=params,
        tags=tags,
        model_type=model_type,
        step=step,
        model_id=model_id,
        # only used for checking python model type
        flavor_name=flavor_name,
    )
def _get_pyfunc_model_flavor_name(python_model: Any) -> str:
if python_model is None:
return "pyfunc"
if isinstance(python_model, str):
return "pyfunc.ModelFromCode"
if IS_RESPONSES_AGENT_AVAILABLE and isinstance(python_model, ResponsesAgent):
return "pyfunc.ResponsesAgent"
if isinstance(python_model, ChatAgent):
return "pyfunc.ChatAgent"
if isinstance(python_model, ChatModel):
return "pyfunc.ChatModel"
if isinstance(python_model, PythonModel):
return "pyfunc.CustomPythonModel"
return "pyfunc"
def _save_model_with_loader_module_and_data_path(
    path,
    loader_module,
    data_path=None,
    code_paths=None,
    conda_env=None,
    mlflow_model=None,
    pip_requirements=None,
    extra_pip_requirements=None,
    model_config=None,
    streamable=None,
    infer_code_paths=False,
):
    """
    Export model as a generic Python function model.

    Args:
        path: The path to which to save the Python model.
        loader_module: The name of the Python module that is used to load the model
            from ``data_path``. This module must define a method with the prototype
            ``_load_pyfunc(data_path)``.
        data_path: Path to a file or directory containing model data.
        code_paths: A list of local filesystem paths to Python file dependencies (or directories
            containing file dependencies). These files are *prepended* to the system
            path before the model is loaded.
        conda_env: Either a dictionary representation of a Conda environment or the path to a
            Conda environment yaml file. If provided, this describes the environment
            this model should be run in.
        streamable: A boolean value indicating if the model supports streaming prediction,
            None value also means not streamable.

    Returns:
        Model configuration containing model info.
    """
    # Copy the model data into the output directory, if provided.
    data = None
    if data_path is not None:
        model_file = _copy_file_or_tree(src=data_path, dst=path, dst_dir="data")
        data = model_file
    if mlflow_model is None:
        mlflow_model = Model()
    # Normalize None to False: absence of the flag means "not streamable".
    streamable = streamable or False
    mlflow.pyfunc.add_to_model(
        mlflow_model,
        loader_module=loader_module,
        code=None,
        data=data,
        conda_env=_CONDA_ENV_FILE_NAME,
        python_env=_PYTHON_ENV_FILE_NAME,
        model_config=model_config,
        streamable=streamable,
    )
    if size := get_total_file_size(path):
        mlflow_model.model_size_bytes = size
    # First save: an MLmodel file must exist on disk before code paths are
    # inferred and before pip requirements are inferred below.
    mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))

    code_dir_subpath = _validate_infer_and_copy_code_paths(
        code_paths, path, infer_code_paths, FLAVOR_NAME
    )
    mlflow_model.flavors[FLAVOR_NAME][CODE] = code_dir_subpath

    # `mlflow_model.code` is updated, re-generate `MLmodel` file.
    mlflow_model.save(os.path.join(path, MLMODEL_FILE_NAME))

    if conda_env is None:
        if pip_requirements is None:
            default_reqs = get_default_pip_requirements()
            extra_env_vars = (
                _get_databricks_serverless_env_vars()
                if is_in_databricks_serverless_runtime()
                else None
            )
            # To ensure `_load_pyfunc` can successfully load the model during the dependency
            # inference, `mlflow_model.save` must be called beforehand to save an MLmodel file.
            inferred_reqs = mlflow.models.infer_pip_requirements(
                path,
                FLAVOR_NAME,
                fallback=default_reqs,
                extra_env_vars=extra_env_vars,
            )
            default_reqs = sorted(set(inferred_reqs).union(default_reqs))
        else:
            # Explicit pip_requirements override any inference.
            default_reqs = None
        conda_env, pip_requirements, pip_constraints = _process_pip_requirements(
            default_reqs,
            pip_requirements,
            extra_pip_requirements,
        )
    else:
        conda_env, pip_requirements, pip_constraints = _process_conda_env(conda_env)

    # Write out the environment files describing the runtime dependencies.
    with open(os.path.join(path, _CONDA_ENV_FILE_NAME), "w") as f:
        yaml.safe_dump(conda_env, stream=f, default_flow_style=False)

    # Save `constraints.txt` if necessary
    if pip_constraints:
        write_to(os.path.join(path, _CONSTRAINTS_FILE_NAME), "\n".join(pip_constraints))

    # Save `requirements.txt`
    write_to(os.path.join(path, _REQUIREMENTS_FILE_NAME), "\n".join(pip_requirements))

    _PythonEnv.current().to_yaml(os.path.join(path, _PYTHON_ENV_FILE_NAME))
    return mlflow_model
def _save_model_chat_agent_helper(python_model, mlflow_model, signature, input_example):
"""Helper method for save_model for ChatAgent models
Returns: a dict input_example
"""
if signature is not None:
raise MlflowException(
"ChatAgent subclasses have a standard signature that is set "
"automatically. Please remove the `signature` parameter from "
"the call to log_model() or save_model().",
error_code=INVALID_PARAMETER_VALUE,
)
mlflow_model.signature = ModelSignature(
inputs=CHAT_AGENT_INPUT_SCHEMA,
outputs=CHAT_AGENT_OUTPUT_SCHEMA,
)
# For ChatAgent we set default metadata to indicate its task
default_metadata = {TASK: _DEFAULT_CHAT_AGENT_METADATA_TASK}
mlflow_model.metadata = default_metadata | (mlflow_model.metadata or {})
# We accept a dict with ChatAgentRequest schema
if input_example:
try:
model_validate(ChatAgentRequest, input_example)
except pydantic.ValidationError as e:
raise MlflowException(
message=(
f"Invalid input example. Expected a ChatAgentRequest object or dictionary with"
f" its schema. Pydantic validation error: {e}"
),
error_code=INTERNAL_ERROR,
) from e
if isinstance(input_example, ChatAgentRequest):
input_example = input_example.model_dump(exclude_none=True)
else:
input_example = CHAT_AGENT_INPUT_EXAMPLE
_logger.info("Predicting on input example to validate output")
request = ChatAgentRequest(**input_example)
output = python_model.predict(request.messages, request.context, request.custom_inputs)
try:
model_validate(ChatAgentResponse, output)
except Exception as e:
raise MlflowException(
"Failed to save ChatAgent. Ensure your model's predict() method returns a "
"ChatAgentResponse object or a dict with the same schema."
f"Pydantic validation error: {e}"
) from e
return input_example
def _save_model_responses_agent_helper(
python_model, mlflow_model, signature, input_example, artifacts, model_config
):
"""Helper method for save_model for ResponsesAgent models
Returns: a dictionary input example
"""
from mlflow.types.responses import (
RESPONSES_AGENT_INPUT_EXAMPLE,
RESPONSES_AGENT_INPUT_SCHEMA,
RESPONSES_AGENT_OUTPUT_SCHEMA,
ResponsesAgentRequest,
ResponsesAgentResponse,
)
if signature is not None:
raise MlflowException(
"ResponsesAgent subclasses have a standard signature that is set "
"automatically. Please remove the `signature` parameter from "
"the call to log_model() or save_model().",
error_code=INVALID_PARAMETER_VALUE,
)
mlflow_model.signature = ModelSignature(
inputs=RESPONSES_AGENT_INPUT_SCHEMA,
outputs=RESPONSES_AGENT_OUTPUT_SCHEMA,
)
# For ResponsesAgent we set default metadata to indicate its task
default_metadata = {TASK: _DEFAULT_RESPONSES_AGENT_METADATA_TASK}
mlflow_model.metadata = default_metadata | (mlflow_model.metadata or {})
# We accept either a dict or a ResponsesRequest object as input
if input_example:
try:
model_validate(ResponsesAgentRequest, input_example)
except pydantic.ValidationError as e:
raise MlflowException(
message=(
f"Invalid input example. Expected a ResponsesRequest object or dictionary with"
f" its schema. Pydantic validation error: {e}"
),
error_code=INTERNAL_ERROR,
) from e
if isinstance(input_example, ResponsesAgentRequest):
input_example = input_example.model_dump(exclude_none=True)
else:
input_example = RESPONSES_AGENT_INPUT_EXAMPLE
_logger.info("Predicting on input example to validate output")
context = PythonModelContext(artifacts, model_config)
python_model.load_context(context)
request = ResponsesAgentRequest(**input_example)
output = python_model.predict(request)
try:
model_validate(ResponsesAgentResponse, output)
except Exception as e:
raise MlflowException(
"Failed to save ResponsesAgent. Ensure your model's predict() method returns a "
"ResponsesResponse object or a dict with the same schema."
f"Pydantic validation error: {e}"
) from e
return input_example
| _ServedPyFuncModel |
python | realpython__materials | python-built-in-functions/stack.py | {
"start": 0,
"end": 279
} | class ____:
def __init__(self, items=None):
self.items = list(items) if items is not None else []
def push(self, item):
self.items.append(item)
def pop(self):
return self.items.pop()
def __bool__(self):
return bool(self.items)
| Stack |
python | pytorch__pytorch | torch/distributed/flight_recorder/components/types.py | {
"start": 12011,
"end": 21817
} | class ____:
"""Parses relevant info about operation out of 'event' dict
examples of supported `profiling_name`s:
nccl:broadcast
nccl:send 1->2
nccl:recv 3<-0
"""
def __init__(
self, event: dict[Any, Any], memberships: dict[str, set[Any]], pg_name: str
):
self.profiling_name = event["profiling_name"]
comm_lib_backend, name = self.profiling_name.split(":")
assert comm_lib_backend in ["nccl", "xccl"], (
f"name formatting error? {comm_lib_backend} != 'nccl' or 'xccl'"
)
parts = name.split(" ")
type = parts[0]
meta = parts[1] if len(parts) == 2 else None
self.state = event["state"]
# Store the hashed pg_name for accessing memberships, and original pg info for display
self.pg_name = pg_name # This is the hashed version used for memberships lookup
self.original_pg_name, self.pg_desc = event["process_group"]
assert type in COLLECTIVES | P2P | {"coalesced"}, (
f"{type} is not a supported operation"
)
self.type = type
if type == "send":
assert isinstance(meta, str)
s, d = meta.split("->")
self._src, self._dst = int(s), int(d)
elif type == "recv":
assert isinstance(meta, str)
d, s = meta.split("<-")
self._dst, self._src = int(d), int(s)
else:
self._src, self._dst = -1, -1
self._init_global_src_dst(memberships[pg_name])
self.pg_size = len(memberships[pg_name])
if type in P2P | COLLECTIVES:
self.input_sizes = event["input_sizes"]
self.output_sizes = event["output_sizes"]
else:
self.input_sizes, self.output_sizes = None, None
self.collective_seq_id = event["collective_seq_id"]
self.stack_id = event.get("stack_id", -1)
self.p2p_seq_id = event["p2p_seq_id"]
self.input_dtypes = event["input_dtypes"]
self.output_dtypes = event["output_dtypes"]
self.time_created_ns = event["time_created_ns"]
self.collective_frames = event.get("frames", [])
self.is_verbose = os.getenv("FR_TRACE_VERBOSE_OUTPUT", "0") == "1"
def _init_global_src_dst(self, pg_ranks: set[Any]) -> None:
pg_ranks_sorted = sorted(pg_ranks)
self._src_g = pg_ranks_sorted[self._src] if self._src is not None else None
self._dst_g = pg_ranks_sorted[self._dst] if self._dst is not None else None
@property
def src(self) -> int:
assert self.type in P2P, "can't get src of non-p2p op"
return self._src
@property
def dst(self) -> int:
assert self.type in P2P, "can't get dst of non-p2p op"
return self._dst
def __repr__(self) -> str:
p2p_info = ""
if self.type in P2P:
p2p_info = f"s={self._src_g} d={self._dst_g}"
if self.is_verbose:
verbose_info = (
f"timestamp_created={self.time_created_ns}",
p2p_info,
f"input_sizes={self.input_sizes}",
f"output_sizes={self.output_sizes}",
f"input_dtypes={self.input_dtypes}",
f"output_dtypes={self.output_dtypes}",
"collective_seq_id | p2p_seq_id="
f"{self.p2p_seq_id if self.type in P2P else self.collective_seq_id}",
f"pg_name={self.pg_name}",
f"pg_description={self.pg_desc}",
f"pg_size={self.pg_size}",
f"stack_id={self.stack_id}",
f"state={self.state}",
)
return f"{self.type}(%s)" % ", ".join(s for s in verbose_info if s)
return f"{self.type}(%sinput_sizes={self.input_sizes}, state={self.state})" % (
f"{p2p_info}, " if p2p_info else ""
)
def dtype_mismatch(self, other: "Op") -> bool:
if (
(
self.type not in ["scatter", "gather", "broadcast"]
and set(self.input_dtypes) != set(self.output_dtypes)
and self.input_sizes[0]
and self.output_sizes[0]
)
or (
self.type not in ["scatter", "broadcast"]
and set(self.input_dtypes) != set(other.input_dtypes)
and self.input_sizes[0]
and other.input_sizes[0]
)
or (
self.type not in ["gather"]
and set(self.output_dtypes) != set(other.output_dtypes)
and self.output_sizes[0]
and other.output_sizes[0]
)
):
return True
return False
def match(self, other: "Op") -> MatchInfo:
# TODO: I think this can validly not match,
# e.g. if one PG was used for p2p ops between only some of the peers?
# if self.seq_id != other.seq_id:
# return False
if self.type == "send":
# TODO: We need more states for p2p ops.
return (
MatchInfo(MatchState.FULLY_MATCHED)
if (
other.type == "recv"
and self.src == other.src
and self.dst == other.dst
and self.input_sizes == other.output_sizes
)
else MatchInfo(MatchState.SIZE_OR_SYNTAX_MISMATCH)
)
elif self.type == "recv":
return (
MatchInfo(MatchState.FULLY_MATCHED)
if (
other.type == "send"
and self.src == other.src
and self.dst == other.dst
and self.output_sizes == other.input_sizes
)
else MatchInfo(MatchState.SIZE_OR_SYNTAX_MISMATCH)
)
elif self.type in COLLECTIVES:
if self.type != other.type:
return MatchInfo(
MatchState.COLLECTIVE_TYPE_MISMATCH,
f"Expected collective type: '{self.type}' does not match found collective type: '{other.type}'",
)
if (
self.type not in ["all_to_all", "scatter"]
and self.input_sizes != other.input_sizes
):
return MatchInfo(
MatchState.SIZE_OR_SYNTAX_MISMATCH,
f"Expected input sizes: '{self.input_sizes}' does not match found input sizes: "
f"'{other.input_sizes}'",
)
if (
self.type not in ["all_to_all", "gather"]
and self.output_sizes != other.output_sizes
):
return MatchInfo(
MatchState.SIZE_OR_SYNTAX_MISMATCH,
f"Expected output sizes: '{self.output_sizes}' does not match found output sizes: "
f"'{other.output_sizes}'",
)
if (
self.type in ["all_reduce", "allreduce_coalesced"]
and self.input_sizes != other.output_sizes
):
return MatchInfo(
MatchState.SIZE_OR_SYNTAX_MISMATCH,
f"Expected input sizes: '{self.input_sizes}' does not match found output sizes: '{other.output_sizes}'",
)
if (
self.type
in [
"all_gather",
"all_gather_base",
"all_gather_into_tensor_coalesced",
]
and math.prod(other.output_sizes[0])
!= math.prod(self.input_sizes[0]) * self.pg_size
):
return MatchInfo(
MatchState.SIZE_OR_SYNTAX_MISMATCH,
f"Found input numel '{math.prod(other.input_sizes[0])} * pg size {self.pg_size}' "
f"does not match output numel '{math.prod(other.output_sizes[0])}'",
)
if (
self.type
in [
"reduce_scatter",
"_reduce_scatter_base",
"reduce_scatter_tensor_coalesced",
]
and math.prod(other.input_sizes[0])
!= math.prod(self.output_sizes[0]) * self.pg_size
):
return MatchInfo(
MatchState.SIZE_OR_SYNTAX_MISMATCH,
f"Found input numel '{math.prod(other.input_sizes[0])}' does not match output numel "
f"'{math.prod(other.output_sizes[0])} * pg size {self.pg_size}'",
)
if self.dtype_mismatch(other):
return MatchInfo(
MatchState.COLLECTIVE_DTYPE_MISMATCH,
f"Expected dtypes: '{set(self.input_dtypes)}' does not "
f"match found dtype: '{set(self.output_dtypes)}/"
f"{set(other.input_dtypes)}/{set(other.output_dtypes)}'",
)
if self.state != other.state:
# MatchState()
return MatchInfo(
MatchState.COLLECTIVE_STATE_MISMATCH,
f"Expected state: '{self.state}' does not match found state: '{other.state}'",
)
if self.type == "all_to_all":
return MatchInfo(MatchState.UNDECIDED)
elif self.type in [
"coalesced",
"ALLGATHER_coalesced",
"REDUCE_SCATTER_coalesced",
]:
return (
MatchInfo(MatchState.FULLY_MATCHED)
if (other.type == self.type)
else MatchInfo(MatchState.SIZE_OR_SYNTAX_MISMATCH)
)
return MatchInfo(MatchState.FULLY_MATCHED)
| Op |
python | astropy__astropy | astropy/logger.py | {
"start": 19615,
"end": 19825
} | class ____:
"""A filter for the record origin."""
def __init__(self, origin):
self.origin = origin
def filter(self, record):
return record.origin.startswith(self.origin)
| FilterOrigin |
python | jupyterlab__jupyterlab | jupyterlab/labextensions.py | {
"start": 7413,
"end": 8482
} | class ____(BaseExtensionApp):
description = "(developer) Develop labextension"
flags = develop_flags
user = Bool(False, config=True, help="Whether to do a user install")
sys_prefix = Bool(True, config=True, help="Use the sys.prefix as the prefix")
overwrite = Bool(False, config=True, help="Whether to overwrite files")
symlink = Bool(True, config=False, help="Whether to use a symlink")
labextensions_dir = Unicode(
"",
config=True,
help="Full path to labextensions dir (probably use prefix or user)",
)
def run_task(self):
"""Add config for this labextension"""
self.extra_args = self.extra_args or [os.getcwd()]
for arg in self.extra_args:
develop_labextension_py(
arg,
user=self.user,
sys_prefix=self.sys_prefix,
labextensions_dir=self.labextensions_dir,
logger=self.log,
overwrite=self.overwrite,
symlink=self.symlink,
)
| DevelopLabExtensionApp |
python | tensorflow__tensorflow | tensorflow/python/debug/lib/debug_events_monitors.py | {
"start": 3081,
"end": 4400
} | class ____(object):
"""Alert for Infinity and NaN values."""
def __init__(self,
wall_time,
op_type,
output_slot,
size=None,
num_neg_inf=None,
num_pos_inf=None,
num_nan=None,
execution_index=None,
graph_execution_trace_index=None):
self._wall_time = wall_time
self._op_type = op_type
self._output_slot = output_slot
self._size = size
self._num_neg_inf = num_neg_inf
self._num_pos_inf = num_pos_inf
self._num_nan = num_nan
self._execution_index = execution_index
self._graph_execution_trace_index = graph_execution_trace_index
@property
def wall_time(self):
return self._wall_time
@property
def op_type(self):
return self._op_type
@property
def output_slot(self):
return self._output_slot
@property
def size(self):
return self._size
@property
def num_neg_inf(self):
return self._num_neg_inf
@property
def num_pos_inf(self):
return self._num_pos_inf
@property
def num_nan(self):
return self._num_nan
@property
def execution_index(self):
return self._execution_index
@property
def graph_execution_trace_index(self):
return self._graph_execution_trace_index
| InfNanAlert |
python | getsentry__sentry | src/sentry/seer/anomaly_detection/types.py | {
"start": 2501,
"end": 2601
} | class ____(IntEnum):
ABOVE = 0
BELOW = 1
ABOVE_AND_BELOW = 2
| AnomalyDetectionThresholdType |
python | realpython__materials | python-dict-attribute/person.py | {
"start": 0,
"end": 695
} | class ____:
def __init__(self, first_name, last_name, age):
self.first_name = first_name
self.last_name = last_name
self.age = age
def __str__(self):
return "{first_name} {last_name} is {age} years old".format(
**self.__dict__
)
def __repr__(self):
return "{cls}('{first_name}', '{last_name}', {age})".format(
cls=type(self).__name__,
**self.__dict__,
)
def as_dict(self):
return self.__dict__
def as_tuple(self):
return tuple(self.__dict__.values())
john = Person("John", "Doe", 30)
print(repr(john))
print(john)
print(john.as_dict())
print(john.as_tuple())
| Person |
python | fluentpython__example-code-2e | 18-with-match/lispy/py3.9/lis.py | {
"start": 1666,
"end": 5490
} | class ____(ChainMap[Symbol, Any]):
"A ChainMap that allows changing an item in-place."
def change(self, key: Symbol, value: Any) -> None:
"Find where key is defined and change the value there."
for map in self.maps:
if key in map:
map[key] = value # type: ignore[index]
return
raise KeyError(key)
def standard_env() -> Environment:
"An environment with some Scheme standard procedures."
env = Environment()
env.update(vars(math)) # sin, cos, sqrt, pi, ...
env.update({
'+': op.add,
'-': op.sub,
'*': op.mul,
'/': op.truediv,
'quotient': op.floordiv,
'>': op.gt,
'<': op.lt,
'>=': op.ge,
'<=': op.le,
'=': op.eq,
'abs': abs,
'append': lambda *args: list(chain(*args)),
'apply': lambda proc, args: proc(*args),
'begin': lambda *x: x[-1],
'car': lambda x: x[0],
'cdr': lambda x: x[1:],
'cons': lambda x, y: [x] + y,
'display': lambda x: print(lispstr(x)),
'eq?': op.is_,
'equal?': op.eq,
'filter': lambda *args: list(filter(*args)),
'length': len,
'list': lambda *x: list(x),
'list?': lambda x: isinstance(x, list),
'map': lambda *args: list(map(*args)),
'max': max,
'min': min,
'not': op.not_,
'null?': lambda x: x == [],
'number?': lambda x: isinstance(x, (int, float)),
'procedure?': callable,
'round': round,
'symbol?': lambda x: isinstance(x, Symbol),
})
return env
################ Interaction: A REPL
def repl(prompt: str = 'lis.py> ') -> NoReturn:
"A prompt-read-eval-print loop."
global_env = Environment({}, standard_env())
while True:
ast = parse(input(prompt))
val = evaluate(ast, global_env)
if val is not None:
print(lispstr(val))
def lispstr(exp: object) -> str:
"Convert a Python object back into a Lisp-readable string."
if isinstance(exp, list):
return '(' + ' '.join(map(lispstr, exp)) + ')'
else:
return str(exp)
################ Evaluator
def evaluate(exp: Expression, env: Environment) -> Any:
"Evaluate an expression in an environment."
if isinstance(exp, Symbol): # variable reference
return env[exp]
elif not isinstance(exp, list): # constant literal
return exp
elif exp[0] == 'quote': # (quote exp)
(_, x) = exp
return x
elif exp[0] == 'if': # (if test conseq alt)
(_, test, consequence, alternative) = exp
if evaluate(test, env):
return evaluate(consequence, env)
else:
return evaluate(alternative, env)
elif exp[0] == 'lambda': # (lambda (parm…) body…)
(_, parms, *body) = exp
if not isinstance(parms, list):
raise SyntaxError(lispstr(exp))
return Procedure(parms, body, env)
elif exp[0] == 'define':
(_, name_exp, *rest) = exp
if isinstance(name_exp, Symbol): # (define name exp)
value_exp = rest[0]
env[name_exp] = evaluate(value_exp, env)
else: # (define (name parm…) body…)
name, *parms = name_exp
env[name] = Procedure(parms, rest, env)
elif exp[0] == 'set!':
(_, var, value_exp) = exp
env.change(var, evaluate(value_exp, env))
else: # (proc arg…)
(func_exp, *args) = exp
proc = evaluate(func_exp, env)
args = [evaluate(arg, env) for arg in args]
return proc(*args)
| Environment |
python | wandb__wandb | wandb/vendor/pygments/styles/perldoc.py | {
"start": 462,
"end": 2175
} | class ____(Style):
"""
Style similar to the style used in the perldoc code blocks.
"""
background_color = '#eeeedd'
default_style = ''
styles = {
Whitespace: '#bbbbbb',
Comment: '#228B22',
Comment.Preproc: '#1e889b',
Comment.Special: '#8B008B bold',
String: '#CD5555',
String.Heredoc: '#1c7e71 italic',
String.Regex: '#B452CD',
String.Other: '#cb6c20',
String.Regex: '#1c7e71',
Number: '#B452CD',
Operator.Word: '#8B008B',
Keyword: '#8B008B bold',
Keyword.Type: '#00688B',
Name.Class: '#008b45 bold',
Name.Exception: '#008b45 bold',
Name.Function: '#008b45',
Name.Namespace: '#008b45 underline',
Name.Variable: '#00688B',
Name.Constant: '#00688B',
Name.Decorator: '#707a7c',
Name.Tag: '#8B008B bold',
Name.Attribute: '#658b00',
Name.Builtin: '#658b00',
Generic.Heading: 'bold #000080',
Generic.Subheading: 'bold #800080',
Generic.Deleted: '#aa0000',
Generic.Inserted: '#00aa00',
Generic.Error: '#aa0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#555555',
Generic.Output: '#888888',
Generic.Traceback: '#aa0000',
Error: 'bg:#e3d2d2 #a61717'
}
| PerldocStyle |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-mailchimp/components.py | {
"start": 798,
"end": 3981
} | class ____(ConfigTransformation):
def transform(self, config: MutableMapping[str, Any]) -> None:
"""
Extract the data center from auth credentials and add it to the config.
For API key auth, extract from the API key itself.
For OAuth, make an HTTP request to get the data center.
"""
# Exit early if the data center is already in the config
if config.get("data_center"):
return
try:
if config.get("credentials", {}).get("auth_type") == "oauth2.0":
self._extract_data_center_from_oauth(config)
else:
self._extract_data_center_from_apikey(config)
except AirbyteTracedException:
# Re-raise AirbyteTracedException as-is
raise
except Exception as e:
# Convert other exceptions to AirbyteTracedException
raise AirbyteTracedException(
failure_type=FailureType.config_error,
internal_message=f"Failed to extract data center: {str(e)}",
message=("Unable to extract data center from credentials. " "Please check your configuration and try again."),
) from e
@staticmethod
def _extract_data_center_from_oauth(config: MutableMapping[str, Any]) -> None:
"""Make a request to oauth2/metadata endpoint to get the data center."""
access_token = config.get("credentials", {}).get("access_token")
response = requests.get(
"https://login.mailchimp.com/oauth2/metadata", headers={"Authorization": f"OAuth {access_token}"}, timeout=10
)
response.raise_for_status()
# Mailchimp returns a 200 response with an error key if the token is invalid
error = response.json().get("error")
if error == "invalid_token":
raise AirbyteTracedException(
failure_type=FailureType.config_error,
internal_message=error,
message=("The access token you provided was invalid. " "Please check your credentials and try again."),
)
# Extract data center from the "dc" field
data_center = response.json().get("dc")
if data_center:
dpath.new(config, ["data_center"], data_center)
@staticmethod
def _extract_data_center_from_apikey(config: MutableMapping[str, Any]) -> None:
"""Extract the data center directly from the API key."""
# Backward compatibility - check for API key at top level
if config.get("apikey"):
api_key = config["apikey"]
if api_key and "-" in api_key:
# API key format: "prefix-datacenter"
data_center = api_key.split("-")[-1]
dpath.new(config, ["data_center"], data_center)
return
# API key flow - extract data center from API key
api_key = config.get("credentials", {}).get("apikey")
if api_key and "-" in api_key:
# API key format: "prefix-datacenter"
data_center = api_key.split("-")[-1]
dpath.new(config, ["data_center"], data_center)
| ExtractAndSetDataCenterConfigValue |
python | optuna__optuna | optuna/cli.py | {
"start": 16257,
"end": 17843
} | class ____(_BaseCommand):
"""Show a list of trials."""
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument(
"--study-name",
type=str,
required=True,
help="The name of the study which includes trials.",
)
parser.add_argument(
"-f",
"--format",
type=str,
choices=("value", "json", "table", "yaml"),
default="table",
help="Output format.",
)
parser.add_argument(
"--flatten",
default=False,
action="store_true",
help="Flatten nested columns such as params and user_attrs.",
)
def take_action(self, parsed_args: Namespace) -> int:
optuna_warn(
"'trials' is an experimental CLI command. The interface can change in the future.",
ExperimentalWarning,
)
storage = _get_storage(parsed_args.storage, parsed_args.storage_class)
study = optuna.load_study(storage=storage, study_name=parsed_args.study_name)
attrs = (
"number",
"value" if not study._is_multi_objective() else "values",
"datetime_start",
"datetime_complete",
"duration",
"params",
"user_attrs",
"state",
)
records, columns = _dataframe._create_records_and_aggregate_column(study, attrs)
print(_format_output(records, columns, parsed_args.format, parsed_args.flatten))
return 0
| _Trials |
python | psf__black | tests/data/cases/preview_long_strings__regression.py | {
"start": 691,
"end": 936
} | class ____:
def foo():
os.system("This is a regression test. xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxx xxxx.".format("xxxxxxxxxx", "xxxxxx", "xxxxxxxxxx"))
| A |
python | doocs__leetcode | solution/0500-0599/0552.Student Attendance Record II/Solution2.py | {
"start": 0,
"end": 909
} | class ____:
def checkRecord(self, n: int) -> int:
mod = int(1e9 + 7)
dp = [[[0, 0, 0], [0, 0, 0]] for _ in range(n)]
# base case
dp[0][0][0] = dp[0][0][1] = dp[0][1][0] = 1
for i in range(1, n):
# A
dp[i][1][0] = (dp[i - 1][0][0] + dp[i - 1][0][1] + dp[i - 1][0][2]) % mod
# L
dp[i][0][1] = dp[i - 1][0][0]
dp[i][0][2] = dp[i - 1][0][1]
dp[i][1][1] = dp[i - 1][1][0]
dp[i][1][2] = dp[i - 1][1][1]
# P
dp[i][0][0] = (dp[i - 1][0][0] + dp[i - 1][0][1] + dp[i - 1][0][2]) % mod
dp[i][1][0] = (
dp[i][1][0] + dp[i - 1][1][0] + dp[i - 1][1][1] + dp[i - 1][1][2]
) % mod
ans = 0
for j in range(2):
for k in range(3):
ans = (ans + dp[n - 1][j][k]) % mod
return ans
| Solution |
python | kamyu104__LeetCode-Solutions | Python/single-number-ii.py | {
"start": 1106,
"end": 1453
} | class ____(object):
# @param A, a list of integer
# @return an integer
# [1, 1, 1, 1, 2, 2, 2, 2, 3, 3]
def singleNumber(self, A):
one, two, three = 0, 0, 0
for x in A:
one, two, three = (~x & one) | (x & ~one & ~two & ~three), (~x & two) | (x & one), (~x & three) | (x & two)
return two
| SolutionEX |
python | walkccc__LeetCode | solutions/283. Move Zeroes/283.py | {
"start": 0,
"end": 205
} | class ____:
def moveZeroes(self, nums: list[int]) -> None:
j = 0
for num in nums:
if num != 0:
nums[j] = num
j += 1
for i in range(j, len(nums)):
nums[i] = 0
| Solution |
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 2286,
"end": 3201
} | class ____(RequestHandler):
# stub out enough methods to make the signed_cookie functions work
def __init__(self, cookie_secret="0123456789", key_version=None):
# don't call super.__init__
self._cookies = {} # type: typing.Dict[str, bytes]
if key_version is None:
self.application = ObjectDict( # type: ignore
settings=dict(cookie_secret=cookie_secret)
)
else:
self.application = ObjectDict( # type: ignore
settings=dict(cookie_secret=cookie_secret, key_version=key_version)
)
def get_cookie(self, name) -> typing.Optional[str]: # type: ignore[override]
return to_unicode(self._cookies.get(name))
def set_cookie(self, name, value, expires_days=None): # type: ignore[override]
self._cookies[name] = value
# See SignedValueTest below for more.
| CookieTestRequestHandler |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/solver9.py | {
"start": 660,
"end": 1039
} | class ____(ClassA):
pass
def func3(value: _T1) -> type[ClassA[_T1]]:
v1 = ClassA(value)
v2 = type(v1)
reveal_type(v2, expected_text="type[ClassA[_T1@func3]]")
return v2
d = ClassB.get()
reveal_type(d, expected_text="type[ClassB]")
reveal_type(ClassB.get(), expected_text="type[ClassB]")
def func4(cls: type[_T1]) -> Callable[..., _T1]:
return cls
| ClassB |
python | wandb__wandb | wandb/sdk/integration_utils/auto_logging.py | {
"start": 5426,
"end": 8205
} | class ____:
def __init__(
self,
name: str,
symbols: Sequence[str],
resolver: ArgumentResponseResolver,
telemetry_feature: Optional[str] = None,
) -> None:
"""Autolog API calls to W&B."""
self._telemetry_feature = telemetry_feature
self._patch_api = PatchAPI(
name=name,
symbols=symbols,
resolver=resolver,
)
self._name = self._patch_api.name
self._run: Optional[wandb.Run] = None
self.__run_created_by_autolog: bool = False
@property
def _is_enabled(self) -> bool:
"""Returns whether autologging is enabled."""
return self._run is not None
def __call__(self, init: AutologInitArgs = None) -> None:
"""Enable autologging."""
self.enable(init=init)
def _run_init(self, init: AutologInitArgs = None) -> None:
"""Handle wandb run initialization."""
# - autolog(init: dict = {...}) calls wandb.init(**{...})
# regardless of whether there is a wandb.run or not,
# we only track if the run was created by autolog
# - todo: autolog(init: dict | run = run) would use the user-provided run
# - autolog() uses the wandb.run if there is one, otherwise it calls wandb.init()
if init:
_wandb_run = wandb.run
# we delegate dealing with the init dict to wandb.init()
self._run = wandb.init(**init)
if _wandb_run != self._run:
self.__run_created_by_autolog = True
elif wandb.run is None:
self._run = wandb.init()
self.__run_created_by_autolog = True
else:
self._run = wandb.run
def enable(self, init: AutologInitArgs = None) -> None:
"""Enable autologging.
Args:
init: Optional dictionary of arguments to pass to wandb.init().
"""
if self._is_enabled:
logger.info(
f"{self._name} autologging is already enabled, disabling and re-enabling."
)
self.disable()
logger.info(f"Enabling {self._name} autologging.")
self._run_init(init=init)
self._patch_api.patch(self._run)
if self._telemetry_feature:
with wb_telemetry.context(self._run) as tel:
setattr(tel.feature, self._telemetry_feature, True)
def disable(self) -> None:
"""Disable autologging."""
if self._run is None:
return
logger.info(f"Disabling {self._name} autologging.")
if self.__run_created_by_autolog:
self._run.finish()
self.__run_created_by_autolog = False
self._run = None
self._patch_api.unpatch()
| AutologAPI |
python | huggingface__transformers | src/transformers/models/mistral3/modeling_mistral3.py | {
"start": 7939,
"end": 8511
} | class ____(PreTrainedModel):
config: Mistral3Config
base_model_prefix = "model"
input_modalities = ("image", "text")
supports_gradient_checkpointing = True
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = True
_supports_flex_attn = True
_supports_attention_backend = True
@auto_docstring(
custom_intro="""
The Mistral3 model which consists of a vision backbone and a language model, without a language modeling head.
"""
)
| Mistral3PreTrainedModel |
python | apache__airflow | providers/openlineage/tests/unit/openlineage/utils/test_utils.py | {
"start": 72548,
"end": 100955
} | class ____:
def test_dag_info(self):
with DAG(
dag_id="dag_id",
schedule="@once",
start_date=datetime.datetime(2024, 6, 1),
tags={"test"},
description="test desc",
owner_links={"some_owner": "https://airflow.apache.org"},
) as dag:
task_0 = BashOperator(task_id="task_0", bash_command="exit 0;", owner="first") # noqa: F841
task_1 = BashOperator(task_id="task_1", bash_command="exit 1;", owner="second") # noqa: F841
result = dict(DagInfo(dag))
assert sorted(result["owner"].split(", ")) == ["first", "second"]
result.pop("owner")
assert result == {
"dag_id": "dag_id",
"description": "test desc",
"fileloc": pathlib.Path(__file__).resolve().as_posix(),
"start_date": "2024-06-01T00:00:00+00:00",
"tags": "['test']",
"timetable": {},
"timetable_summary": "@once",
"owner_links": {"some_owner": "https://airflow.apache.org"},
}
def test_dag_info_schedule_cron(self):
dag = DAG(
dag_id="dag_id",
schedule="*/4 3 * * *",
start_date=datetime.datetime(2024, 6, 1),
)
result = DagInfo(dag)
assert dict(result) == {
"dag_id": "dag_id",
"description": None,
"fileloc": pathlib.Path(__file__).resolve().as_posix(),
"owner": "",
"start_date": "2024-06-01T00:00:00+00:00",
"tags": "[]",
"owner_links": {},
"timetable": {"expression": "*/4 3 * * *", "timezone": "UTC"},
"timetable_summary": "*/4 3 * * *",
}
def test_dag_info_schedule_events_timetable(self):
dag = DAG(
dag_id="dag_id",
start_date=datetime.datetime(2024, 6, 1),
schedule=EventsTimetable(
event_dates=[
pendulum.datetime(2025, 3, 3, 8, 27, tz="America/Chicago"),
pendulum.datetime(2025, 3, 17, 8, 27, tz="America/Chicago"),
pendulum.datetime(2025, 3, 22, 20, 50, tz="America/Chicago"),
],
description="My Team's Baseball Games",
),
)
timetable = {
"event_dates": [
"2025-03-03T08:27:00-06:00",
"2025-03-17T08:27:00-05:00",
"2025-03-22T20:50:00-05:00",
],
"restrict_to_events": False,
}
if AIRFLOW_V_3_0_3_PLUS:
timetable.update(
{
"_summary": "My Team's Baseball Games",
"description": "My Team's Baseball Games",
}
)
timetable["description"] = "My Team's Baseball Games"
result = DagInfo(dag)
assert dict(result) == {
"dag_id": "dag_id",
"description": None,
"fileloc": pathlib.Path(__file__).resolve().as_posix(),
"owner": "",
"start_date": "2024-06-01T00:00:00+00:00",
"tags": "[]",
"owner_links": {},
"timetable": timetable,
"timetable_summary": "My Team's Baseball Games",
}
def test_dag_info_schedule_single_asset_directly(self):
dag = DAG(
dag_id="dag_id",
start_date=datetime.datetime(2024, 6, 1),
schedule=Asset(uri="uri1", extra={"a": 1}),
)
result = DagInfo(dag)
assert dict(result) == {
"dag_id": "dag_id",
"description": None,
"fileloc": pathlib.Path(__file__).resolve().as_posix(),
"owner": "",
"start_date": "2024-06-01T00:00:00+00:00",
"tags": "[]",
"owner_links": {},
"timetable": {
"asset_condition": {
"__type": "asset",
"uri": "uri1",
"name": "uri1",
"group": "asset",
"extra": {"a": 1},
}
},
"timetable_summary": "Asset",
}
def test_dag_info_schedule_list_single_assets(self):
dag = DAG(
dag_id="dag_id",
start_date=datetime.datetime(2024, 6, 1),
schedule=[Asset(uri="uri1", extra={"a": 1})],
)
result = DagInfo(dag)
assert dict(result) == {
"dag_id": "dag_id",
"description": None,
"fileloc": pathlib.Path(__file__).resolve().as_posix(),
"owner": "",
"start_date": "2024-06-01T00:00:00+00:00",
"tags": "[]",
"owner_links": {},
"timetable": {
"asset_condition": {
"__type": "asset_all",
"objects": [
{
"__type": "asset",
"uri": "uri1",
"name": "uri1",
"group": "asset",
"extra": {"a": 1},
}
],
}
},
"timetable_summary": "Asset",
}
def test_dag_info_schedule_list_two_assets(self):
dag = DAG(
dag_id="dag_id",
start_date=datetime.datetime(2024, 6, 1),
schedule=[Asset(uri="uri1", extra={"a": 1}), Asset(uri="uri2")],
)
result = DagInfo(dag)
assert dict(result) == {
"dag_id": "dag_id",
"description": None,
"fileloc": pathlib.Path(__file__).resolve().as_posix(),
"owner": "",
"start_date": "2024-06-01T00:00:00+00:00",
"tags": "[]",
"owner_links": {},
"timetable": {
"asset_condition": {
"__type": "asset_all",
"objects": [
{
"__type": "asset",
"uri": "uri1",
"name": "uri1",
"group": "asset",
"extra": {"a": 1},
},
{"__type": "asset", "uri": "uri2", "name": "uri2", "group": "asset", "extra": {}},
],
}
},
"timetable_summary": "Asset",
}
def test_dag_info_schedule_assets_logical_condition(self):
dag = DAG(
dag_id="dag_id",
start_date=datetime.datetime(2024, 6, 1),
schedule=((Asset("uri1", extra={"a": 1}) | Asset("uri2")) & (Asset("uri3") | Asset("uri4"))),
)
result = DagInfo(dag)
assert dict(result) == {
"dag_id": "dag_id",
"description": None,
"fileloc": pathlib.Path(__file__).resolve().as_posix(),
"owner": "",
"start_date": "2024-06-01T00:00:00+00:00",
"tags": "[]",
"owner_links": {},
"timetable": {
"asset_condition": {
"__type": "asset_all",
"objects": [
{
"__type": "asset_any",
"objects": [
{
"__type": "asset",
"uri": "uri1",
"name": "uri1",
"group": "asset",
"extra": {"a": 1},
},
{
"__type": "asset",
"uri": "uri2",
"name": "uri2",
"group": "asset",
"extra": {},
},
],
},
{
"__type": "asset_any",
"objects": [
{
"__type": "asset",
"uri": "uri3",
"name": "uri3",
"group": "asset",
"extra": {},
},
{
"__type": "asset",
"uri": "uri4",
"name": "uri4",
"group": "asset",
"extra": {},
},
],
},
],
}
},
"timetable_summary": "Asset",
}
def test_dag_info_schedule_asset_or_time_schedule(self):
from airflow.timetables.assets import AssetOrTimeSchedule
dag = DAG(
dag_id="dag_id",
start_date=datetime.datetime(2024, 6, 1),
schedule=AssetOrTimeSchedule(
timetable=CronTriggerTimetable("*/4 3 * * *", timezone="UTC"),
assets=((Asset("uri1", extra={"a": 1}) | Asset("uri2")) & (Asset("uri3") | Asset("uri4"))),
),
)
result = DagInfo(dag)
assert dict(result) == {
"dag_id": "dag_id",
"description": None,
"fileloc": pathlib.Path(__file__).resolve().as_posix(),
"owner": "",
"start_date": "2024-06-01T00:00:00+00:00",
"tags": "[]",
"owner_links": {},
"timetable": {
"asset_condition": {
"__type": "asset_all",
"objects": [
{
"__type": "asset_any",
"objects": [
{
"__type": "asset",
"uri": "uri1",
"name": "uri1",
"group": "asset",
"extra": {"a": 1},
},
{
"__type": "asset",
"uri": "uri2",
"name": "uri2",
"group": "asset",
"extra": {},
},
],
},
{
"__type": "asset_any",
"objects": [
{
"__type": "asset",
"uri": "uri3",
"name": "uri3",
"group": "asset",
"extra": {},
},
{
"__type": "asset",
"uri": "uri4",
"name": "uri4",
"group": "asset",
"extra": {},
},
],
},
],
},
"timetable": {
"__type": "airflow.timetables.trigger.CronTriggerTimetable",
"__var": {
"expression": "*/4 3 * * *",
"timezone": "UTC",
"interval": 0.0,
"run_immediately": False,
},
},
},
"timetable_summary": "Asset or */4 3 * * *",
}
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="Airflow 3 test")
@patch.object(DagRun, "dag_versions", new_callable=PropertyMock)
def test_dagrun_info_af3(mocked_dag_versions):
from airflow.models.dag_version import DagVersion
from airflow.utils.types import DagRunTriggeredByType
date = datetime.datetime(2024, 6, 1, tzinfo=datetime.timezone.utc)
dv1 = DagVersion()
dv2 = DagVersion()
dv2.id = "version_id"
dv2.version_number = "version_number"
dv2.bundle_name = "bundle_name"
dv2.bundle_version = "bundle_version"
mocked_dag_versions.return_value = [dv1, dv2]
dagrun = DagRun(
dag_id="dag_id",
run_id="dag_run__run_id",
queued_at=date,
logical_date=date,
run_after=date,
start_date=date,
conf={"a": 1},
state=DagRunState.RUNNING,
run_type=DagRunType.MANUAL,
creating_job_id=123,
data_interval=(date, date),
triggered_by=DagRunTriggeredByType.UI,
backfill_id=999,
bundle_version="bundle_version",
)
assert dagrun.dag_versions == [dv1, dv2]
dagrun.end_date = date + datetime.timedelta(seconds=74, microseconds=546)
result = DagRunInfo(dagrun)
assert dict(result) == {
"conf": {"a": 1},
"dag_id": "dag_id",
"data_interval_end": "2024-06-01T00:00:00+00:00",
"data_interval_start": "2024-06-01T00:00:00+00:00",
"duration": 74.000546,
"end_date": "2024-06-01T00:01:14.000546+00:00",
"run_id": "dag_run__run_id",
"run_type": DagRunType.MANUAL,
"start_date": "2024-06-01T00:00:00+00:00",
"logical_date": "2024-06-01T00:00:00+00:00",
"run_after": "2024-06-01T00:00:00+00:00",
"dag_bundle_name": "bundle_name",
"dag_bundle_version": "bundle_version",
"dag_version_id": "version_id",
"dag_version_number": "version_number",
}
@pytest.mark.skipif(AIRFLOW_V_3_0_PLUS, reason="Airflow 2 test")
def test_dagrun_info_af2():
date = datetime.datetime(2024, 6, 1, tzinfo=datetime.timezone.utc)
dag = DAG(
"dag_id",
schedule=None,
start_date=date,
)
dagrun = dag.create_dagrun(
run_id="dag_run__run_id",
data_interval=(date, date),
run_type=DagRunType.MANUAL,
state=DagRunState.RUNNING,
execution_date=date,
conf={"a": 1},
)
dagrun.start_date = date
dagrun.end_date = date + datetime.timedelta(seconds=74, microseconds=546)
result = DagRunInfo(dagrun)
assert dict(result) == {
"conf": {"a": 1},
"dag_id": "dag_id",
"data_interval_end": "2024-06-01T00:00:00+00:00",
"data_interval_start": "2024-06-01T00:00:00+00:00",
"duration": 74.000546,
"end_date": "2024-06-01T00:01:14.000546+00:00",
"run_id": "dag_run__run_id",
"run_type": DagRunType.MANUAL,
"external_trigger": False,
"start_date": "2024-06-01T00:00:00+00:00",
"execution_date": "2024-06-01T00:00:00+00:00",
"logical_date": "2024-06-01T00:00:00+00:00",
"dag_bundle_name": None,
"dag_bundle_version": None,
"dag_version_id": None,
"dag_version_number": None,
}
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="Airflow 3 test")
def test_taskinstance_info_af3():
from airflow.sdk.api.datamodels._generated import TaskInstance
from airflow.sdk.execution_time.task_runner import RuntimeTaskInstance
task = BaseOperator(task_id="hello")
task._is_mapped = True
dag_id = "basic_task"
dag = DAG(dag_id=dag_id, start_date=timezone.datetime(2024, 12, 3))
task.dag = dag
ti_id = uuid7()
ti = TaskInstance(
id=ti_id,
task_id=task.task_id,
dag_id=dag_id,
run_id="test_run",
try_number=1,
map_index=2,
dag_version_id=ti_id,
)
start_date = timezone.datetime(2025, 1, 1)
runtime_ti = RuntimeTaskInstance.model_construct(
**ti.model_dump(exclude_unset=True),
task=task,
_ti_context_from_server=None,
start_date=start_date,
)
runtime_ti.end_date = start_date + datetime.timedelta(seconds=12, milliseconds=345)
bundle_instance = MagicMock(version="bundle_version")
bundle_instance.name = "bundle_name"
runtime_ti.bundle_instance = bundle_instance
assert dict(TaskInstanceInfo(runtime_ti)) == {
"log_url": runtime_ti.log_url,
"map_index": 2,
"try_number": 1,
"dag_bundle_version": "bundle_version",
"dag_bundle_name": "bundle_name",
}
@pytest.mark.skipif(AIRFLOW_V_3_0_PLUS, reason="Airflow 2 test")
@patch.object(TaskInstance, "log_url", "some_log_url") # Depends on the host, hard to test exact value
def test_taskinstance_info_af2():
some_date = datetime.datetime(2024, 6, 1, tzinfo=datetime.timezone.utc)
task_obj = PythonOperator(task_id="task_id", python_callable=lambda x: x)
ti = TaskInstance(
task=task_obj, run_id="task_instance_run_id", state=TaskInstanceState.RUNNING, map_index=2
)
ti.duration = 12.345
ti.queued_dttm = some_date
assert dict(TaskInstanceInfo(ti)) == {
"duration": 12.345,
"map_index": 2,
"pool": "default_pool",
"try_number": 0,
"queued_dttm": "2024-06-01T00:00:00+00:00",
"log_url": "some_log_url",
"dag_bundle_name": None,
"dag_bundle_version": None,
}
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="Airflow 3 test")
def test_task_info_af3():
class CustomOperator(PythonOperator):
def __init__(self, *args, **kwargs):
# Mock some specific attributes from different operators
self.deferrable = True
self.trigger_dag_id = "trigger_dag_id"
self.trigger_run_id = "trigger_run_id"
self.external_dag_id = "external_dag_id"
self.external_task_id = "external_task_id"
self.external_task_ids = "external_task_ids"
self.external_task_group_id = "external_task_group_id"
self.external_dates_filter = "external_dates_filter"
self.logical_date = "logical_date"
self.execution_date = "execution_date"
super().__init__(*args, **kwargs)
with DAG(
dag_id="dag",
schedule="@once",
start_date=datetime.datetime(2024, 6, 1),
) as dag:
task_0 = BashOperator(task_id="task_0", bash_command="exit 0;", dag=dag)
task_1 = BashOperator(task_id="task_1", bash_command="exit 0;", dag=dag)
with TaskGroup("section_1", prefix_group_id=True) as tg:
task_10 = CustomOperator(
task_id="task_3",
python_callable=lambda: 1,
inlets=[Asset(uri="uri1", extra={"a": 1})],
outlets=[Asset(uri="uri2", extra={"b": 2}), Asset(uri="uri3", extra={"c": 3})],
)
task_0 >> task_10
tg >> task_1
result = TaskInfo(task_10)
tg_info = TaskGroupInfo(tg)
assert dict(tg_info) == {
"downstream_group_ids": "[]",
"downstream_task_ids": "['task_1']",
"group_id": "section_1",
"prefix_group_id": True,
"tooltip": "",
"upstream_group_ids": "[]",
"upstream_task_ids": "[]",
}
assert dict(result) == {
"deferrable": True,
"depends_on_past": False,
"downstream_task_ids": "['task_1']",
"execution_date": "execution_date",
"execution_timeout": None,
"executor_config": {},
"external_dag_id": "external_dag_id",
"external_dates_filter": "external_dates_filter",
"external_task_id": "external_task_id",
"external_task_ids": "external_task_ids",
"external_task_group_id": "external_task_group_id",
"ignore_first_depends_on_past": False,
"inlets": "[{'uri': 'uri1', 'extra': {'a': 1}}]",
"logical_date": "logical_date",
"mapped": False,
"max_active_tis_per_dag": None,
"max_active_tis_per_dagrun": None,
"max_retry_delay": None,
"multiple_outputs": False,
"operator_class": "CustomOperator",
"operator_class_path": get_fully_qualified_class_name(task_10),
"operator_provider_version": None, # Custom operator doesn't have provider version
"outlets": "[{'uri': 'uri2', 'extra': {'b': 2}}, {'uri': 'uri3', 'extra': {'c': 3}}]",
"owner": "airflow",
"priority_weight": 1,
"queue": "default",
"retries": 0,
"retry_exponential_backoff": False,
"run_as_user": None,
"task_group": tg_info,
"task_id": "section_1.task_3",
"trigger_dag_id": "trigger_dag_id",
"trigger_run_id": "trigger_run_id",
"trigger_rule": "all_success",
"upstream_task_ids": "['task_0']",
"wait_for_downstream": False,
"wait_for_past_depends_before_skipping": False,
}
@pytest.mark.skipif(AIRFLOW_V_3_0_PLUS, reason="Airflow 2 test")
def test_task_info_af2():
class CustomOperator(PythonOperator):
def __init__(self, *args, **kwargs):
# Mock some specific attributes from different operators
self.deferrable = True
self.trigger_dag_id = "trigger_dag_id"
self.trigger_run_id = "trigger_run_id"
self.external_dag_id = "external_dag_id"
self.external_task_id = "external_task_id"
self.external_task_ids = "external_task_ids"
self.external_task_group_id = "external_task_group_id"
self.external_dates_filter = "external_dates_filter"
self.logical_date = "logical_date"
self.execution_date = "execution_date"
super().__init__(*args, **kwargs)
with DAG(
dag_id="dag",
schedule="@once",
start_date=datetime.datetime(2024, 6, 1),
) as dag:
task_0 = BashOperator(task_id="task_0", bash_command="exit 0;", dag=dag)
task_1 = BashOperator(task_id="task_1", bash_command="exit 0;", dag=dag)
with TaskGroup("section_1", prefix_group_id=True) as tg:
task_10 = CustomOperator(
task_id="task_3",
python_callable=lambda: 1,
inlets=[Asset(uri="uri1", extra={"a": 1})],
outlets=[Asset(uri="uri2", extra={"b": 2}), Asset(uri="uri3", extra={"c": 3})],
)
task_0 >> task_10
tg >> task_1
result = TaskInfo(task_10)
tg_info = TaskGroupInfo(tg)
assert dict(tg_info) == {
"downstream_group_ids": "[]",
"downstream_task_ids": "['task_1']",
"group_id": "section_1",
"prefix_group_id": True,
"tooltip": "",
"upstream_group_ids": "[]",
"upstream_task_ids": "[]",
}
assert dict(result) == {
"deferrable": True,
"depends_on_past": False,
"downstream_task_ids": "['task_1']",
"execution_date": "execution_date",
"execution_timeout": None,
"executor_config": {},
"external_dag_id": "external_dag_id",
"external_dates_filter": "external_dates_filter",
"external_task_id": "external_task_id",
"external_task_ids": "external_task_ids",
"external_task_group_id": "external_task_group_id",
"ignore_first_depends_on_past": True,
"is_setup": False,
"is_teardown": False,
"sla": None,
"inlets": "[{'uri': 'uri1', 'extra': {'a': 1}}]",
"logical_date": "logical_date",
"mapped": False,
"max_active_tis_per_dag": None,
"max_active_tis_per_dagrun": None,
"max_retry_delay": None,
"multiple_outputs": False,
"operator_class": "CustomOperator",
"operator_class_path": get_fully_qualified_class_name(task_10),
"operator_provider_version": None, # Custom operator doesn't have provider version
"outlets": "[{'uri': 'uri2', 'extra': {'b': 2}}, {'uri': 'uri3', 'extra': {'c': 3}}]",
"owner": "airflow",
"priority_weight": 1,
"queue": "default",
"retries": 0,
"retry_exponential_backoff": False,
"run_as_user": None,
"task_group": tg_info,
"task_id": "section_1.task_3",
"trigger_dag_id": "trigger_dag_id",
"trigger_run_id": "trigger_run_id",
"trigger_rule": "all_success",
"upstream_task_ids": "['task_0']",
"wait_for_downstream": False,
"wait_for_past_depends_before_skipping": False,
}
def test_task_info_complete():
task_0 = BashOperator(task_id="task_0", bash_command="exit 0;")
result = TaskInfoComplete(task_0)
assert "'bash_command': 'exit 0;'" in str(result)
@patch("airflow.providers.openlineage.utils.utils.get_fully_qualified_class_name")
def test_get_operator_provider_version_exception_handling(mock_class_name):
mock_class_name.side_effect = Exception("Test exception")
operator = MagicMock()
assert get_operator_provider_version(operator) is None
def test_get_operator_provider_version_for_core_operator():
"""Test that get_operator_provider_version returns None for core operators."""
operator = BaseOperator(task_id="test_task")
result = get_operator_provider_version(operator)
assert result is None
@patch("airflow.providers_manager.ProvidersManager")
def test_get_operator_provider_version_for_provider_operator(mock_providers_manager):
"""Test that get_operator_provider_version returns version for provider operators."""
# Mock ProvidersManager
mock_manager_instance = MagicMock()
mock_providers_manager.return_value = mock_manager_instance
# Mock providers data
mock_manager_instance.providers = {
"apache-airflow-providers-standard": MagicMock(version="1.2.0"),
"apache-airflow-providers-amazon": MagicMock(version="8.12.0"),
"apache-airflow-providers-google": MagicMock(version="10.5.0"),
}
# Test with BashOperator (standard provider)
operator = BashOperator(task_id="test_task", bash_command="echo test")
result = get_operator_provider_version(operator)
assert result == "1.2.0"
@patch("airflow.providers_manager.ProvidersManager")
def test_get_operator_provider_version_provider_not_found(mock_providers_manager):
"""Test that get_operator_provider_version returns None when provider is not found."""
# Mock ProvidersManager with no matching provider
mock_manager_instance = MagicMock()
mock_providers_manager.return_value = mock_manager_instance
mock_manager_instance.providers = {
"apache-airflow-providers-amazon": MagicMock(version="8.12.0"),
"apache-airflow-providers-google": MagicMock(version="10.5.0"),
}
operator = BashOperator(task_id="test_task", bash_command="echo test")
result = get_operator_provider_version(operator)
assert result is None
def test_get_operator_provider_version_for_custom_operator():
"""Test that get_operator_provider_version returns None for custom operators."""
# Create a custom operator that doesn't belong to any provider
class CustomOperator(BaseOperator):
def execute(self, context):
pass
operator = CustomOperator(task_id="test_task")
result = get_operator_provider_version(operator)
assert result is None
@patch("airflow.providers_manager.ProvidersManager")
def test_get_operator_provider_version_for_mapped_operator(mock_providers_manager):
"""Test that get_operator_provider_version works with mapped operators."""
# Mock ProvidersManager
mock_manager_instance = MagicMock()
mock_providers_manager.return_value = mock_manager_instance
# Mock providers data
mock_manager_instance.providers = {
"apache-airflow-providers-standard": MagicMock(version="1.2.0"),
"apache-airflow-providers-amazon": MagicMock(version="8.12.0"),
}
# Test with mapped BashOperator (standard provider)
mapped_operator = BashOperator.partial(task_id="test_task").expand(bash_command=["echo 1", "echo 2"])
result = get_operator_provider_version(mapped_operator)
assert result == "1.2.0"
| TestDagInfoAirflow3 |
python | davidhalter__jedi | test/completion/descriptors.py | {
"start": 3130,
"end": 3421
} | class ____():
def __init__(self, x):
self.x = x
@Memoize
def some_func(self):
return self.x
#? int()
MemoizeTest(10).some_func()
# Now also call the same function over the class (see if clause above).
#? float()
MemoizeTest.some_func(MemoizeTest(10.0))
| MemoizeTest |
python | huggingface__transformers | src/transformers/models/swinv2/modeling_swinv2.py | {
"start": 26278,
"end": 26923
} | class ____(nn.Module):
def __init__(self, config, dim):
super().__init__()
self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.swin.modeling_swin.SwinOutput with Swin->Swinv2
| Swinv2Intermediate |
python | skorch-dev__skorch | skorch/tests/callbacks/test_training.py | {
"start": 35220,
"end": 39935
} | class ____:
@pytest.fixture(params=['torch', 'safetensors'])
def use_safetensors(self, request):
return request.param == 'safetensors'
@pytest.fixture
def checkpoint_cls(self):
from skorch.callbacks import Checkpoint
return Checkpoint
@pytest.fixture
def loadinitstate_cls(self):
from skorch.callbacks import LoadInitState
return LoadInitState
@pytest.fixture
def net_cls(self):
"""very simple network that trains for 10 epochs"""
from skorch import NeuralNetRegressor
from skorch.toy import make_regressor
module_cls = make_regressor(
input_units=1,
num_hidden=0,
output_units=1,
)
return partial(
NeuralNetRegressor,
module=module_cls,
max_epochs=10,
batch_size=10)
@pytest.fixture(scope='module')
def data(self):
# have 10 examples so we can do a nice CV split
X = np.zeros((10, 1), dtype='float32')
y = np.zeros((10, 1), dtype='float32')
return X, y
def test_load_initial_state(
self, checkpoint_cls, net_cls, loadinitstate_cls,
data, tmpdir, use_safetensors):
skorch_dir = tmpdir.mkdir('skorch')
f_params = skorch_dir.join('params.pt')
f_optimizer = skorch_dir.join('optimizer.pt')
f_criterion = skorch_dir.join('criterion.pt')
f_history = skorch_dir.join('history.json')
kwargs = dict(
monitor=None,
f_params=str(f_params),
f_optimizer=str(f_optimizer),
f_criterion=str(f_criterion),
f_history=str(f_history),
use_safetensors=use_safetensors,
)
if use_safetensors:
# safetensors cannot safe optimizers
kwargs['f_optimizer'] = None
cp = checkpoint_cls(**kwargs)
load_init_state = loadinitstate_cls(cp, use_safetensors=use_safetensors)
net = net_cls(callbacks=[cp, load_init_state])
net.fit(*data)
assert f_params.exists()
assert f_criterion.exists()
assert f_history.exists()
if not use_safetensors:
# safetensors cannot safe optimizers
assert f_optimizer.exists()
assert len(net.history) == 10
del net
new_net = net_cls(callbacks=[cp, load_init_state])
new_net.fit(*data)
assert len(new_net.history) == 20
def test_load_initial_state_custom_scoring(
self, checkpoint_cls, net_cls, loadinitstate_cls,
data, tmpdir, use_safetensors):
def epoch_3_scorer(net, *_):
return 1 if net.history[-1, 'epoch'] == 3 else 0
from skorch.callbacks import EpochScoring
scoring = EpochScoring(
scoring=epoch_3_scorer, on_train=True, lower_is_better=False)
skorch_dir = tmpdir.mkdir('skorch')
f_params = skorch_dir.join(
'model_epoch_{last_epoch[epoch]}.pt')
f_optimizer = skorch_dir.join(
'optimizer_epoch_{last_epoch[epoch]}.pt')
f_criterion = skorch_dir.join(
'criterion_epoch_{last_epoch[epoch]}.pt')
f_history = skorch_dir.join(
'history.json')
kwargs = dict(
monitor='epoch_3_scorer_best',
f_params=str(f_params),
f_optimizer=str(f_optimizer),
f_criterion=str(f_criterion),
f_history=str(f_history),
use_safetensors=use_safetensors,
)
if use_safetensors:
# safetensors cannot safe optimizers
kwargs['f_optimizer'] = None
cp = checkpoint_cls(**kwargs)
load_init_state = loadinitstate_cls(cp, use_safetensors=use_safetensors)
net = net_cls(callbacks=[load_init_state, scoring, cp])
net.fit(*data)
assert skorch_dir.join('model_epoch_3.pt').exists()
assert skorch_dir.join('criterion_epoch_3.pt').exists()
assert skorch_dir.join('history.json').exists()
if not use_safetensors:
# safetensors cannot safe optimizers
assert skorch_dir.join('optimizer_epoch_3.pt').exists()
assert len(net.history) == 10
del net
new_net = net_cls(callbacks=[load_init_state, scoring, cp])
new_net.fit(*data)
# new_net starts from the best epoch of the first run
# the best epcoh of the previous run was at epoch 3
# the second run went through 10 epochs, thus
# 3 + 10 = 13
assert len(new_net.history) == 13
assert new_net.history[:, 'event_cp'] == [
True, False, True] + [False] * 10
| TestLoadInitState |
python | dask__distributed | distributed/protocol/tests/test_serialize.py | {
"start": 9900,
"end": 19239
} | class ____:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def my_dumps(obj, context=None):
if type(obj).__name__ == "MyObject":
header = {"serializer": "my-ser"}
frames = [
msgpack.dumps(obj.__dict__, use_bin_type=True),
msgpack.dumps(context, use_bin_type=True),
]
return header, frames
else:
raise NotImplementedError()
def my_loads(header, frames):
obj = MyObject(**msgpack.loads(frames[0], raw=False))
# to provide something to test against, lets just attach the context to
# the object itself
obj.context = msgpack.loads(frames[1], raw=False)
return obj
@gen_cluster(
client=True,
client_kwargs={"serializers": ["my-ser", "pickle"]},
worker_kwargs={"serializers": ["my-ser", "pickle"]},
)
async def test_context_specific_serialization(c, s, a, b):
register_serialization_family("my-ser", my_dumps, my_loads)
try:
# Create the object on A, force communication to B
x = c.submit(MyObject, x=1, y=2, workers=a.address)
y = c.submit(lambda x: x, x, workers=b.address)
await wait(y)
key = y.key
def check(dask_worker):
# Get the context from the object stored on B
my_obj = dask_worker.data[key]
return my_obj.context
result = await c.run(check, workers=[b.address])
expected = {"sender": a.address, "recipient": b.address}
assert result[b.address]["sender"]["address"] == a.address # see origin worker
z = await y # bring object to local process
assert z.x == 1 and z.y == 2
assert z.context["sender"]["address"] == b.address
finally:
from distributed.protocol.serialize import families
del families["my-ser"]
@gen_cluster(client=True)
async def test_context_specific_serialization_class(c, s, a, b):
register_serialization(MyObject, my_dumps, my_loads)
# Create the object on A, force communication to B
x = c.submit(MyObject, x=1, y=2, workers=a.address)
y = c.submit(lambda x: x, x, workers=b.address)
await wait(y)
key = y.key
def check(dask_worker):
# Get the context from the object stored on B
my_obj = dask_worker.data[key]
return my_obj.context
result = await c.run(check, workers=[b.address])
assert result[b.address]["sender"]["address"] == a.address # see origin worker
z = await y # bring object to local process
assert z.x == 1 and z.y == 2
assert z.context["sender"]["address"] == b.address
def test_serialize_raises():
class Foo:
pass
@dask_serialize.register(Foo)
def dumps(f):
raise Exception("Hello-123")
with pytest.raises(Exception) as info:
deserialize(*serialize(Foo()))
assert "Hello-123" in str(info.value)
@gen_test()
@pytest.mark.parametrize("n", range(100, 600, 50))
async def test_deeply_nested_structures(n):
"""sizeof() raises RecursionError at ~140 recursion depth.
msgpack doesn't raise until 512 (sometimes 256 depending on compile options).
These thresholds change substantially between python versions, msgpack versions, and
platforms.
Test that when sizeof() starts failing, things keep working until msgpack fails.
"""
original = outer = {}
inner = {}
for _ in range(n):
outer["children"] = inner
outer, inner = inner, {}
try:
await to_frames(original)
except ValueError as e:
# msgpack failed
assert "recursion limit exceeded" in str(e)
def test_different_compression_families():
"""Test serialization of a collection of items that use different compression
This scenario happens for instance when serializing collections of
cupy and numpy arrays.
"""
class MyObjWithCompression:
pass
class MyObjWithNoCompression:
pass
def my_dumps_compression(obj, context=None):
if not isinstance(obj, MyObjWithCompression):
raise NotImplementedError()
header = {"compression": [True]}
return header, [bytes(2**20)]
def my_dumps_no_compression(obj, context=None):
if not isinstance(obj, MyObjWithNoCompression):
raise NotImplementedError()
header = {"compression": [False]}
return header, [bytes(2**20)]
def my_loads(header, frames):
return pickle.loads(frames[0])
register_serialization_family("with-compression", my_dumps_compression, my_loads)
register_serialization_family("no-compression", my_dumps_no_compression, my_loads)
header, _ = serialize(
[MyObjWithCompression(), MyObjWithNoCompression()],
serializers=("with-compression", "no-compression"),
on_error="raise",
iterate_collection=True,
)
assert header["compression"] == [True, False]
@gen_test()
async def test_frame_split():
data = b"1234abcd" * (2**20) # 8 MiB
assert dask.sizeof.sizeof(data) == dask.utils.parse_bytes("8MiB")
size = dask.utils.parse_bytes("3MiB")
split_frames = await to_frames({"x": to_serialize(data)}, frame_split_size=size)
print(split_frames)
assert len(split_frames) == 3 + 2 # Three splits and two headers
size = dask.utils.parse_bytes("5MiB")
split_frames = await to_frames({"x": to_serialize(data)}, frame_split_size=size)
assert len(split_frames) == 2 + 2 # Two splits and two headers
@pytest.mark.parametrize(
"data,is_serializable",
[
([], False),
({}, False),
({i: i for i in range(10)}, False),
(set(range(10)), False),
(tuple(range(100)), False),
({"x": MyObj(5)}, True),
({"x": {"y": MyObj(5)}}, True),
pytest.param(
[1, MyObj(5)],
True,
marks=pytest.mark.xfail(reason="Only checks 0th element for now."),
),
([MyObj([0, 1, 2]), 1], True),
(tuple([MyObj(None)]), True),
({("x", i): MyObj(5) for i in range(100)}, True),
(memoryview(b"hello"), True),
pytest.param(
memoryview(
np.random.random((3, 4)) # type: ignore
if np is not None
else b"skip np.random"
),
True,
marks=pytest.mark.skipif(np is None, reason="Test needs numpy"),
),
],
)
def test_check_dask_serializable(data, is_serializable):
result = check_dask_serializable(data)
expected = is_serializable
assert result == expected
@pytest.mark.parametrize(
"serializers",
[["msgpack"], ["pickle"], ["msgpack", "pickle"], ["pickle", "msgpack"]],
)
def test_serialize_lists(serializers):
data_in = ["a", 2, "c", None, "e", 6]
header, frames = serialize(data_in, serializers=serializers)
data_out = deserialize(header, frames)
assert data_in == data_out
@pytest.mark.parametrize(
"data_in",
[
memoryview(b"hello"),
pytest.param(
memoryview(
np.random.random((3, 4)) # type: ignore
if np is not None
else b"skip np.random"
),
marks=pytest.mark.skipif(np is None, reason="Test needs numpy"),
),
],
)
def test_deser_memoryview(data_in):
header, frames = serialize(data_in)
assert header["type"] == "memoryview"
assert frames[0] is data_in
data_out = deserialize(header, frames)
assert data_in == data_out
@pytest.mark.skipif(np is None, reason="Test needs numpy")
def test_ser_memoryview_object():
data_in = memoryview(np.array(["hello"], dtype=object))
with pytest.raises(TypeError):
serialize(data_in, on_error="raise")
def test_ser_empty_1d_memoryview():
mv = memoryview(b"")
# serialize empty `memoryview`
header, frames = serialize(mv)
assert frames[0] == mv
# deserialize empty `memoryview`
mv2 = deserialize(header, frames)
assert type(mv2) == type(mv)
assert mv2.format == mv.format
assert mv2 == mv
def test_ser_empty_nd_memoryview():
mv = memoryview(b"12").cast("B", (1, 2))[:0]
# serialize empty `memoryview`
with pytest.raises(TypeError):
serialize(mv, on_error="raise")
@gen_cluster(client=True, Worker=Nanny)
async def test_large_pickled_object(c, s, a, b):
np = pytest.importorskip("numpy")
class Data:
def __init__(self, n):
self.data = np.empty(n, dtype="u1")
x = Data(100_000_000)
y = await c.scatter(x, workers=[a.worker_address])
z = c.submit(lambda x: x, y, workers=[b.worker_address])
await z
def test__is_msgpack_serializable():
assert _is_msgpack_serializable(None)
assert _is_msgpack_serializable("a")
assert _is_msgpack_serializable(1)
assert _is_msgpack_serializable(1.0)
assert _is_msgpack_serializable(b"0")
assert _is_msgpack_serializable({"a": "b"})
assert _is_msgpack_serializable(["a"])
assert _is_msgpack_serializable(("a",))
class C:
def __hash__(self):
return 5
assert not _is_msgpack_serializable(["a", C()])
assert not _is_msgpack_serializable(("a", C()))
assert not _is_msgpack_serializable(C())
assert not _is_msgpack_serializable({C(): "foo"})
assert not _is_msgpack_serializable({"foo": C()})
| MyObject |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 535583,
"end": 536805
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("contributions", "repository")
contributions = sgqlc.types.Field(
sgqlc.types.non_null(CreatedPullRequestContributionConnection),
graphql_name="contributions",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"order_by",
sgqlc.types.Arg(
ContributionOrder,
graphql_name="orderBy",
default={"direction": "DESC"},
),
),
)
),
)
repository = sgqlc.types.Field(
sgqlc.types.non_null("Repository"), graphql_name="repository"
)
| PullRequestContributionsByRepository |
python | huggingface__transformers | src/transformers/models/detr/image_processing_detr_fast.py | {
"start": 9456,
"end": 40321
} | class ____(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_DEFAULT_MEAN
image_std = IMAGENET_DEFAULT_STD
format = AnnotationFormat.COCO_DETECTION
do_resize = True
do_rescale = True
do_normalize = True
do_pad = True
size = {"shortest_edge": 800, "longest_edge": 1333}
default_to_square = False
model_input_names = ["pixel_values", "pixel_mask"]
valid_kwargs = DetrImageProcessorKwargs
def __init__(self, **kwargs: Unpack[DetrImageProcessorKwargs]) -> None:
kwargs.setdefault("do_pad", kwargs.pop("pad_and_return_pixel_mask", self.do_pad))
size = kwargs.pop("size", None)
max_size = None if size is None else kwargs.pop("max_size", 1333)
size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333}
self.size = get_size_dict(size, max_size=max_size, default_to_square=False)
# Backwards compatibility
do_convert_annotations = kwargs.get("do_convert_annotations")
do_normalize = kwargs.get("do_normalize")
if do_convert_annotations is None and getattr(self, "do_convert_annotations", None) is None:
self.do_convert_annotations = do_normalize if do_normalize is not None else self.do_normalize
super().__init__(**kwargs)
def prepare_annotation(
self,
image: torch.Tensor,
target: dict,
format: Optional[AnnotationFormat] = None,
return_segmentation_masks: Optional[bool] = None,
masks_path: Optional[Union[str, pathlib.Path]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> dict:
"""
Prepare an annotation for feeding into DETR model.
"""
format = format if format is not None else self.format
if format == AnnotationFormat.COCO_DETECTION:
return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks
target = prepare_coco_detection_annotation(
image, target, return_segmentation_masks, input_data_format=input_data_format
)
elif format == AnnotationFormat.COCO_PANOPTIC:
return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks
target = prepare_coco_panoptic_annotation(
image,
target,
masks_path=masks_path,
return_masks=return_segmentation_masks,
input_data_format=input_data_format,
)
else:
raise ValueError(f"Format {format} is not supported.")
return target
def resize(
self,
image: torch.Tensor,
size: SizeDict,
interpolation: Optional["F.InterpolationMode"] = None,
**kwargs,
) -> torch.Tensor:
"""
Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an
int, smaller edge of the image will be matched to this number.
Args:
image (`torch.Tensor`):
Image to resize.
size (`SizeDict`):
Size of the image's `(height, width)` dimensions after resizing. Available options are:
- `{"height": int, "width": int}`: The image will be resized to the exact size `(height, width)`.
Do NOT keep the aspect ratio.
- `{"shortest_edge": int, "longest_edge": int}`: The image will be resized to a maximum size respecting
the aspect ratio and keeping the shortest edge less or equal to `shortest_edge` and the longest edge
less or equal to `longest_edge`.
- `{"max_height": int, "max_width": int}`: The image will be resized to the maximum size respecting the
aspect ratio and keeping the height less or equal to `max_height` and the width less or equal to
`max_width`.
interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
Resampling filter to use if resizing the image.
"""
interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR
if size.shortest_edge and size.longest_edge:
# Resize the image so that the shortest edge or the longest edge is of the given size
# while maintaining the aspect ratio of the original image.
new_size = get_size_with_aspect_ratio(
image.size()[-2:],
size["shortest_edge"],
size["longest_edge"],
)
elif size.max_height and size.max_width:
new_size = get_image_size_for_max_height_width(image.size()[-2:], size["max_height"], size["max_width"])
elif size.height and size.width:
new_size = (size["height"], size["width"])
else:
raise ValueError(
"Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got"
f" {size.keys()}."
)
image = F.resize(
image,
size=new_size,
interpolation=interpolation,
**kwargs,
)
return image
def resize_annotation(
self,
annotation: dict[str, Any],
orig_size: tuple[int, int],
target_size: tuple[int, int],
threshold: float = 0.5,
interpolation: Optional["F.InterpolationMode"] = None,
):
"""
Resizes an annotation to a target size.
Args:
annotation (`dict[str, Any]`):
The annotation dictionary.
orig_size (`tuple[int, int]`):
The original size of the input image.
target_size (`tuple[int, int]`):
The target size of the image, as returned by the preprocessing `resize` step.
threshold (`float`, *optional*, defaults to 0.5):
The threshold used to binarize the segmentation masks.
resample (`InterpolationMode`, defaults to `F.InterpolationMode.NEAREST_EXACT`):
The resampling filter to use when resizing the masks.
"""
interpolation = interpolation if interpolation is not None else F.InterpolationMode.NEAREST_EXACT
ratio_height, ratio_width = [target / orig for target, orig in zip(target_size, orig_size)]
new_annotation = {}
new_annotation["size"] = target_size
for key, value in annotation.items():
if key == "boxes":
boxes = value
scaled_boxes = boxes * torch.as_tensor(
[ratio_width, ratio_height, ratio_width, ratio_height], dtype=torch.float32, device=boxes.device
)
new_annotation["boxes"] = scaled_boxes
elif key == "area":
area = value
scaled_area = area * (ratio_width * ratio_height)
new_annotation["area"] = scaled_area
elif key == "masks":
masks = value[:, None]
masks = [F.resize(mask, target_size, interpolation=interpolation) for mask in masks]
masks = torch.stack(masks).to(torch.float32)
masks = masks[:, 0] > threshold
new_annotation["masks"] = masks
elif key == "size":
new_annotation["size"] = target_size
else:
new_annotation[key] = value
return new_annotation
def normalize_annotation(self, annotation: dict, image_size: tuple[int, int]) -> dict:
image_height, image_width = image_size
norm_annotation = {}
for key, value in annotation.items():
if key == "boxes":
boxes = value
boxes = corners_to_center_format(boxes)
boxes /= torch.as_tensor(
[image_width, image_height, image_width, image_height], dtype=torch.float32, device=boxes.device
)
norm_annotation[key] = boxes
else:
norm_annotation[key] = value
return norm_annotation
def _update_annotation_for_padded_image(
self,
annotation: dict,
input_image_size: tuple[int, int],
output_image_size: tuple[int, int],
padding,
update_bboxes,
) -> dict:
"""
Update the annotation for a padded image.
"""
new_annotation = {}
new_annotation["size"] = output_image_size
ratio_height, ratio_width = (input / output for output, input in zip(output_image_size, input_image_size))
for key, value in annotation.items():
if key == "masks":
masks = value
masks = F.pad(
masks,
padding,
fill=0,
)
masks = safe_squeeze(masks, 1)
new_annotation["masks"] = masks
elif key == "boxes" and update_bboxes:
boxes = value
boxes *= torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height], device=boxes.device)
new_annotation["boxes"] = boxes
elif key == "size":
new_annotation["size"] = output_image_size
else:
new_annotation[key] = value
return new_annotation
def pad(
self,
image: torch.Tensor,
padded_size: tuple[int, int],
annotation: Optional[dict[str, Any]] = None,
update_bboxes: bool = True,
fill: int = 0,
):
original_size = image.size()[-2:]
padding_bottom = padded_size[0] - original_size[0]
padding_right = padded_size[1] - original_size[1]
if padding_bottom < 0 or padding_right < 0:
raise ValueError(
f"Padding dimensions are negative. Please make sure that the padded size is larger than the "
f"original size. Got padded size: {padded_size}, original size: {original_size}."
)
if original_size != padded_size:
padding = [0, 0, padding_right, padding_bottom]
image = F.pad(image, padding, fill=fill)
if annotation is not None:
annotation = self._update_annotation_for_padded_image(
annotation, original_size, padded_size, padding, update_bboxes
)
# Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.
pixel_mask = torch.zeros(padded_size, dtype=torch.int64, device=image.device)
pixel_mask[: original_size[0], : original_size[1]] = 1
return image, pixel_mask, annotation
def _preprocess(
self,
images: list["torch.Tensor"],
annotations: Optional[Union[AnnotationType, list[AnnotationType]]],
masks_path: Optional[Union[str, pathlib.Path]],
return_segmentation_masks: bool,
do_resize: bool,
size: SizeDict,
interpolation: Optional["F.InterpolationMode"],
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
do_convert_annotations: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
do_pad: bool,
pad_size: Optional[SizeDict],
format: Optional[Union[str, AnnotationFormat]],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature:
"""
Preprocess an image or a batch of images so that it can be used by the model.
"""
if annotations is not None and isinstance(annotations, dict):
annotations = [annotations]
if annotations is not None and len(images) != len(annotations):
raise ValueError(
f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match."
)
format = AnnotationFormat(format)
if annotations is not None:
validate_annotations(format, SUPPORTED_ANNOTATION_FORMATS, annotations)
if (
masks_path is not None
and format == AnnotationFormat.COCO_PANOPTIC
and not isinstance(masks_path, (pathlib.Path, str))
):
raise ValueError(
"The path to the directory containing the mask PNG files should be provided as a"
f" `pathlib.Path` or string object, but is {type(masks_path)} instead."
)
data = {}
processed_images = []
processed_annotations = []
pixel_masks = [] # Initialize pixel_masks here
for image, annotation in zip(images, annotations if annotations is not None else [None] * len(images)):
# prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image)
if annotations is not None:
annotation = self.prepare_annotation(
image,
annotation,
format,
return_segmentation_masks=return_segmentation_masks,
masks_path=masks_path,
input_data_format=ChannelDimension.FIRST,
)
if do_resize:
resized_image = self.resize(image, size=size, interpolation=interpolation)
if annotations is not None:
annotation = self.resize_annotation(
annotation,
orig_size=image.size()[-2:],
target_size=resized_image.size()[-2:],
)
image = resized_image
# Fused rescale and normalize
image = self.rescale_and_normalize(image, do_rescale, rescale_factor, do_normalize, image_mean, image_std)
if do_convert_annotations and annotations is not None:
annotation = self.normalize_annotation(annotation, get_image_size(image, ChannelDimension.FIRST))
processed_images.append(image)
processed_annotations.append(annotation)
images = processed_images
annotations = processed_annotations if annotations is not None else None
if do_pad:
# depends on all resized image shapes so we need another loop
if pad_size is not None:
padded_size = (pad_size.height, pad_size.width)
else:
padded_size = get_max_height_width(images)
padded_images = []
padded_annotations = []
for image, annotation in zip(images, annotations if annotations is not None else [None] * len(images)):
# Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...}
if padded_size == image.size()[-2:]:
padded_images.append(image)
pixel_masks.append(torch.ones(padded_size, dtype=torch.int64, device=image.device))
padded_annotations.append(annotation)
continue
image, pixel_mask, annotation = self.pad(
image, padded_size, annotation=annotation, update_bboxes=do_convert_annotations
)
padded_images.append(image)
padded_annotations.append(annotation)
pixel_masks.append(pixel_mask)
images = padded_images
annotations = padded_annotations if annotations is not None else None
data.update({"pixel_mask": torch.stack(pixel_masks, dim=0)})
data.update({"pixel_values": torch.stack(images, dim=0)})
encoded_inputs = BatchFeature(data, tensor_type=return_tensors)
if annotations is not None:
encoded_inputs["labels"] = [
BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations
]
return encoded_inputs
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_object_detection
def post_process_object_detection(
self, outputs, threshold: float = 0.5, target_sizes: Optional[Union[TensorType, list[tuple]]] = None
):
"""
Converts the raw output of [`DetrForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
bottom_right_x, bottom_right_y) format. Only supports PyTorch.
Args:
outputs ([`DetrObjectDetectionOutput`]):
Raw outputs of the model.
threshold (`float`, *optional*):
Score threshold to keep object detection predictions.
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
`(height, width)` of each image in the batch. If unset, predictions will not be resized.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
in the batch as predicted by the model.
"""
out_logits, out_bbox = outputs.logits, outputs.pred_boxes
if target_sizes is not None:
if len(out_logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
prob = nn.functional.softmax(out_logits, -1)
scores, labels = prob[..., :-1].max(-1)
# Convert to [x0, y0, x1, y1] format
boxes = center_to_corners_format(out_bbox)
# Convert from relative [0, 1] to absolute [0, height] coordinates
if target_sizes is not None:
if isinstance(target_sizes, list):
img_h = torch.Tensor([i[0] for i in target_sizes])
img_w = torch.Tensor([i[1] for i in target_sizes])
else:
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
boxes = boxes * scale_fct[:, None, :]
results = []
for s, l, b in zip(scores, labels, boxes):
score = s[s > threshold]
label = l[s > threshold]
box = b[s > threshold]
results.append({"scores": score, "labels": label, "boxes": box})
return results
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_semantic_segmentation
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple[int, int]]] = None):
"""
Converts the output of [`DetrForSegmentation`] into semantic segmentation maps. Only supports PyTorch.
Args:
outputs ([`DetrForSegmentation`]):
Raw outputs of the model.
target_sizes (`list[tuple[int, int]]`, *optional*):
A list of tuples (`tuple[int, int]`) containing the target size (height, width) of each image in the
batch. If unset, predictions will not be resized.
Returns:
`list[torch.Tensor]`:
A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width)
corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each
`torch.Tensor` correspond to a semantic class id.
"""
class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1]
masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width]
# Remove the null class `[..., :-1]`
masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]
masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
# Semantic segmentation logits of shape (batch_size, num_classes, height, width)
segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs)
batch_size = class_queries_logits.shape[0]
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if batch_size != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
semantic_segmentation = []
for idx in range(batch_size):
resized_logits = nn.functional.interpolate(
segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = segmentation.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_instance_segmentation
def post_process_instance_segmentation(
self,
outputs,
threshold: float = 0.5,
mask_threshold: float = 0.5,
overlap_mask_area_threshold: float = 0.8,
target_sizes: Optional[list[tuple[int, int]]] = None,
return_coco_annotation: Optional[bool] = False,
) -> list[dict]:
"""
Converts the output of [`DetrForSegmentation`] into instance segmentation predictions. Only supports PyTorch.
Args:
outputs ([`DetrForSegmentation`]):
Raw outputs of the model.
threshold (`float`, *optional*, defaults to 0.5):
The probability score threshold to keep predicted instance masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
The overlap mask area threshold to merge or discard small disconnected parts within each binary
instance mask.
target_sizes (`list[Tuple]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested
final size (height, width) of each prediction. If unset, predictions will not be resized.
return_coco_annotation (`bool`, *optional*):
Defaults to `False`. If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE)
format.
Returns:
`list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
- **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id` or
`list[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to
`True`. Set to `None` if no mask if found above `threshold`.
- **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- An integer representing the `segment_id`.
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
- **score** -- Prediction score of segment with `segment_id`.
"""
class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1]
masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width]
batch_size = class_queries_logits.shape[0]
num_labels = class_queries_logits.shape[-1] - 1
mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
# Predicted label and score of each query (batch_size, num_queries)
pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1)
# Loop over items in batch size
results: list[dict[str, TensorType]] = []
for i in range(batch_size):
mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects(
mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels
)
# No mask found
if mask_probs_item.shape[0] <= 0:
height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:]
segmentation = torch.zeros((height, width)) - 1
results.append({"segmentation": segmentation, "segments_info": []})
continue
# Get segmentation map and segment information of batch item
target_size = target_sizes[i] if target_sizes is not None else None
segmentation, segments = compute_segments(
mask_probs=mask_probs_item,
pred_scores=pred_scores_item,
pred_labels=pred_labels_item,
mask_threshold=mask_threshold,
overlap_mask_area_threshold=overlap_mask_area_threshold,
label_ids_to_fuse=[],
target_size=target_size,
)
# Return segmentation map in run-length encoding (RLE) format
if return_coco_annotation:
segmentation = convert_segmentation_to_rle(segmentation)
results.append({"segmentation": segmentation, "segments_info": segments})
return results
# Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_panoptic_segmentation
def post_process_panoptic_segmentation(
self,
outputs,
threshold: float = 0.5,
mask_threshold: float = 0.5,
overlap_mask_area_threshold: float = 0.8,
label_ids_to_fuse: Optional[set[int]] = None,
target_sizes: Optional[list[tuple[int, int]]] = None,
) -> list[dict]:
"""
Converts the output of [`DetrForSegmentation`] into image panoptic segmentation predictions. Only supports
PyTorch.
Args:
outputs ([`DetrForSegmentation`]):
The outputs from [`DetrForSegmentation`].
threshold (`float`, *optional*, defaults to 0.5):
The probability score threshold to keep predicted instance masks.
mask_threshold (`float`, *optional*, defaults to 0.5):
Threshold to use when turning the predicted masks into binary values.
overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8):
The overlap mask area threshold to merge or discard small disconnected parts within each binary
instance mask.
label_ids_to_fuse (`Set[int]`, *optional*):
The labels in this state will have all their instances be fused together. For instance we could say
there can only be one sky in an image, but several persons, so the label ID for sky would be in that
set, but not the one for person.
target_sizes (`list[Tuple]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]]`) corresponds to the requested
final size (height, width) of each prediction in batch. If unset, predictions will not be resized.
Returns:
`list[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys:
- **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id` or
`None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized to
the corresponding `target_sizes` entry.
- **segments_info** -- A dictionary that contains additional information on each segment.
- **id** -- an integer representing the `segment_id`.
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`.
- **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise.
Multiple instances of the same class / label were fused and assigned a single `segment_id`.
- **score** -- Prediction score of segment with `segment_id`.
"""
if label_ids_to_fuse is None:
logger.warning_once("`label_ids_to_fuse` unset. No instance will be fused.")
label_ids_to_fuse = set()
class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1]
masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width]
batch_size = class_queries_logits.shape[0]
num_labels = class_queries_logits.shape[-1] - 1
mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
# Predicted label and score of each query (batch_size, num_queries)
pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1)
# Loop over items in batch size
results: list[dict[str, TensorType]] = []
for i in range(batch_size):
mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects(
mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels
)
# No mask found
if mask_probs_item.shape[0] <= 0:
height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:]
segmentation = torch.zeros((height, width)) - 1
results.append({"segmentation": segmentation, "segments_info": []})
continue
# Get segmentation map and segment information of batch item
target_size = target_sizes[i] if target_sizes is not None else None
segmentation, segments = compute_segments(
mask_probs=mask_probs_item,
pred_scores=pred_scores_item,
pred_labels=pred_labels_item,
mask_threshold=mask_threshold,
overlap_mask_area_threshold=overlap_mask_area_threshold,
label_ids_to_fuse=label_ids_to_fuse,
target_size=target_size,
)
results.append({"segmentation": segmentation, "segments_info": segments})
return results
__all__ = ["DetrImageProcessorFast"]
| DetrImageProcessorFast |
python | oauthlib__oauthlib | oauthlib/openid/connect/core/exceptions.py | {
"start": 408,
"end": 824
} | class ____(OpenIDClientError):
"""
The Authorization Server requires End-User interaction to proceed.
This error MAY be returned when the prompt parameter value in the
Authentication Request is none, but the Authentication Request cannot be
completed without displaying a user interface for End-User interaction.
"""
error = 'interaction_required'
status_code = 401
| InteractionRequired |
python | streamlit__streamlit | lib/streamlit/elements/widgets/audio_input.py | {
"start": 3298,
"end": 12411
} | class ____:
@gather_metrics("audio_input")
def audio_input(
self,
label: str,
*,
sample_rate: int | None = 16000,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
width: WidthWithoutContent = "stretch",
) -> UploadedFile | None:
r"""Display a widget that returns an audio recording from the user's microphone.
Parameters
----------
label : str
A short label explaining to the user what this widget is used for.
The label can optionally contain GitHub-flavored Markdown of the
following types: Bold, Italics, Strikethroughs, Inline Code, Links,
and Images. Images display like icons, with a max height equal to
the font height.
Unsupported Markdown elements are unwrapped so only their children
(text contents) render. Display unsupported elements as literal
characters by backslash-escaping them. E.g.,
``"1\. Not an ordered list"``.
See the ``body`` parameter of |st.markdown|_ for additional,
supported Markdown directives.
For accessibility reasons, you should never set an empty label, but
you can hide it with ``label_visibility`` if needed. In the future,
we may disallow empty labels by raising an exception.
.. |st.markdown| replace:: ``st.markdown``
.. _st.markdown: https://docs.streamlit.io/develop/api-reference/text/st.markdown
sample_rate : int or None
The target sample rate for the audio recording in Hz.
This defaults to 16000 Hz, which is optimal for speech recognition.
The following sample rates are supported: 8000, 11025, 16000,
22050, 24000, 32000, 44100, or 48000. If this is ``None``, the
widget uses the browser's default sample rate (typically 44100 or
48000 Hz).
key : str or int
An optional string or integer to use as the unique key for the widget.
If this is omitted, a key will be generated for the widget
based on its content. No two widgets may have the same key.
help : str or None
A tooltip that gets displayed next to the widget label. Streamlit
only displays the tooltip when ``label_visibility="visible"``. If
this is ``None`` (default), no tooltip is displayed.
The tooltip can optionally contain GitHub-flavored Markdown,
including the Markdown directives described in the ``body``
parameter of ``st.markdown``.
on_change : callable
An optional callback invoked when this audio input's value
changes.
args : list or tuple
An optional list or tuple of args to pass to the callback.
kwargs : dict
An optional dict of kwargs to pass to the callback.
disabled : bool
An optional boolean that disables the audio input if set to
``True``. Default is ``False``.
label_visibility : "visible", "hidden", or "collapsed"
The visibility of the label. The default is ``"visible"``. If this
is ``"hidden"``, Streamlit displays an empty spacer instead of the
label, which can help keep the widget aligned with other widgets.
If this is ``"collapsed"``, Streamlit displays no label or spacer.
width : "stretch" or int
The width of the audio input widget. This can be one of the following:
- ``"stretch"`` (default): The width of the widget matches the
width of the parent container.
- An integer specifying the width in pixels: The widget has a
fixed width. If the specified width is greater than the width of
the parent container, the width of the widget matches the width
of the parent container.
Returns
-------
None or UploadedFile
The ``UploadedFile`` class is a subclass of ``BytesIO``, and
therefore is "file-like". This means you can pass an instance of it
anywhere a file is expected. The MIME type for the audio data is
``audio/wav``.
.. Note::
The resulting ``UploadedFile`` is subject to the size
limitation configured in ``server.maxUploadSize``. If you
expect large sound files, update the configuration option
appropriately.
Examples
--------
*Example 1:* Record a voice message and play it back.*
The default sample rate of 16000 Hz is optimal for speech recognition.
>>> import streamlit as st
>>>
>>> audio_value = st.audio_input("Record a voice message")
>>>
>>> if audio_value:
... st.audio(audio_value)
.. output::
https://doc-audio-input.streamlit.app/
height: 260px
*Example 2:* Record high-fidelity audio and play it back.*
Higher sample rates can create higher-quality, larger audio files. This
might require a nicer microphone to fully appreciate the difference.
>>> import streamlit as st
>>>
>>> audio_value = st.audio_input("Record high quality audio", sample_rate=48000)
>>>
>>> if audio_value:
... st.audio(audio_value)
.. output::
https://doc-audio-input-high-rate.streamlit.app/
height: 260px
"""
# Validate sample_rate parameter
if sample_rate is not None and sample_rate not in ALLOWED_SAMPLE_RATES:
raise StreamlitAPIException(
f"Invalid sample_rate: {sample_rate}. "
f"Must be one of {sorted(ALLOWED_SAMPLE_RATES)} Hz, or None for browser default."
)
ctx = get_script_run_ctx()
return self._audio_input(
label=label,
sample_rate=sample_rate,
key=key,
help=help,
on_change=on_change,
args=args,
kwargs=kwargs,
disabled=disabled,
label_visibility=label_visibility,
width=width,
ctx=ctx,
)
def _audio_input(
self,
label: str,
sample_rate: int | None = 16000,
key: Key | None = None,
help: str | None = None,
on_change: WidgetCallback | None = None,
args: WidgetArgs | None = None,
kwargs: WidgetKwargs | None = None,
*, # keyword-only arguments:
disabled: bool = False,
label_visibility: LabelVisibility = "visible",
width: WidthWithoutContent = "stretch",
ctx: ScriptRunContext | None = None,
) -> UploadedFile | None:
key = to_key(key)
check_widget_policies(
self.dg,
key,
on_change,
default_value=None,
writes_allowed=False,
)
maybe_raise_label_warnings(label, label_visibility)
element_id = compute_and_register_element_id(
"audio_input",
user_key=key,
# Treat the provided key as the main identity.
key_as_main_identity=True,
dg=self.dg,
label=label,
help=help,
width=width,
sample_rate=sample_rate,
)
audio_input_proto = AudioInputProto()
audio_input_proto.id = element_id
audio_input_proto.label = label
audio_input_proto.form_id = current_form_id(self.dg)
audio_input_proto.disabled = disabled
audio_input_proto.label_visibility.value = get_label_visibility_proto_value(
label_visibility
)
# Set sample_rate in protobuf if specified
if sample_rate is not None:
audio_input_proto.sample_rate = sample_rate
if label and help is not None:
audio_input_proto.help = dedent(help)
validate_width(width)
layout_config = LayoutConfig(width=width)
serde = AudioInputSerde()
audio_input_state = register_widget(
audio_input_proto.id,
on_change_handler=on_change,
args=args,
kwargs=kwargs,
deserializer=serde.deserialize,
serializer=serde.serialize,
ctx=ctx,
value_type="file_uploader_state_value",
)
self.dg._enqueue("audio_input", audio_input_proto, layout_config=layout_config)
if isinstance(audio_input_state.value, DeletedFile):
return None
return audio_input_state.value
@property
def dg(self) -> DeltaGenerator:
"""Get our DeltaGenerator."""
return cast("DeltaGenerator", self)
| AudioInputMixin |
python | google__pytype | pytype/rewrite/function_call_helper_test.py | {
"start": 2187,
"end": 2799
} | class ____(TestBase):
def test_build(self):
code = test_utils.parse('def C(): pass').consts[0]
builder = abstract.InterpreterFunction(
ctx=self.ctx,
name='C',
code=code,
enclosing_scope=(),
parent_frame=self.helper._frame,
)
args = abstract.Args(
posargs=(builder.to_variable(), self.ctx.consts['C'].to_variable()),
frame=self.helper._frame,
)
self.helper._frame.step() # initialize frame state
cls = self.helper.build_class(args)
self.assertEqual(cls.name, 'C')
if __name__ == '__main__':
unittest.main()
| BuildClassTest |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 7793,
"end": 8002
} | class ____(BaseModel):
"""
Connection Test serializer for responses.
"""
status: Annotated[bool, Field(title="Status")]
message: Annotated[str, Field(title="Message")]
| ConnectionTestResponse |
python | dask__dask | dask/dataframe/dask_expr/_concat.py | {
"start": 11807,
"end": 12484
} | class ____(Blockwise):
_parameters = ["ignore_order", "_kwargs", "axis", "join"]
_defaults = {"ignore_order": False, "_kwargs": {}, "axis": 1, "join": "outer"}
_keyword_only = ["ignore_order", "_kwargs", "axis", "join"]
@functools.cached_property
def _meta(self):
return methods.concat(
[df._meta for df in self.dependencies()],
ignore_order=self.ignore_order,
axis=self.axis,
join=self.join,
**self.operand("_kwargs"),
)
@staticmethod
def operation(*args, ignore_order, _kwargs, axis, join):
return concat_and_check(args, ignore_order=ignore_order)
| ConcatUnindexed |
python | ray-project__ray | python/ray/tune/utils/object_cache.py | {
"start": 195,
"end": 5512
} | class ____:
"""Cache up to some maximum count given a grouping key.
This object cache can e.g. be used to cache Ray Tune trainable actors
given their resource requirements (reuse_actors=True).
If the max number of cached objects for a grouping key is reached,
no more objects for this group will be cached.
However, if `may_keep_one=True`, one object (globally across all grouping
keys) may be cached, even if the max number of objects is 0. This is to
allow to cache an object if the max number of objects of this key
will increase shortly after (as is the case e.g. in the Ray Tune control
loop).
Args:
may_keep_one: If True, one object (globally) may be cached if no desired
maximum objects are defined.
"""
def __init__(self, may_keep_one: bool = True):
self._num_cached_objects: int = 0
self._cached_objects: Dict[T, List[U]] = defaultdict(list)
self._max_num_objects: Counter[T] = Counter()
self._may_keep_one = may_keep_one
@property
def num_cached_objects(self):
return self._num_cached_objects
@property
def total_max_objects(self):
# Counter.total() is only available for python 3.10+
return sum(self._max_num_objects.values())
def increase_max(self, key: T, by: int = 1) -> None:
"""Increase number of max objects for this key.
Args:
key: Group key.
by: Decrease by this amount.
"""
self._max_num_objects[key] += by
def decrease_max(self, key: T, by: int = 1) -> None:
"""Decrease number of max objects for this key.
Args:
key: Group key.
by: Decrease by this amount.
"""
self._max_num_objects[key] -= by
def has_cached_object(self, key: T) -> bool:
"""Return True if at least one cached object exists for this key.
Args:
key: Group key.
Returns:
True if at least one cached object exists for this key.
"""
return bool(self._cached_objects[key])
def cache_object(self, key: T, obj: U) -> bool:
"""Cache object for a given key.
This will put the object into a cache, assuming the number
of cached objects for this key is less than the number of
max objects for this key.
An exception is made if `max_keep_one=True` and no other
objects are cached globally. In that case, the object can
still be cached.
Args:
key: Group key.
obj: Object to cache.
Returns:
True if the object has been cached. False otherwise.
"""
# If we have more objects cached already than we desire
if len(self._cached_objects[key]) >= self._max_num_objects[key]:
# If may_keep_one is False, never cache
if not self._may_keep_one:
return False
# If we have more than one other cached object, don't cache
if self._num_cached_objects > 0:
return False
# If any other objects are expected to be cached, don't cache
if any(v for v in self._max_num_objects.values()):
return False
# Otherwise, cache (for now).
self._cached_objects[key].append(obj)
self._num_cached_objects += 1
return True
def pop_cached_object(self, key: T) -> Optional[U]:
"""Get one cached object for a key.
This will remove the object from the cache.
Args:
key: Group key.
Returns:
Cached object.
"""
if not self.has_cached_object(key):
return None
self._num_cached_objects -= 1
return self._cached_objects[key].pop(0)
def flush_cached_objects(self, force_all: bool = False) -> Generator[U, None, None]:
"""Return a generator over cached objects evicted from the cache.
This method yields all cached objects that should be evicted from the
cache for cleanup by the caller.
If the number of max objects is lower than the number of
cached objects for a given key, objects are evicted until
the numbers are equal.
If `max_keep_one=True` (and ``force_all=False``), one cached object
may be retained.
Objects are evicted FIFO.
If ``force_all=True``, all objects are evicted.
Args:
force_all: If True, all objects are flushed. This takes precedence
over ``keep_one``.
Yields:
Evicted objects to be cleaned up by caller.
"""
# If force_all=True, don't keep one.
keep_one = self._may_keep_one and not force_all
for key, objs in self._cached_objects.items():
max_cached = self._max_num_objects[key] if not force_all else 0
if (
self._num_cached_objects == 1
and keep_one
# Only keep this object if we don't expect a different one
and not any(v for v in self._max_num_objects.values())
):
break
while len(objs) > max_cached:
self._num_cached_objects -= 1
yield objs.pop(0)
| _ObjectCache |
python | numpy__numpy | numpy/polynomial/tests/test_hermite.py | {
"start": 12852,
"end": 16023
} | class ____:
def test_hermfit(self):
def f(x):
return x * (x - 1) * (x - 2)
def f2(x):
return x**4 + x**2 + 1
# Test exceptions
assert_raises(ValueError, herm.hermfit, [1], [1], -1)
assert_raises(TypeError, herm.hermfit, [[1]], [1], 0)
assert_raises(TypeError, herm.hermfit, [], [1], 0)
assert_raises(TypeError, herm.hermfit, [1], [[[1]]], 0)
assert_raises(TypeError, herm.hermfit, [1, 2], [1], 0)
assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0)
assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1, 1])
assert_raises(ValueError, herm.hermfit, [1], [1], [-1,])
assert_raises(ValueError, herm.hermfit, [1], [1], [2, -1, 6])
assert_raises(TypeError, herm.hermfit, [1], [1], [])
# Test fit
x = np.linspace(0, 2)
y = f(x)
#
coef3 = herm.hermfit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(herm.hermval(x, coef3), y)
coef3 = herm.hermfit(x, y, [0, 1, 2, 3])
assert_equal(len(coef3), 4)
assert_almost_equal(herm.hermval(x, coef3), y)
#
coef4 = herm.hermfit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(herm.hermval(x, coef4), y)
coef4 = herm.hermfit(x, y, [0, 1, 2, 3, 4])
assert_equal(len(coef4), 5)
assert_almost_equal(herm.hermval(x, coef4), y)
# check things still work if deg is not in strict increasing
coef4 = herm.hermfit(x, y, [2, 3, 4, 1, 0])
assert_equal(len(coef4), 5)
assert_almost_equal(herm.hermval(x, coef4), y)
#
coef2d = herm.hermfit(x, np.array([y, y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
coef2d = herm.hermfit(x, np.array([y, y]).T, [0, 1, 2, 3])
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
y[0::2] = 0
wcoef3 = herm.hermfit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
wcoef3 = herm.hermfit(x, yw, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
# test scaling with complex values x points whose square
# is zero when summed.
x = [1, 1j, -1, -1j]
assert_almost_equal(herm.hermfit(x, x, 1), [0, .5])
assert_almost_equal(herm.hermfit(x, x, [0, 1]), [0, .5])
# test fitting only even Legendre polynomials
x = np.linspace(-1, 1)
y = f2(x)
coef1 = herm.hermfit(x, y, 4)
assert_almost_equal(herm.hermval(x, coef1), y)
coef2 = herm.hermfit(x, y, [0, 2, 4])
assert_almost_equal(herm.hermval(x, coef2), y)
assert_almost_equal(coef1, coef2)
| TestFitting |
python | wandb__wandb | wandb/vendor/pygments/lexers/templates.py | {
"start": 53370,
"end": 53872
} | class ____(RegexLexer):
"""
Base for the `TeaTemplateLexer`. Yields `Token.Other` for area outside of
code blocks.
.. versionadded:: 1.5
"""
tokens = {
'root': [
(r'<%\S?', Keyword, 'sec'),
(r'[^<]+', Other),
(r'<', Other),
],
'sec': [
(r'%>', Keyword, '#pop'),
# note: '\w\W' != '.' without DOTALL.
(r'[\w\W]+?(?=%>|\Z)', using(TeaLangLexer)),
],
}
| TeaTemplateRootLexer |
python | django__django | django/db/backends/oracle/base.py | {
"start": 18280,
"end": 25945
} | class ____:
"""
Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var"
style. This fixes it -- but note that if you want to use a literal "%s" in
a query, you'll need to use "%%s".
"""
charset = "utf-8"
def __init__(self, connection, database):
self.cursor = connection.cursor()
self.cursor.outputtypehandler = self._output_type_handler
self.database = database
@staticmethod
def _output_number_converter(value):
return decimal.Decimal(value) if "." in value else int(value)
@staticmethod
def _get_decimal_converter(precision, scale):
if scale == 0:
return int
context = decimal.Context(prec=precision)
quantize_value = decimal.Decimal(1).scaleb(-scale)
return lambda v: decimal.Decimal(v).quantize(quantize_value, context=context)
@staticmethod
def _output_type_handler(cursor, name, defaultType, length, precision, scale):
"""
Called for each db column fetched from cursors. Return numbers as the
appropriate Python type, and NCLOB with JSON as strings.
"""
if defaultType == Database.NUMBER:
if scale == -127:
if precision == 0:
# NUMBER column: decimal-precision floating point.
# This will normally be an integer from a sequence,
# but it could be a decimal value.
outconverter = FormatStylePlaceholderCursor._output_number_converter
else:
# FLOAT column: binary-precision floating point.
# This comes from FloatField columns.
outconverter = float
elif precision > 0:
# NUMBER(p,s) column: decimal-precision fixed point.
# This comes from IntegerField and DecimalField columns.
outconverter = FormatStylePlaceholderCursor._get_decimal_converter(
precision, scale
)
else:
# No type information. This normally comes from a
# mathematical expression in the SELECT list. Guess int
# or Decimal based on whether it has a decimal point.
outconverter = FormatStylePlaceholderCursor._output_number_converter
return cursor.var(
Database.STRING,
size=255,
arraysize=cursor.arraysize,
outconverter=outconverter,
)
# oracledb 2.0.0+ returns NLOB columns with IS JSON constraints as
# dicts. Use a no-op converter to avoid this.
elif defaultType == Database.DB_TYPE_NCLOB:
return cursor.var(Database.DB_TYPE_NCLOB, arraysize=cursor.arraysize)
def _format_params(self, params):
try:
return {k: OracleParam(v, self, True) for k, v in params.items()}
except AttributeError:
return tuple(OracleParam(p, self, True) for p in params)
def _guess_input_sizes(self, params_list):
# Try dict handling; if that fails, treat as sequence
if hasattr(params_list[0], "keys"):
sizes = {}
for params in params_list:
for k, value in params.items():
if value.input_size:
sizes[k] = value.input_size
if sizes:
self.setinputsizes(**sizes)
else:
# It's not a list of dicts; it's a list of sequences
sizes = [None] * len(params_list[0])
for params in params_list:
for i, value in enumerate(params):
if value.input_size:
sizes[i] = value.input_size
if sizes:
self.setinputsizes(*sizes)
def _param_generator(self, params):
# Try dict handling; if that fails, treat as sequence
if hasattr(params, "items"):
return {k: v.force_bytes for k, v in params.items()}
else:
return [p.force_bytes for p in params]
def _fix_for_params(self, query, params, unify_by_values=False):
# oracledb wants no trailing ';' for SQL statements. For PL/SQL, it
# does want a trailing ';' but not a trailing '/'. However, these
# characters must be included in the original query in case the query
# is being passed to SQL*Plus.
if query.endswith(";") or query.endswith("/"):
query = query[:-1]
if params is None:
params = []
elif hasattr(params, "keys"):
# Handle params as dict
args = {k: ":%s" % k for k in params}
query %= args
elif unify_by_values and params:
# Handle params as a dict with unified query parameters by their
# values. It can be used only in single query execute() because
# executemany() shares the formatted query with each of the params
# list. e.g. for input params = [0.75, 2, 0.75, 'sth', 0.75]
# params_dict = {
# (float, 0.75): ':arg0',
# (int, 2): ':arg1',
# (str, 'sth'): ':arg2',
# }
# args = [':arg0', ':arg1', ':arg0', ':arg2', ':arg0']
# params = {':arg0': 0.75, ':arg1': 2, ':arg2': 'sth'}
# The type of parameters in param_types keys is necessary to avoid
# unifying 0/1 with False/True.
param_types = [(type(param), param) for param in params]
params_dict = {
param_type: ":arg%d" % i
for i, param_type in enumerate(dict.fromkeys(param_types))
}
args = [params_dict[param_type] for param_type in param_types]
params = {
placeholder: param for (_, param), placeholder in params_dict.items()
}
query %= tuple(args)
else:
# Handle params as sequence
args = [(":arg%d" % i) for i in range(len(params))]
query %= tuple(args)
return query, self._format_params(params)
def execute(self, query, params=None):
query, params = self._fix_for_params(query, params, unify_by_values=True)
self._guess_input_sizes([params])
with wrap_oracle_errors():
return self.cursor.execute(query, self._param_generator(params))
def executemany(self, query, params=None):
if not params:
# No params given, nothing to do
return None
# uniform treatment for sequences and iterables
params_iter = iter(params)
query, firstparams = self._fix_for_params(query, next(params_iter))
# we build a list of formatted params; as we're going to traverse it
# more than once, we can't make it lazy by using a generator
formatted = [firstparams] + [self._format_params(p) for p in params_iter]
self._guess_input_sizes(formatted)
with wrap_oracle_errors():
return self.cursor.executemany(
query, [self._param_generator(p) for p in formatted]
)
def close(self):
try:
self.cursor.close()
except Database.InterfaceError:
# already closed
pass
def var(self, *args):
return VariableWrapper(self.cursor.var(*args))
def arrayvar(self, *args):
return VariableWrapper(self.cursor.arrayvar(*args))
def __getattr__(self, attr):
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
| FormatStylePlaceholderCursor |
python | scikit-learn__scikit-learn | sklearn/cluster/_agglomerative.py | {
"start": 28169,
"end": 41155
} | class ____(ClusterMixin, BaseEstimator):
"""
Agglomerative Clustering.
Recursively merges pair of clusters of sample data; uses linkage distance.
Read more in the :ref:`User Guide <hierarchical_clustering>`.
Parameters
----------
n_clusters : int or None, default=2
The number of clusters to find. It must be ``None`` if
``distance_threshold`` is not ``None``.
metric : str or callable, default="euclidean"
Metric used to compute the linkage. Can be "euclidean", "l1", "l2",
"manhattan", "cosine", or "precomputed". If linkage is "ward", only
"euclidean" is accepted. If "precomputed", a distance matrix is needed
as input for the fit method. If connectivity is None, linkage is
"single" and affinity is not "precomputed" any valid pairwise distance
metric can be assigned.
For an example of agglomerative clustering with different metrics, see
:ref:`sphx_glr_auto_examples_cluster_plot_agglomerative_clustering_metrics.py`.
.. versionadded:: 1.2
memory : str or object with the joblib.Memory interface, default=None
Used to cache the output of the computation of the tree.
By default, no caching is done. If a string is given, it is the
path to the caching directory.
connectivity : array-like, sparse matrix, or callable, default=None
Connectivity matrix. Defines for each sample the neighboring
samples following a given structure of the data.
This can be a connectivity matrix itself or a callable that transforms
the data into a connectivity matrix, such as derived from
`kneighbors_graph`. Default is ``None``, i.e, the
hierarchical clustering algorithm is unstructured.
For an example of connectivity matrix using
:class:`~sklearn.neighbors.kneighbors_graph`, see
:ref:`sphx_glr_auto_examples_cluster_plot_ward_structured_vs_unstructured.py`.
compute_full_tree : 'auto' or bool, default='auto'
Stop early the construction of the tree at ``n_clusters``. This is
useful to decrease computation time if the number of clusters is not
small compared to the number of samples. This option is useful only
when specifying a connectivity matrix. Note also that when varying the
number of clusters and using caching, it may be advantageous to compute
the full tree. It must be ``True`` if ``distance_threshold`` is not
``None``. By default `compute_full_tree` is "auto", which is equivalent
to `True` when `distance_threshold` is not `None` or that `n_clusters`
is inferior to the maximum between 100 or `0.02 * n_samples`.
Otherwise, "auto" is equivalent to `False`.
linkage : {'ward', 'complete', 'average', 'single'}, default='ward'
Which linkage criterion to use. The linkage criterion determines which
distance to use between sets of observation. The algorithm will merge
the pairs of cluster that minimize this criterion.
- 'ward' minimizes the variance of the clusters being merged.
- 'average' uses the average of the distances of each observation of
the two sets.
- 'complete' or 'maximum' linkage uses the maximum distances between
all observations of the two sets.
- 'single' uses the minimum of the distances between all observations
of the two sets.
.. versionadded:: 0.20
Added the 'single' option
For examples comparing different `linkage` criteria, see
:ref:`sphx_glr_auto_examples_cluster_plot_linkage_comparison.py`.
distance_threshold : float, default=None
The linkage distance threshold at or above which clusters will not be
merged. If not ``None``, ``n_clusters`` must be ``None`` and
``compute_full_tree`` must be ``True``.
.. versionadded:: 0.21
compute_distances : bool, default=False
Computes distances between clusters even if `distance_threshold` is not
used. This can be used to make dendrogram visualization, but introduces
a computational and memory overhead.
.. versionadded:: 0.24
For an example of dendrogram visualization, see
:ref:`sphx_glr_auto_examples_cluster_plot_agglomerative_dendrogram.py`.
Attributes
----------
n_clusters_ : int
The number of clusters found by the algorithm. If
``distance_threshold=None``, it will be equal to the given
``n_clusters``.
labels_ : ndarray of shape (n_samples)
Cluster labels for each point.
n_leaves_ : int
Number of leaves in the hierarchical tree.
n_connected_components_ : int
The estimated number of connected components in the graph.
.. versionadded:: 0.21
``n_connected_components_`` was added to replace ``n_components_``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
children_ : array-like of shape (n_samples-1, 2)
The children of each non-leaf node. Values less than `n_samples`
correspond to leaves of the tree which are the original samples.
A node `i` greater than or equal to `n_samples` is a non-leaf
node and has children `children_[i - n_samples]`. Alternatively
at the i-th iteration, children[i][0] and children[i][1]
are merged to form node `n_samples + i`.
distances_ : array-like of shape (n_nodes-1,)
Distances between nodes in the corresponding place in `children_`.
Only computed if `distance_threshold` is used or `compute_distances`
is set to `True`.
See Also
--------
FeatureAgglomeration : Agglomerative clustering but for features instead of
samples.
ward_tree : Hierarchical clustering with ward linkage.
Examples
--------
>>> from sklearn.cluster import AgglomerativeClustering
>>> import numpy as np
>>> X = np.array([[1, 2], [1, 4], [1, 0],
... [4, 2], [4, 4], [4, 0]])
>>> clustering = AgglomerativeClustering().fit(X)
>>> clustering
AgglomerativeClustering()
>>> clustering.labels_
array([1, 1, 1, 0, 0, 0])
For a comparison of Agglomerative clustering with other clustering algorithms, see
:ref:`sphx_glr_auto_examples_cluster_plot_cluster_comparison.py`
"""
_parameter_constraints: dict = {
"n_clusters": [Interval(Integral, 1, None, closed="left"), None],
"metric": [
StrOptions(set(_VALID_METRICS) | {"precomputed"}),
callable,
],
"memory": [str, HasMethods("cache"), None],
"connectivity": ["array-like", "sparse matrix", callable, None],
"compute_full_tree": [StrOptions({"auto"}), "boolean"],
"linkage": [StrOptions(set(_TREE_BUILDERS.keys()))],
"distance_threshold": [Interval(Real, 0, None, closed="left"), None],
"compute_distances": ["boolean"],
}
def __init__(
self,
n_clusters=2,
*,
metric="euclidean",
memory=None,
connectivity=None,
compute_full_tree="auto",
linkage="ward",
distance_threshold=None,
compute_distances=False,
):
self.n_clusters = n_clusters
self.distance_threshold = distance_threshold
self.memory = memory
self.connectivity = connectivity
self.compute_full_tree = compute_full_tree
self.linkage = linkage
self.metric = metric
self.compute_distances = compute_distances
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None):
"""Fit the hierarchical clustering from features, or distance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features) or \
(n_samples, n_samples)
Training instances to cluster, or distances between instances if
``metric='precomputed'``.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
self : object
Returns the fitted instance.
"""
X = validate_data(self, X, ensure_min_samples=2)
return self._fit(X)
def _fit(self, X):
"""Fit without validation
Parameters
----------
X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples)
Training instances to cluster, or distances between instances if
``metric='precomputed'``.
Returns
-------
self : object
Returns the fitted instance.
"""
memory = check_memory(self.memory)
if not ((self.n_clusters is None) ^ (self.distance_threshold is None)):
raise ValueError(
"Exactly one of n_clusters and "
"distance_threshold has to be set, and the other "
"needs to be None."
)
if self.distance_threshold is not None and not self.compute_full_tree:
raise ValueError(
"compute_full_tree must be True if distance_threshold is set."
)
if self.linkage == "ward" and self.metric != "euclidean":
raise ValueError(
f"{self.metric} was provided as metric. Ward can only "
"work with euclidean distances."
)
tree_builder = _TREE_BUILDERS[self.linkage]
connectivity = self.connectivity
if self.connectivity is not None:
if callable(self.connectivity):
connectivity = self.connectivity(X)
connectivity = check_array(
connectivity, accept_sparse=["csr", "coo", "lil"]
)
n_samples = len(X)
compute_full_tree = self.compute_full_tree
if self.connectivity is None:
compute_full_tree = True
if compute_full_tree == "auto":
if self.distance_threshold is not None:
compute_full_tree = True
else:
# Early stopping is likely to give a speed up only for
# a large number of clusters. The actual threshold
# implemented here is heuristic
compute_full_tree = self.n_clusters < max(100, 0.02 * n_samples)
n_clusters = self.n_clusters
if compute_full_tree:
n_clusters = None
# Construct the tree
kwargs = {}
if self.linkage != "ward":
kwargs["linkage"] = self.linkage
kwargs["affinity"] = self.metric
distance_threshold = self.distance_threshold
return_distance = (distance_threshold is not None) or self.compute_distances
out = memory.cache(tree_builder)(
X,
connectivity=connectivity,
n_clusters=n_clusters,
return_distance=return_distance,
**kwargs,
)
(self.children_, self.n_connected_components_, self.n_leaves_, parents) = out[
:4
]
if return_distance:
self.distances_ = out[-1]
if self.distance_threshold is not None: # distance_threshold is used
self.n_clusters_ = (
np.count_nonzero(self.distances_ >= distance_threshold) + 1
)
else: # n_clusters is used
self.n_clusters_ = self.n_clusters
# Cut the tree
if compute_full_tree:
self.labels_ = _hc_cut(self.n_clusters_, self.children_, self.n_leaves_)
else:
labels = _hierarchical.hc_get_heads(parents, copy=False)
# copy to avoid holding a reference on the original array
labels = np.copy(labels[:n_samples])
# Reassign cluster numbers
self.labels_ = np.searchsorted(np.unique(labels), labels)
return self
def fit_predict(self, X, y=None):
"""Fit and return the result of each sample's clustering assignment.
In addition to fitting, this method also return the result of the
clustering assignment for each sample in the training set.
Parameters
----------
X : array-like of shape (n_samples, n_features) or \
(n_samples, n_samples)
Training instances to cluster, or distances between instances if
``affinity='precomputed'``.
y : Ignored
Not used, present here for API consistency by convention.
Returns
-------
labels : ndarray of shape (n_samples,)
Cluster labels.
"""
return super().fit_predict(X, y)
| AgglomerativeClustering |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/metaclass11.py | {
"start": 585,
"end": 666
} | class ____(type):
def __setattr__(cls, key: str, value: Any) -> None: ...
| MetaB |
python | walkccc__LeetCode | solutions/2438. Range Product Queries of Powers/2438.py | {
"start": 0,
"end": 373
} | class ____:
def productQueries(self, n: int, queries: list[list[int]]) -> list[int]:
MOD = 1_000_000_007
MAX_BIT = 30
ans = []
pows = [1 << i for i in range(MAX_BIT) if n >> i & 1]
for left, right in queries:
prod = 1
for i in range(left, right + 1):
prod *= pows[i]
prod %= MOD
ans.append(prod)
return ans
| Solution |
python | google__python-fire | fire/test_components.py | {
"start": 8542,
"end": 8601
} | class ____(enum.Enum):
RED = 1
GREEN = 2
BLUE = 3
| Color |
python | sqlalchemy__sqlalchemy | test/orm/test_subquery_relations.py | {
"start": 88095,
"end": 91829
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"foo",
metadata,
Column("id", Integer, primary_key=True),
Column("type", String(50)),
Column("related_id", Integer, ForeignKey("related.id")),
)
Table(
"bar",
metadata,
Column("id", Integer, ForeignKey("foo.id"), primary_key=True),
)
Table(
"baz",
metadata,
Column("id", Integer, ForeignKey("foo.id"), primary_key=True),
)
Table("related", metadata, Column("id", Integer, primary_key=True))
@classmethod
def setup_classes(cls):
class Foo(cls.Comparable):
pass
class Bar(Foo):
pass
class Baz(Foo):
pass
class Related(cls.Comparable):
pass
@classmethod
def fixtures(cls):
return dict(
foo=[
("id", "type", "related_id"),
(1, "bar", 1),
(2, "bar", 2),
(3, "baz", 1),
(4, "baz", 2),
],
bar=[("id",), (1,), (2,)],
baz=[("id",), (3,), (4,)],
related=[("id",), (1,), (2,)],
)
@classmethod
def setup_mappers(cls):
cls.mapper_registry.map_imperatively(
cls.classes.Foo,
cls.tables.foo,
properties={"related": relationship(cls.classes.Related)},
polymorphic_on=cls.tables.foo.c.type,
)
cls.mapper_registry.map_imperatively(
cls.classes.Bar,
cls.tables.bar,
polymorphic_identity="bar",
inherits=cls.classes.Foo,
)
cls.mapper_registry.map_imperatively(
cls.classes.Baz,
cls.tables.baz,
polymorphic_identity="baz",
inherits=cls.classes.Foo,
)
cls.mapper_registry.map_imperatively(
cls.classes.Related, cls.tables.related
)
def test_caches_query_per_base_subq(self):
Foo, Bar, Baz, Related = (
self.classes.Foo,
self.classes.Bar,
self.classes.Baz,
self.classes.Related,
)
s = Session(testing.db)
fp = with_polymorphic(Foo, [Bar, Baz])
def go():
eq_(
s.query(fp)
.order_by(fp.id)
.options(subqueryload(fp.related))
.all(),
[
Bar(id=1, related=Related(id=1)),
Bar(id=2, related=Related(id=2)),
Baz(id=3, related=Related(id=1)),
Baz(id=4, related=Related(id=2)),
],
)
self.assert_sql_count(testing.db, go, 2)
def test_caches_query_per_base_joined(self):
# technically this should be in test_eager_relations
Foo, Bar, Baz, Related = (
self.classes.Foo,
self.classes.Bar,
self.classes.Baz,
self.classes.Related,
)
s = Session(testing.db)
fp = with_polymorphic(Foo, [Bar, Baz])
def go():
eq_(
s.query(fp)
.order_by(fp.id)
.options(joinedload(fp.related))
.all(),
[
Bar(id=1, related=Related(id=1)),
Bar(id=2, related=Related(id=2)),
Baz(id=3, related=Related(id=1)),
Baz(id=4, related=Related(id=2)),
],
)
self.assert_sql_count(testing.db, go, 1)
| InheritanceToRelatedTest |
python | pydantic__pydantic | tests/mypy/modules/plugin_success_baseConfig.py | {
"start": 686,
"end": 950
} | class ____(BaseModel, from_attributes=True):
x: float
y: str
class NotConfig:
frozen = True
kwargs_model = KwargsModel(x=1, y='y')
KwargsModel(x=1, y='y', z='z')
kwargs_model.x = 2
kwargs_model.model_validate(kwargs_model.__dict__)
| KwargsModel |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/event_log/polling_event_watcher.py | {
"start": 812,
"end": 3877
} | class ____:
"""Event Log Watcher that uses a multithreaded polling approach to retrieving new events for run_ids
This class' job is to manage a collection of threads that each poll the event log for a given run_id
Uses one thread (SqlPollingRunIdEventWatcherThread) per watched run_id.
LOCKING INFO:
ORDER: _dict_lock -> run_id_thread.callback_fn_list_lock
INVARIANTS: _dict_lock protects _run_id_to_watcher_dict
"""
def __init__(self, event_log_storage: EventLogStorage):
self._event_log_storage = check.inst_param(
event_log_storage, "event_log_storage", EventLogStorage
)
# INVARIANT: dict_lock protects _run_id_to_watcher_dict
self._dict_lock: threading.Lock = threading.Lock()
self._run_id_to_watcher_dict: MutableMapping[str, SqlPollingRunIdEventWatcherThread] = {}
self._disposed = False
def has_run_id(self, run_id: str) -> bool:
run_id = check.str_param(run_id, "run_id")
with self._dict_lock:
_has_run_id = run_id in self._run_id_to_watcher_dict
return _has_run_id
def watch_run(
self,
run_id: str,
cursor: Optional[str],
callback: Callable[[EventLogEntry, str], None],
) -> None:
run_id = check.str_param(run_id, "run_id")
cursor = check.opt_str_param(cursor, "cursor")
callback = check.callable_param(callback, "callback")
check.invariant(not self._disposed, "Attempted to watch_run after close")
with self._dict_lock:
if run_id not in self._run_id_to_watcher_dict:
self._run_id_to_watcher_dict[run_id] = SqlPollingRunIdEventWatcherThread(
self._event_log_storage, run_id
)
self._run_id_to_watcher_dict[run_id].daemon = True
self._run_id_to_watcher_dict[run_id].start()
self._run_id_to_watcher_dict[run_id].add_callback(cursor, callback)
def unwatch_run(
self,
run_id: str,
handler: Callable[[EventLogEntry, str], None],
) -> None:
run_id = check.str_param(run_id, "run_id")
handler = check.callable_param(handler, "handler")
with self._dict_lock:
if run_id in self._run_id_to_watcher_dict:
self._run_id_to_watcher_dict[run_id].remove_callback(handler)
if self._run_id_to_watcher_dict[run_id].should_thread_exit.is_set():
del self._run_id_to_watcher_dict[run_id]
def close(self) -> None:
if not self._disposed:
self._disposed = True
with self._dict_lock:
for watcher_thread in self._run_id_to_watcher_dict.values():
if not watcher_thread.should_thread_exit.is_set():
watcher_thread.should_thread_exit.set()
for run_id in self._run_id_to_watcher_dict:
self._run_id_to_watcher_dict[run_id].join()
self._run_id_to_watcher_dict = {}
| SqlPollingEventWatcher |
python | sqlalchemy__sqlalchemy | examples/versioned_history/history_meta.py | {
"start": 7623,
"end": 11477
} | class ____:
use_mapper_versioning = False
"""if True, also assign the version column to be tracked by the mapper"""
__table_args__ = {"sqlite_autoincrement": True}
"""Use sqlite_autoincrement, to ensure unique integer values
are used for new rows even for rows that have been deleted."""
def __init_subclass__(cls) -> None:
insp = inspect(cls, raiseerr=False)
if insp is not None:
_history_mapper(insp)
else:
@event.listens_for(cls, "after_mapper_constructed")
def _mapper_constructed(mapper, class_):
_history_mapper(mapper)
super().__init_subclass__()
def versioned_objects(iter_):
for obj in iter_:
if hasattr(obj, "__history_mapper__"):
yield obj
def create_version(obj, session, deleted=False):
obj_mapper = object_mapper(obj)
history_mapper = obj.__history_mapper__
history_cls = history_mapper.class_
obj_state = attributes.instance_state(obj)
attr = {}
obj_changed = False
for om, hm in zip(
obj_mapper.iterate_to_root(), history_mapper.iterate_to_root()
):
if hm.single:
continue
for hist_col in hm.local_table.c:
if _is_versioning_col(hist_col):
continue
obj_col = om.local_table.c[hist_col.key]
# get the value of the
# attribute based on the MapperProperty related to the
# mapped column. this will allow usage of MapperProperties
# that have a different keyname than that of the mapped column.
try:
prop = obj_mapper.get_property_by_column(obj_col)
except UnmappedColumnError:
# in the case of single table inheritance, there may be
# columns on the mapped table intended for the subclass only.
# the "unmapped" status of the subclass column on the
# base class is a feature of the declarative module.
continue
# expired object attributes and also deferred cols might not
# be in the dict. force it to load no matter what by
# using getattr().
if prop.key not in obj_state.dict:
getattr(obj, prop.key)
a, u, d = attributes.get_history(obj, prop.key)
if d:
attr[prop.key] = d[0]
obj_changed = True
elif u:
attr[prop.key] = u[0]
elif a:
# if the attribute had no value.
attr[prop.key] = a[0]
obj_changed = True
if not obj_changed:
# not changed, but we have relationships. OK
# check those too
for prop in obj_mapper.iterate_properties:
if (
isinstance(prop, RelationshipProperty)
and attributes.get_history(
obj, prop.key, passive=attributes.PASSIVE_NO_INITIALIZE
).has_changes()
):
for p in prop.local_columns:
if p.foreign_keys:
obj_changed = True
break
if obj_changed is True:
break
if not obj_changed and not deleted:
return
attr["version"] = obj.version
hist = history_cls()
for key, value in attr.items():
setattr(hist, key, value)
session.add(hist)
obj.version += 1
def versioned_session(session):
@event.listens_for(session, "before_flush")
def before_flush(session, flush_context, instances):
for obj in versioned_objects(session.dirty):
create_version(obj, session)
for obj in versioned_objects(session.deleted):
create_version(obj, session, deleted=True)
| Versioned |
python | walkccc__LeetCode | solutions/2741. Special Permutations/2741.py | {
"start": 0,
"end": 710
} | class ____:
def specialPerm(self, nums: list[int]) -> int:
MOD = 1_000_000_007
maxMask = 1 << len(nums)
@functools.lru_cache(None)
def dp(prev: int, mask: int) -> int:
"""
Returns the number of special permutations, where the previous number is
nums[i] and `mask` is the bitmask of the used numbers.
"""
if mask == maxMask - 1:
return 1
res = 0
for i, num in enumerate(nums):
if mask >> i & 1:
continue
if num % nums[prev] == 0 or nums[prev] % num == 0:
res += dp(i, mask | 1 << i)
res %= MOD
return res
return sum(dp(i, 1 << i)
for i in range(len(nums))) % MOD
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-iterable/source_iterable/streams.py | {
"start": 18840,
"end": 18957
} | class ____(IterableExportEventsStreamAdjustableRange):
data_field = "hostedUnsubscribeClick"
| HostedUnsubscribeClick |
python | numba__numba | numba/core/typing/builtins.py | {
"start": 6548,
"end": 6894
} | class ____(ConcreteTemplate):
cases = [signature(types.float64, op1, op2)
for op1, op2 in itertools.product(machine_ints, machine_ints)]
cases += [signature(op, op, op) for op in sorted(types.real_domain)]
cases += [signature(op, op, op) for op in sorted(types.complex_domain)]
@infer_global(operator.itruediv)
| BinOpTrueDiv |
python | ray-project__ray | python/ray/data/tests/test_namespace_expressions.py | {
"start": 9106,
"end": 9872
} | class ____:
"""Tests for string searching operations."""
def test_string_search(
self,
dataset_format,
method_name,
method_args,
method_kwargs,
input_values,
expected_results,
):
"""Test string searching methods."""
data = [{"val": v} for v in input_values]
ds = _create_dataset(data, dataset_format)
method = getattr(col("val").str, method_name)
result = ds.with_column(
"result", method(*method_args, **method_kwargs)
).to_pandas()
expected = pd.DataFrame({"val": input_values, "result": expected_results})
assert rows_same(result, expected)
@pytest.mark.parametrize("dataset_format", DATASET_FORMATS)
| TestStringSearch |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_types06.py | {
"start": 315,
"end": 981
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("types06.xlsx")
self.ignore_files = [
"xl/calcChain.xml",
"[Content_Types].xml",
"xl/_rels/workbook.xml.rels",
]
def test_write_formula_default(self):
"""Test writing formulas with strings_to_formulas on."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, '="0"&".0"', None, "0.0")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | python-openxml__python-docx | src/docx/oxml/xmlchemy.py | {
"start": 18911,
"end": 19445
} | class ____(_BaseChildElement):
"""Defines an optional repeating child element for MetaOxmlElement."""
def populate_class_members(self, element_cls: MetaOxmlElement, prop_name: str) -> None:
"""Add the appropriate methods to `element_cls`."""
super(ZeroOrMore, self).populate_class_members(element_cls, prop_name)
self._add_list_getter()
self._add_creator()
self._add_inserter()
self._add_adder()
self._add_public_adder()
delattr(element_cls, prop_name)
| ZeroOrMore |
python | encode__django-rest-framework | tests/test_serializer_lists.py | {
"start": 9794,
"end": 11402
} | class ____:
"""Tests the behavior of allow_empty=False when a ListSerializer is used as a field."""
@pytest.mark.parametrize('partial', (False, True))
def test_allow_empty_true(self, partial):
"""
If allow_empty is True, empty lists should be allowed regardless of the value
of partial on the parent serializer.
"""
class ChildSerializer(serializers.Serializer):
id = serializers.IntegerField()
class ParentSerializer(serializers.Serializer):
ids = ChildSerializer(many=True, allow_empty=True)
serializer = ParentSerializer(data={'ids': []}, partial=partial)
assert serializer.is_valid()
assert serializer.validated_data == {
'ids': [],
}
@pytest.mark.parametrize('partial', (False, True))
def test_allow_empty_false(self, partial):
"""
If allow_empty is False, empty lists should fail validation regardless of the value
of partial on the parent serializer.
"""
class ChildSerializer(serializers.Serializer):
id = serializers.IntegerField()
class ParentSerializer(serializers.Serializer):
ids = ChildSerializer(many=True, allow_empty=False)
serializer = ParentSerializer(data={'ids': []}, partial=partial)
assert not serializer.is_valid()
assert serializer.errors == {
'ids': {
'non_field_errors': [
ErrorDetail(string='This list may not be empty.', code='empty')],
}
}
| TestNestedListSerializerAllowEmpty |
python | Lightning-AI__lightning | tests/tests_pytorch/models/test_hparams.py | {
"start": 31138,
"end": 31942
} | class ____(BoringModel):
def __init__(self, config):
super().__init__()
self.save_hyperparameters(config)
def test_empty_hparams_container(tmp_path):
"""Test that save_hyperparameters() is a no-op when saving an empty hparams container."""
model = HparamsKwargsContainerModel()
assert not model.hparams
model = HparamsNamespaceContainerModel(Namespace())
assert not model.hparams
def test_hparams_name_from_container(tmp_path):
"""Test that save_hyperparameters(container) captures the name of the argument correctly."""
model = HparamsKwargsContainerModel(a=1, b=2)
assert model._hparams_name is None
model = HparamsNamespaceContainerModel(Namespace(a=1, b=2))
assert model._hparams_name == "config"
@dataclass
| HparamsNamespaceContainerModel |
python | anthropics__anthropic-sdk-python | src/anthropic/lib/bedrock/_beta.py | {
"start": 2805,
"end": 3055
} | class ____:
def __init__(self, beta: Beta) -> None:
self._beta = beta
@cached_property
def messages(self) -> MessagesWithStreamingResponse:
return MessagesWithStreamingResponse(self._beta.messages)
| BetaWithStreamingResponse |
python | django-guardian__django-guardian | guardian/testapp/tests/test_core.py | {
"start": 1244,
"end": 2077
} | class ____(TestCase):
def setUp(self):
self.group, created = Group.objects.get_or_create(name="jackGroup")
self.user, created = User.objects.get_or_create(username="jack")
self.user.groups.add(self.group)
self.ctype = ContentType.objects.create(model="bar", app_label="fake-for-guardian-tests")
self.ctype_qset = ContentType.objects.filter(model="bar", app_label="fake-for-guardian-tests")
self.ctype_list = [self.ctype_qset.first()]
self.anonymous_user = User.objects.get(username=guardian_settings.ANONYMOUS_USER_NAME)
def get_permission(self, codename, app_label=None):
qs = Permission.objects
if app_label:
qs = qs.filter(content_type__app_label=app_label)
return Permission.objects.get(codename=codename)
| ObjectPermissionTestCase |
python | giampaolo__psutil | tests/test_contracts.py | {
"start": 5395,
"end": 7186
} | class ____(PsutilTestCase):
def test_environ(self):
assert hasattr(psutil.Process, "environ") == (
LINUX
or MACOS
or WINDOWS
or AIX
or SUNOS
or FREEBSD
or OPENBSD
or NETBSD
)
def test_uids(self):
assert hasattr(psutil.Process, "uids") == POSIX
def test_gids(self):
assert hasattr(psutil.Process, "uids") == POSIX
def test_terminal(self):
assert hasattr(psutil.Process, "terminal") == POSIX
def test_ionice(self):
assert hasattr(psutil.Process, "ionice") == (LINUX or WINDOWS)
@pytest.mark.skipif(
GITHUB_ACTIONS and LINUX,
reason="unsupported on GITHUB_ACTIONS + LINUX",
)
def test_rlimit(self):
assert hasattr(psutil.Process, "rlimit") == (LINUX or FREEBSD)
def test_io_counters(self):
hasit = hasattr(psutil.Process, "io_counters")
assert hasit == (not (MACOS or SUNOS))
def test_num_fds(self):
assert hasattr(psutil.Process, "num_fds") == POSIX
def test_num_handles(self):
assert hasattr(psutil.Process, "num_handles") == WINDOWS
def test_cpu_affinity(self):
assert hasattr(psutil.Process, "cpu_affinity") == (
LINUX or WINDOWS or FREEBSD
)
def test_cpu_num(self):
assert hasattr(psutil.Process, "cpu_num") == (
LINUX or FREEBSD or SUNOS
)
def test_memory_maps(self):
hasit = hasattr(psutil.Process, "memory_maps")
assert hasit == (not (OPENBSD or NETBSD or AIX or MACOS))
# ===================================================================
# --- API types
# ===================================================================
| TestAvailProcessAPIs |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-mixpanel/source_mixpanel/components.py | {
"start": 8100,
"end": 10688
} | class ____(SubstreamPartitionRouter):
def stream_slices(self) -> Iterable[StreamSlice]:
"""
Add 'funnel_name' to the slice, the rest code is exactly the same as in super().stream_slices(...)
Remove empty 'parent_slice' attribute to be compatible with LegacyToPerPartitionStateMigration
"""
if not self.parent_stream_configs:
yield from []
else:
for parent_stream_config in self.parent_stream_configs:
parent_stream = parent_stream_config.stream
parent_field = parent_stream_config.parent_key.eval(self.config) # type: ignore # parent_key is always casted to an interpolated string
partition_field = parent_stream_config.partition_field.eval(self.config) # type: ignore # partition_field is always casted to an interpolated string
for parent_stream_slice in parent_stream.stream_slices(
sync_mode=SyncMode.full_refresh, cursor_field=None, stream_state=None
):
empty_parent_slice = True
parent_partition = parent_stream_slice.partition if parent_stream_slice else {}
for parent_record in parent_stream.read_records(
sync_mode=SyncMode.full_refresh, cursor_field=None, stream_slice=parent_stream_slice, stream_state=None
):
# Skip non-records (eg AirbyteLogMessage)
if isinstance(parent_record, AirbyteMessage):
if parent_record.type == Type.RECORD:
parent_record = parent_record.record.data
else:
continue
elif isinstance(parent_record, Record):
parent_record = parent_record.data
try:
partition_value = dpath.util.get(parent_record, parent_field)
except KeyError:
pass
else:
empty_parent_slice = False
yield StreamSlice(
partition={partition_field: partition_value},
cursor_slice={"funnel_name": parent_record.get("name")},
)
# If the parent slice contains no records,
if empty_parent_slice:
yield from []
@dataclass
| FunnelsSubstreamPartitionRouter |
python | pypa__pip | src/pip/_vendor/distlib/util.py | {
"start": 57646,
"end": 59574
} | class ____(object):
"""
Mixin for running subprocesses and capturing their output
"""
def __init__(self, verbose=False, progress=None):
self.verbose = verbose
self.progress = progress
def reader(self, stream, context):
"""
Read lines from a subprocess' output stream and either pass to a progress
callable (if specified) or write progress information to sys.stderr.
"""
progress = self.progress
verbose = self.verbose
while True:
s = stream.readline()
if not s:
break
if progress is not None:
progress(s, context)
else:
if not verbose:
sys.stderr.write('.')
else:
sys.stderr.write(s.decode('utf-8'))
sys.stderr.flush()
stream.close()
def run_command(self, cmd, **kwargs):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
t1.start()
t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
t2.start()
p.wait()
t1.join()
t2.join()
if self.progress is not None:
self.progress('done.', 'main')
elif self.verbose:
sys.stderr.write('done.\n')
return p
def normalize_name(name):
"""Normalize a python package name a la PEP 503"""
# https://www.python.org/dev/peps/pep-0503/#normalized-names
return re.sub('[-_.]+', '-', name).lower()
# def _get_pypirc_command():
# """
# Get the distutils command for interacting with PyPI configurations.
# :return: the command.
# """
# from distutils.core import Distribution
# from distutils.config import PyPIRCCommand
# d = Distribution()
# return PyPIRCCommand(d)
| SubprocessMixin |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.