| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6–201) | class_span (dict) | source (stringlengths, 21–2.38M) | target (stringlengths, 1–96) |
|---|---|---|---|---|---|
| python | wireservice__csvkit | tests/test_utilities/test_in2csv.py | {"start": 202, "end": 13554} |
class ____(CSVKitTestCase, EmptyFileTests):
Utility = In2CSV
default_args = ['-f', 'csv']
def assertConverted(self, input_format, input_filename, output_filename, additional_args=[]):
output = self.get_output(['-f', input_format, input_filename] + additional_args)
with open(output_filename) as f:
self.assertEqual(output, f.read())
def test_launch_new_instance(self):
with patch.object(sys, 'argv', [self.Utility.__name__.lower(), 'examples/dummy.csv']):
launch_new_instance()
def test_version(self):
with self.assertRaises(SystemExit) as e:
self.get_output(['-V'])
self.assertEqual(e.exception.code, 0)
def test_args(self):
for args in ([], ['-']):
with self.subTest(args=args):
self.assertError(
launch_new_instance,
[],
'You must specify a format when providing input as piped data via STDIN.',
args=args,
)
def test_options(self):
for options, args, message in (
(
[],
['dummy.unknown'],
'Unable to automatically determine the format of the input file. '
'Try specifying a format with --format.',
),
(
['-n'],
['dummy.csv'],
'You cannot use the -n or --names options with non-Excel files.',
),
):
with self.subTest(args=options + args):
self.assertError(launch_new_instance, options, message, args=args)
def test_locale(self):
self.assertConverted('csv', 'examples/test_locale.csv',
'examples/test_locale_converted.csv', ['--locale', 'de_DE'])
def test_add_bom(self):
self.assertConverted('csv', 'examples/test_utf8.csv',
'examples/test_utf8_bom.csv', ['--add-bom'])
def test_no_blanks(self):
self.assertConverted('csv', 'examples/blanks.csv', 'examples/blanks_converted.csv')
def test_blanks(self):
self.assertConverted('csv', 'examples/blanks.csv', 'examples/blanks.csv', ['--blanks'])
def test_null_value(self):
input_file = io.BytesIO(b'a,b\nn/a,\\N')
with stdin_as_string(input_file):
self.assertLines(['-f', 'csv', '--null-value', '\\N'], [
'a,b',
',',
])
input_file.close()
def test_null_value_blanks(self):
input_file = io.BytesIO(b'a,b\nn/a,\\N')
with stdin_as_string(input_file):
self.assertLines(['-f', 'csv', '--null-value', '\\N', '--blanks'], [
'a,b',
'n/a,',
])
input_file.close()
def test_no_leading_zeroes(self):
self.assertConverted('csv', 'examples/test_no_leading_zeroes.csv',
'examples/test_no_leading_zeroes.csv', ['--no-leading-zeroes'])
def test_date_format(self):
self.assertConverted('csv', 'examples/test_date_format.csv',
'examples/test_date_format_converted.csv', ['--date-format', '%d/%m/%Y'])
def test_date_format_default(self):
self.assertConverted('csv', 'examples/test_date_format.csv', 'examples/test_date_format.csv')
def test_numeric_date_format(self):
self.assertConverted('csv', 'examples/test_numeric_date_format.csv',
'examples/test_date_format_converted.csv', ['--date-format', '%Y%m%d'])
def test_numeric_date_format_default(self):
self.assertConverted('csv', 'examples/test_numeric_date_format.csv', 'examples/test_numeric_date_format.csv')
def test_date_like_number(self):
self.assertConverted('csv', 'examples/date_like_number.csv', 'examples/date_like_number.csv')
def test_convert_csv(self):
self.assertConverted('csv', 'examples/testfixed_converted.csv', 'examples/testfixed_converted.csv')
def test_convert_csv_with_skip_lines(self):
self.assertConverted('csv', 'examples/test_skip_lines.csv', 'examples/dummy.csv',
['--skip-lines', '3', '--no-inference'])
def test_convert_tsv(self):
self.assertConverted('csv', 'examples/dummy.tsv', 'examples/dummy.csv', ['--no-inference'])
def test_convert_tsv_streaming(self):
self.assertConverted('csv', 'examples/dummy.tsv', 'examples/dummy.csv',
['--no-inference', '--snifflimit', '0', '--tabs'])
def test_convert_dbf(self):
self.assertConverted('dbf', 'examples/testdbf.dbf', 'examples/testdbf_converted.csv')
def test_convert_json(self):
self.assertConverted('json', 'examples/testjson.json', 'examples/testjson_converted.csv')
def test_convert_geojson(self):
self.assertConverted('geojson', 'examples/test_geojson.json', 'examples/test_geojson.csv')
def test_convert_ndjson(self):
self.assertConverted('ndjson', 'examples/testjson_multiline.json', 'examples/testjson_multiline_converted.csv')
def test_convert_nested_json(self):
self.assertConverted('json', 'examples/testjson_nested.json', 'examples/testjson_nested_converted.csv')
def test_convert_xls(self):
self.assertConverted('xls', 'examples/test.xls', 'examples/testxls_converted.csv')
def test_convert_xls_with_sheet(self):
self.assertConverted('xls', 'examples/sheets.xls', 'examples/testxls_converted.csv', ['--sheet', 'data'])
def test_convert_xls_with_unicode_sheet(self):
self.assertLines(['--sheet', 'ʤ', 'examples/sheets.xls'], [
'a,b,c',
'1.0,2.0,3.0',
])
def test_convert_xls_with_skip_lines(self):
self.assertConverted('xls', 'examples/test_skip_lines.xls',
'examples/testxls_converted.csv', ['--skip-lines', '3'])
def test_convert_xlsx(self):
self.assertConverted('xlsx', 'examples/test.xlsx', 'examples/testxlsx_converted.csv')
def test_convert_xlsx_with_sheet(self):
self.assertConverted('xlsx', 'examples/sheets.xlsx', 'examples/testxlsx_converted.csv', ['--sheet', 'data'])
def test_convert_xlsx_with_unicode_sheet(self):
self.assertLines(['--sheet', 'ʤ', '--no-inference', 'examples/sheets.xlsx'], [
'a,b,c',
'1,2,3',
])
def test_convert_xlsx_with_skip_lines(self):
self.assertConverted('xlsx', 'examples/test_skip_lines.xlsx',
'examples/testxlsx_converted.csv', ['--skip-lines', '3'])
def test_names(self):
self.assertLines(['--names', 'examples/sheets.xlsx'], [
'not this one',
'data',
'ʤ',
])
def test_csv_no_headers(self):
self.assertConverted('csv', 'examples/no_header_row.csv', 'examples/dummy.csv',
['--no-header-row', '--no-inference'])
def test_csv_no_headers_streaming(self):
self.assertConverted('csv', 'examples/no_header_row.csv', 'examples/dummy.csv',
['--no-header-row', '--no-inference', '--snifflimit', '0'])
def test_csv_datetime_inference(self):
input_file = io.BytesIO(b'a\n2015-01-01T00:00:00Z')
with stdin_as_string(input_file):
self.assertLines(['-f', 'csv'], [
'a',
'2015-01-01T00:00:00+00:00',
])
input_file.close()
def test_csv_no_inference(self):
self.assertLines(['--no-inference', 'examples/dummy.csv'], [
'a,b,c',
'1,2,3',
])
def test_xls_no_inference(self):
self.assertLines(['--no-inference', 'examples/dummy.xls'], [
'a,b,c',
'1.0,2.0,3.0',
])
def test_xlsx_no_inference(self):
self.assertLines(['--no-inference', 'examples/dummy.xlsx'], [
'a,b,c',
'1,2,3',
])
def test_geojson_no_inference(self):
input_file = io.BytesIO(
b'{"a": 1, "b": 2, "type": "FeatureCollection", "features": [{"geometry": {}, "properties": '
b'{"a": 1, "b": 2, "c": 3}}]}')
with stdin_as_string(input_file):
self.assertLines(['--no-inference', '-f', 'geojson'], [
'id,a,b,c,geojson,type,longitude,latitude',
',1,2,3,{},,,',
])
input_file.close()
def test_json_no_inference(self):
input_file = io.BytesIO(b'[{"a": 1, "b": 2, "c": 3}]')
with stdin_as_string(input_file):
self.assertLines(['--no-inference', '-f', 'json'], [
'a,b,c',
'1,2,3',
])
input_file.close()
def test_ndjson_no_inference(self):
input_file = io.BytesIO(b'{"a": 1, "b": 2, "c": 3}')
with stdin_as_string(input_file):
self.assertLines(['--no-inference', '-f', 'ndjson'], [
'a,b,c',
'1,2,3',
])
input_file.close()
def test_names_xls(self):
output = self.get_output_as_io(['-n', 'examples/sheets.xls'])
self.assertEqual(next(output), 'not this one\n')
self.assertEqual(next(output), 'data\n')
def test_names_xlsx(self):
output = self.get_output_as_io(['-n', 'examples/sheets.xlsx'])
self.assertEqual(next(output), 'not this one\n')
self.assertEqual(next(output), 'data\n')
def test_convert_xls_with_write_sheets(self):
try:
self.assertConverted('xls', 'examples/sheets.xls', 'examples/testxls_converted.csv',
['--sheet', 'data', '--write-sheets', "ʤ,1"])
with open('examples/sheets_0.csv') as f, open('examples/testxls_unicode_converted.csv') as g:
self.assertEqual(f.read(), g.read())
with open('examples/sheets_1.csv') as f, open('examples/testxls_converted.csv') as g:
self.assertEqual(f.read(), g.read())
self.assertFalse(os.path.exists('examples/sheets_2.csv'))
finally:
for suffix in (0, 1):
path = 'examples/sheets_%d.csv' % suffix
if os.path.exists(path):
os.remove(path)
def test_convert_xlsx_with_write_sheets(self):
try:
self.assertConverted('xlsx', 'examples/sheets.xlsx', 'examples/testxlsx_noinference_converted.csv',
['--no-inference', '--sheet', 'data', '--write-sheets', "ʤ,1"])
with open('examples/sheets_0.csv') as f, open('examples/testxlsx_unicode_converted.csv') as g:
self.assertEqual(f.read(), g.read())
with open('examples/sheets_1.csv') as f, open('examples/testxlsx_noinference_converted.csv') as g:
self.assertEqual(f.read(), g.read())
self.assertFalse(os.path.exists('examples/sheets_2.csv'))
finally:
for suffix in (0, 1):
path = 'examples/sheets_%d.csv' % suffix
if os.path.exists(path):
os.remove(path)
def test_convert_xls_with_write_sheets_with_names(self):
try:
self.assertConverted('xls', 'examples/sheets.xls', 'examples/testxls_converted.csv',
['--sheet', 'data', '--write-sheets', "ʤ,1", '--use-sheet-names'])
with open('examples/sheets_ʤ.csv', 'r') as f:
with open('examples/testxls_unicode_converted.csv', 'r') as g:
self.assertEqual(f.read(), g.read())
with open('examples/sheets_data.csv', 'r') as f:
with open('examples/testxls_converted.csv', 'r') as g:
self.assertEqual(f.read(), g.read())
self.assertFalse(os.path.exists('examples/sheets_0.csv'))
self.assertFalse(os.path.exists('examples/sheets_1.csv'))
self.assertFalse(os.path.exists('examples/sheets_2.csv'))
finally:
for suffix in ('ʤ', 'data'):
path = 'examples/sheets_%s.csv' % suffix
if os.path.exists(path):
os.remove(path)
def test_convert_xlsx_with_write_sheets_with_names(self):
try:
self.assertConverted('xlsx', 'examples/sheets.xlsx', 'examples/testxlsx_noinference_converted.csv',
['--no-inference', '--sheet', 'data', '--write-sheets', "ʤ,1", '--use-sheet-names'])
with open('examples/sheets_ʤ.csv', 'r') as f:
with open('examples/testxlsx_unicode_converted.csv', 'r') as g:
self.assertEqual(f.read(), g.read())
with open('examples/sheets_data.csv', 'r') as f:
with open('examples/testxlsx_noinference_converted.csv', 'r') as g:
self.assertEqual(f.read(), g.read())
self.assertFalse(os.path.exists('examples/sheets_0.csv'))
self.assertFalse(os.path.exists('examples/sheets_1.csv'))
self.assertFalse(os.path.exists('examples/sheets_2.csv'))
finally:
for suffix in ('ʤ', 'data'):
path = 'examples/sheets_%s.csv' % suffix
if os.path.exists(path):
os.remove(path)
| TestIn2CSV |
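The tests above exercise the in2csv command-line tool's conversion options against fixture files. A minimal sketch of that pattern outside the test harness, assuming csvkit is installed and the examples/ fixtures are available (both are assumptions here, mirroring test_blanks):

```python
import subprocess

# Run in2csv on a fixture with --blanks (as test_blanks does) and show the output.
result = subprocess.run(
    ["in2csv", "-f", "csv", "--blanks", "examples/blanks.csv"],
    capture_output=True, text=True, check=True,
)
print(result.stdout, end="")
```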
| python | huggingface__transformers | src/transformers/models/zamba/modeling_zamba.py | {"start": 13400, "end": 28138} |
class ____(nn.Module):
"""
Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`.
A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
and is why Mamba is called **selective** state spaces)
This module differs from `transformers.models.mamba.modeling_mamba.MambaMixer` in two ways:
- Added multi-head: the output of `self.in_proj` is split into `self.n_mamba_heads` heads, and each head
undergoes an independent forward pass, identical to the original `MambaMixer`, up until the pre-activations of
`self.out_proj`. The pre-activations, coming from different mamba heads, are then concatenated and fed into `self.out_proj`.
"""
def __init__(self, config: ZambaConfig, layer_idx):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.hidden_size = config.hidden_size
self.ssm_state_size = config.mamba_d_state
self.conv_kernel_size = config.mamba_d_conv
self.intermediate_size = config.mamba_expand * config.hidden_size
self.time_step_rank = config.mamba_dt_rank
self.n_mamba_heads = config.n_mamba_heads
self.mamba_head_dim = self.intermediate_size // self.n_mamba_heads
self.use_conv_bias = config.mamba_conv_bias
self.use_bias = config.mamba_proj_bias
self.conv1d = nn.Conv1d(
in_channels=self.intermediate_size,
out_channels=self.intermediate_size,
bias=self.use_conv_bias,
kernel_size=self.conv_kernel_size,
groups=self.intermediate_size,
padding=self.conv_kernel_size - 1,
)
self.activation = config.hidden_mamba_act
self.act = ACT2FN[config.hidden_mamba_act]
self.use_fast_kernels = config.use_mamba_kernels
# projection of the input hidden states
self.in_proj = nn.Linear(self.hidden_size, self.intermediate_size * 2, bias=self.use_bias)
# weight associated to the selective projection used to make dt, B and C input dependent
# each mamba head is processed independently
self.x_proj_weight = nn.Parameter(
torch.zeros(
self.n_mamba_heads,
self.time_step_rank + self.ssm_state_size * 2,
self.mamba_head_dim,
)
)
# time step projection (discretization)
self.dt_proj_weight = nn.Parameter(
(torch.zeros(self.n_mamba_heads, self.mamba_head_dim, self.time_step_rank) - 0.5)
* 2
/ self.time_step_rank**0.5
)
self.dt_proj_bias = nn.Parameter(torch.zeros(self.n_mamba_heads, self.mamba_head_dim))
# S4D real initialization. These are not discretized!
# The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded
A = torch.arange(1, self.ssm_state_size + 1, dtype=torch.float32)[None, :]
A = A.expand(self.intermediate_size, -1).contiguous()
self.A_log = nn.Parameter(torch.log(A).reshape(self.n_mamba_heads, self.mamba_head_dim, -1))
self.D = nn.Parameter(torch.ones(self.n_mamba_heads, self.mamba_head_dim))
self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=self.use_bias)
if not is_fast_path_available:
logger.warning_once(
"The fast path is not available because one of `(selective_state_update, selective_scan_fn, causal_conv1d_fn, causal_conv1d_update, mamba_inner_fn)`"
" is None. To install follow https://github.com/state-spaces/mamba/#installation and"
" https://github.com/Dao-AILab/causal-conv1d. If you want to use the naive implementation, set `use_mamba_kernels=False` in the model config"
)
def cuda_kernels_forward(
self, hidden_states: torch.Tensor, cache_params: ZambaHybridDynamicCache = None, attention_mask=None
):
batch_size, seq_len, _ = hidden_states.shape
use_precomputed_states = cache_params is not None and cache_params.has_previous_state and seq_len == 1
# 1. Gated linear projection
projected_states = self.in_proj(hidden_states).transpose(1, 2)
hidden_states, gate = projected_states.view(batch_size, -1, 2, seq_len).chunk(2, dim=2)
hidden_states = hidden_states.squeeze(2).contiguous()
gate = gate.squeeze(2)
gate = gate.reshape(batch_size, self.n_mamba_heads, -1, seq_len).transpose(0, 1)
# 2. Convolution sequence transformation
conv_weights = self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2))
if use_precomputed_states:
hidden_states = causal_conv1d_update(
hidden_states.squeeze(-1),
cache_params.conv_states[self.layer_idx],
conv_weights,
self.conv1d.bias,
self.activation,
)
hidden_states = hidden_states.unsqueeze(-1)
else:
if attention_mask is not None and not torch.all(attention_mask == 1):
hidden_states = hidden_states * attention_mask.unsqueeze(1)
if cache_params is not None:
conv_states = nn.functional.pad(hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0))
cache_params.conv_states[self.layer_idx].copy_(conv_states)
hidden_states = causal_conv1d_fn(hidden_states, conv_weights, self.conv1d.bias, activation=self.activation)
if attention_mask is not None and not torch.all(attention_mask == 1):
hidden_states = hidden_states * attention_mask.unsqueeze(1)
# 3. SSM sequence transformation
# 3.a. input varying initialization of time_step, B and C
hidden_states = hidden_states.reshape(-1, self.n_mamba_heads, self.mamba_head_dim, seq_len).transpose(0, 1)
ssm_parameters = (self.x_proj_weight[:, None, :, :] @ hidden_states).transpose(-1, -2)
time_step, B, C = torch.split(
ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1
)
discrete_time_step = self.dt_proj_weight[:, None] @ time_step.transpose(-1, -2)
A = -torch.exp(self.A_log.float())
# 3.c perform the recurrence y ← SSM(A, B, C)(x)
time_proj_bias = self.dt_proj_bias.float() if self.dt_proj_bias is not None else None
scan_outputs = torch.empty((batch_size, 0, seq_len), device=hidden_states.device, dtype=hidden_states.dtype)
if use_precomputed_states:
for n in range(self.n_mamba_heads):
scan_outputs_ = selective_state_update(
cache_params.ssm_states[self.layer_idx][:, n],
hidden_states[n, ..., 0],
discrete_time_step[n, ..., 0],
A[n],
B[n, :, 0],
C[n, :, 0],
self.D[n],
gate[n, ..., 0],
time_proj_bias[n],
dt_softplus=True,
).unsqueeze(-1)
scan_outputs = torch.cat((scan_outputs, scan_outputs_), dim=1)
else:
ssm_state = torch.empty(
(batch_size, 0, self.mamba_head_dim, self.ssm_state_size),
device=hidden_states.device,
dtype=hidden_states.dtype,
)
for n in range(self.n_mamba_heads):
scan_outputs_, ssm_state_ = selective_scan_fn(
hidden_states[n],
discrete_time_step[n],
A[n],
B[n].transpose(1, 2),
C[n].transpose(1, 2),
self.D[n].float(),
gate[n],
time_proj_bias[n],
delta_softplus=True,
return_last_state=True,
)
scan_outputs = torch.cat((scan_outputs, scan_outputs_), dim=1).contiguous()
ssm_state = torch.cat((ssm_state, ssm_state_.unsqueeze(1)), dim=1)
if ssm_state is not None and cache_params is not None:
cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
# 4. Final linear projection
contextualized_states = self.out_proj(scan_outputs.transpose(1, 2))
return contextualized_states
def slow_forward(self, input_states, cache_params: ZambaHybridDynamicCache = None, attention_mask=None):
batch_size, seq_len, _ = input_states.shape
dtype = input_states.dtype
# 1. Gated linear projection
projected_states = self.in_proj(input_states).transpose(1, 2)
hidden_states, gate = projected_states.view(batch_size, -1, 2, seq_len).chunk(2, dim=2)
hidden_states = hidden_states.squeeze(2).contiguous()
gate = gate.squeeze(2)
gate = gate.reshape(batch_size, self.n_mamba_heads, -1, seq_len).transpose(0, 1)
use_cache = isinstance(cache_params, ZambaHybridDynamicCache)
# 2. Convolution sequence transformation
if use_cache and cache_params.ssm_states[self.layer_idx].shape[0] == batch_size:
if self.training:
# In training mode, we don't want to perform in-place operations on ssm_state so we can compute the backwards pass
ssm_state = cache_params.ssm_states[self.layer_idx].clone()
else:
ssm_state = cache_params.ssm_states[self.layer_idx]
ssm_state = ssm_state.to(hidden_states.device)
if (
cache_params.has_previous_state
and seq_len == 1
and cache_params.conv_states[self.layer_idx].shape[0] == batch_size
):
conv_state = cache_params.conv_states[self.layer_idx]
conv_state = torch.roll(conv_state, shifts=-1, dims=-1)
conv_state[:, :, -1] = hidden_states[:, :, 0]
cache_params.conv_states[self.layer_idx] = conv_state
hidden_states = torch.sum(conv_state * self.conv1d.weight[:, 0, :], dim=-1)
if self.use_conv_bias:
hidden_states += self.conv1d.bias
hidden_states = self.act(hidden_states).to(dtype).unsqueeze(-1)
else:
if attention_mask is not None and not torch.all(attention_mask == 1):
hidden_states = hidden_states * attention_mask[:, -hidden_states.shape[-1] :].unsqueeze(1)
conv_state = nn.functional.pad(hidden_states, (self.conv_kernel_size - hidden_states.shape[-1], 0))
cache_params.conv_states[self.layer_idx] = conv_state
hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len])
if attention_mask is not None and not torch.all(attention_mask == 1):
hidden_states = hidden_states * attention_mask[:, -hidden_states.shape[-1] :].unsqueeze(1)
else:
ssm_state = torch.zeros(
(batch_size, self.n_mamba_heads, self.mamba_head_dim, self.ssm_state_size),
device=hidden_states.device,
dtype=dtype,
)
if attention_mask is not None and not torch.all(attention_mask == 1):
hidden_states = hidden_states * attention_mask.unsqueeze(1)
hidden_states = self.act(self.conv1d(hidden_states)[..., :seq_len])
if attention_mask is not None and not torch.all(attention_mask == 1):
hidden_states = hidden_states * attention_mask.unsqueeze(1)
# 3. State Space Model sequence transformation
# 3.a. Selection: [batch, seq_len, self.time_step_rank + self.ssm_state_size * 2]
hidden_states = hidden_states.reshape(-1, self.n_mamba_heads, self.mamba_head_dim, seq_len).transpose(0, 1)
ssm_parameters = (self.x_proj_weight[:, None, :, :] @ hidden_states).transpose(-1, -2)
time_step, B, C = torch.split(
ssm_parameters, [self.time_step_rank, self.ssm_state_size, self.ssm_state_size], dim=-1
)
discrete_time_step = (self.dt_proj_weight[:, None] @ time_step.transpose(-1, -2)) + self.dt_proj_bias[
:, None, :, None
]
discrete_time_step = nn.functional.softplus(discrete_time_step)
# 3.b. Discretization: B and C to [batch, seq_len, intermediate_size, ssm_state_size] (SRAM)
A = -torch.exp(self.A_log.float())
discrete_A = torch.exp(A[:, None, :, None, :] * discrete_time_step[:, :, :, :, None])
discrete_B = discrete_time_step[:, :, :, :, None] * B[:, :, None, :, :].float()
deltaB_u = discrete_B * hidden_states[:, :, :, :, None].float()
# 3.c perform the recurrence y ← SSM(A, B, C)(x)
scan_outputs = []
for i in range(seq_len):
ssm_state = discrete_A[:, :, :, i, :].transpose(0, 1) * ssm_state + deltaB_u[:, :, :, i, :].transpose(0, 1)
scan_output = torch.matmul(ssm_state.transpose(0, 1).to(dtype), C[:, :, i, :].unsqueeze(-1))
scan_outputs.append(scan_output[:, :, :, 0])
scan_output = torch.stack(scan_outputs, dim=-1)
scan_output = scan_output + (hidden_states * self.D[:, None, :, None])
scan_output = scan_output * self.act(gate)
if use_cache:
cache_params.ssm_states[self.layer_idx] = ssm_state
# 4. Final linear projection
contextualized_states = self.out_proj(
scan_output.transpose(0, 1).reshape(batch_size, -1, seq_len).transpose(1, 2)
)
return contextualized_states
def forward(self, hidden_states, cache_params: ZambaHybridDynamicCache = None, attention_mask=None):
if self.use_fast_kernels:
if not is_fast_path_available or "cuda" not in self.x_proj_weight.device.type:
raise ValueError(
"Fast Mamba kernels are not available. Make sure to they are installed and that "
"the mamba module is on a CUDA device. lease run 'pip install causal-conv1d>=1.2.0' "
"and 'pip install mamba-ssm', or set use_mamba_kernels=False in the model's config."
)
return self.cuda_kernels_forward(hidden_states, cache_params, attention_mask=attention_mask)
return self.slow_forward(hidden_states, cache_params, attention_mask=attention_mask)
# Copied from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Zamba
| ZambaMambaMixer |
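A shape-only sketch of the multi-head split described in the mixer's docstring: the gated in_proj output is split per head before the per-head scans and concatenated again for out_proj. The sizes below are illustrative assumptions, not Zamba's real configuration:

```python
import torch

batch_size, seq_len, hidden_size = 2, 5, 16
mamba_expand, n_mamba_heads = 2, 4
intermediate_size = mamba_expand * hidden_size        # 32
mamba_head_dim = intermediate_size // n_mamba_heads   # 8

in_proj = torch.nn.Linear(hidden_size, intermediate_size * 2)
projected = in_proj(torch.randn(batch_size, seq_len, hidden_size)).transpose(1, 2)
# Split the doubled projection into hidden states and gate, as in the mixer.
hidden_states, gate = projected.view(batch_size, -1, 2, seq_len).chunk(2, dim=2)
hidden_states = hidden_states.squeeze(2)              # (batch, intermediate, seq)
# Reshape the channel dimension into independent mamba heads.
heads = hidden_states.reshape(batch_size, n_mamba_heads, mamba_head_dim, seq_len).transpose(0, 1)
print(heads.shape)  # torch.Size([4, 2, 8, 5]) -> (heads, batch, head_dim, seq)
```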
| python | ray-project__ray | rllib/models/tf/tf_action_dist.py | {"start": 4031, "end": 7832} |
class ____(TFActionDistribution):
"""MultiCategorical distribution for MultiDiscrete action spaces."""
def __init__(
self,
inputs: List[TensorType],
model: ModelV2,
input_lens: Union[List[int], np.ndarray, Tuple[int, ...]],
action_space=None,
):
# skip TFActionDistribution init
ActionDistribution.__init__(self, inputs, model)
self.cats = [
Categorical(input_, model)
for input_ in tf.split(inputs, input_lens, axis=1)
]
self.action_space = action_space
if self.action_space is None:
self.action_space = gym.spaces.MultiDiscrete(
[c.inputs.shape[1] for c in self.cats]
)
self.sample_op = self._build_sample_op()
self.sampled_action_logp_op = self.logp(self.sample_op)
@override(ActionDistribution)
def deterministic_sample(self) -> TensorType:
sample_ = tf.stack([cat.deterministic_sample() for cat in self.cats], axis=1)
if isinstance(self.action_space, gym.spaces.Box):
return tf.cast(
tf.reshape(sample_, [-1] + list(self.action_space.shape)),
self.action_space.dtype,
)
return sample_
@override(ActionDistribution)
def logp(self, actions: TensorType) -> TensorType:
# If tensor is provided, unstack it into list.
if isinstance(actions, tf.Tensor):
if isinstance(self.action_space, gym.spaces.Box):
actions = tf.reshape(
actions, [-1, int(np.prod(self.action_space.shape))]
)
elif isinstance(self.action_space, gym.spaces.MultiDiscrete):
actions.set_shape((None, len(self.cats)))
actions = tf.unstack(tf.cast(actions, tf.int32), axis=1)
logps = tf.stack([cat.logp(act) for cat, act in zip(self.cats, actions)])
return tf.reduce_sum(logps, axis=0)
@override(ActionDistribution)
def multi_entropy(self) -> TensorType:
return tf.stack([cat.entropy() for cat in self.cats], axis=1)
@override(ActionDistribution)
def entropy(self) -> TensorType:
return tf.reduce_sum(self.multi_entropy(), axis=1)
@override(ActionDistribution)
def multi_kl(self, other: ActionDistribution) -> TensorType:
return tf.stack(
[cat.kl(oth_cat) for cat, oth_cat in zip(self.cats, other.cats)], axis=1
)
@override(ActionDistribution)
def kl(self, other: ActionDistribution) -> TensorType:
return tf.reduce_sum(self.multi_kl(other), axis=1)
@override(TFActionDistribution)
def _build_sample_op(self) -> TensorType:
sample_op = tf.stack([cat.sample() for cat in self.cats], axis=1)
if isinstance(self.action_space, gym.spaces.Box):
return tf.cast(
tf.reshape(sample_op, [-1] + list(self.action_space.shape)),
dtype=self.action_space.dtype,
)
return sample_op
@staticmethod
@override(ActionDistribution)
def required_model_output_shape(
action_space: gym.Space, model_config: ModelConfigDict
) -> Union[int, np.ndarray]:
# Int Box.
if isinstance(action_space, gym.spaces.Box):
assert action_space.dtype.name.startswith("int")
low_ = np.min(action_space.low)
high_ = np.max(action_space.high)
assert np.all(action_space.low == low_)
assert np.all(action_space.high == high_)
return np.prod(action_space.shape, dtype=np.int32) * (high_ - low_ + 1)
# MultiDiscrete space.
else:
# nvec is already integer, so no casting needed.
return np.sum(action_space.nvec)
@OldAPIStack
| MultiCategorical |
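A quick NumPy-only sketch (with made-up sub-action sizes) of the flat-logit layout this distribution expects: required_model_output_shape for a MultiDiscrete space is the sum of nvec, and input_lens tells the constructor where to split the logits into one Categorical per sub-action:

```python
import numpy as np

nvec = np.array([3, 5, 2])                    # e.g. MultiDiscrete([3, 5, 2])
required_output = int(np.sum(nvec))           # 10 logits per timestep
logits = np.random.randn(4, required_output)  # batch of 4
# Split along the last axis, analogous to tf.split(inputs, input_lens, axis=1).
per_cat = np.split(logits, np.cumsum(nvec)[:-1], axis=1)
print([x.shape for x in per_cat])             # [(4, 3), (4, 5), (4, 2)]
```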
| python | kubernetes-client__python | kubernetes/client/models/v1_storage_class.py | {"start": 383, "end": 14396} |
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'allow_volume_expansion': 'bool',
'allowed_topologies': 'list[V1TopologySelectorTerm]',
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'mount_options': 'list[str]',
'parameters': 'dict(str, str)',
'provisioner': 'str',
'reclaim_policy': 'str',
'volume_binding_mode': 'str'
}
attribute_map = {
'allow_volume_expansion': 'allowVolumeExpansion',
'allowed_topologies': 'allowedTopologies',
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'mount_options': 'mountOptions',
'parameters': 'parameters',
'provisioner': 'provisioner',
'reclaim_policy': 'reclaimPolicy',
'volume_binding_mode': 'volumeBindingMode'
}
def __init__(self, allow_volume_expansion=None, allowed_topologies=None, api_version=None, kind=None, metadata=None, mount_options=None, parameters=None, provisioner=None, reclaim_policy=None, volume_binding_mode=None, local_vars_configuration=None): # noqa: E501
"""V1StorageClass - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._allow_volume_expansion = None
self._allowed_topologies = None
self._api_version = None
self._kind = None
self._metadata = None
self._mount_options = None
self._parameters = None
self._provisioner = None
self._reclaim_policy = None
self._volume_binding_mode = None
self.discriminator = None
if allow_volume_expansion is not None:
self.allow_volume_expansion = allow_volume_expansion
if allowed_topologies is not None:
self.allowed_topologies = allowed_topologies
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if mount_options is not None:
self.mount_options = mount_options
if parameters is not None:
self.parameters = parameters
self.provisioner = provisioner
if reclaim_policy is not None:
self.reclaim_policy = reclaim_policy
if volume_binding_mode is not None:
self.volume_binding_mode = volume_binding_mode
@property
def allow_volume_expansion(self):
"""Gets the allow_volume_expansion of this V1StorageClass. # noqa: E501
allowVolumeExpansion shows whether the storage class allow volume expand. # noqa: E501
:return: The allow_volume_expansion of this V1StorageClass. # noqa: E501
:rtype: bool
"""
return self._allow_volume_expansion
@allow_volume_expansion.setter
def allow_volume_expansion(self, allow_volume_expansion):
"""Sets the allow_volume_expansion of this V1StorageClass.
allowVolumeExpansion shows whether the storage class allow volume expand. # noqa: E501
:param allow_volume_expansion: The allow_volume_expansion of this V1StorageClass. # noqa: E501
:type: bool
"""
self._allow_volume_expansion = allow_volume_expansion
@property
def allowed_topologies(self):
"""Gets the allowed_topologies of this V1StorageClass. # noqa: E501
allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature. # noqa: E501
:return: The allowed_topologies of this V1StorageClass. # noqa: E501
:rtype: list[V1TopologySelectorTerm]
"""
return self._allowed_topologies
@allowed_topologies.setter
def allowed_topologies(self, allowed_topologies):
"""Sets the allowed_topologies of this V1StorageClass.
allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature. # noqa: E501
:param allowed_topologies: The allowed_topologies of this V1StorageClass. # noqa: E501
:type: list[V1TopologySelectorTerm]
"""
self._allowed_topologies = allowed_topologies
@property
def api_version(self):
"""Gets the api_version of this V1StorageClass. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1StorageClass. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1StorageClass.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1StorageClass. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1StorageClass. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1StorageClass. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1StorageClass.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1StorageClass. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1StorageClass. # noqa: E501
:return: The metadata of this V1StorageClass. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1StorageClass.
:param metadata: The metadata of this V1StorageClass. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def mount_options(self):
"""Gets the mount_options of this V1StorageClass. # noqa: E501
mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid. # noqa: E501
:return: The mount_options of this V1StorageClass. # noqa: E501
:rtype: list[str]
"""
return self._mount_options
@mount_options.setter
def mount_options(self, mount_options):
"""Sets the mount_options of this V1StorageClass.
mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid. # noqa: E501
:param mount_options: The mount_options of this V1StorageClass. # noqa: E501
:type: list[str]
"""
self._mount_options = mount_options
@property
def parameters(self):
"""Gets the parameters of this V1StorageClass. # noqa: E501
parameters holds the parameters for the provisioner that should create volumes of this storage class. # noqa: E501
:return: The parameters of this V1StorageClass. # noqa: E501
:rtype: dict(str, str)
"""
return self._parameters
@parameters.setter
def parameters(self, parameters):
"""Sets the parameters of this V1StorageClass.
parameters holds the parameters for the provisioner that should create volumes of this storage class. # noqa: E501
:param parameters: The parameters of this V1StorageClass. # noqa: E501
:type: dict(str, str)
"""
self._parameters = parameters
@property
def provisioner(self):
"""Gets the provisioner of this V1StorageClass. # noqa: E501
provisioner indicates the type of the provisioner. # noqa: E501
:return: The provisioner of this V1StorageClass. # noqa: E501
:rtype: str
"""
return self._provisioner
@provisioner.setter
def provisioner(self, provisioner):
"""Sets the provisioner of this V1StorageClass.
provisioner indicates the type of the provisioner. # noqa: E501
:param provisioner: The provisioner of this V1StorageClass. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and provisioner is None: # noqa: E501
raise ValueError("Invalid value for `provisioner`, must not be `None`") # noqa: E501
self._provisioner = provisioner
@property
def reclaim_policy(self):
"""Gets the reclaim_policy of this V1StorageClass. # noqa: E501
reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. Defaults to Delete. # noqa: E501
:return: The reclaim_policy of this V1StorageClass. # noqa: E501
:rtype: str
"""
return self._reclaim_policy
@reclaim_policy.setter
def reclaim_policy(self, reclaim_policy):
"""Sets the reclaim_policy of this V1StorageClass.
reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. Defaults to Delete. # noqa: E501
:param reclaim_policy: The reclaim_policy of this V1StorageClass. # noqa: E501
:type: str
"""
self._reclaim_policy = reclaim_policy
@property
def volume_binding_mode(self):
"""Gets the volume_binding_mode of this V1StorageClass. # noqa: E501
volumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature. # noqa: E501
:return: The volume_binding_mode of this V1StorageClass. # noqa: E501
:rtype: str
"""
return self._volume_binding_mode
@volume_binding_mode.setter
def volume_binding_mode(self, volume_binding_mode):
"""Sets the volume_binding_mode of this V1StorageClass.
volumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature. # noqa: E501
:param volume_binding_mode: The volume_binding_mode of this V1StorageClass. # noqa: E501
:type: str
"""
self._volume_binding_mode = volume_binding_mode
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1StorageClass):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1StorageClass):
return True
return self.to_dict() != other.to_dict()
| V1StorageClass |
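A minimal usage sketch, assuming the kubernetes Python client is installed; provisioner is the only required field and is validated in its setter:

```python
from kubernetes.client import V1ObjectMeta, V1StorageClass

sc = V1StorageClass(
    metadata=V1ObjectMeta(name="fast-ssd"),
    provisioner="kubernetes.io/no-provisioner",
    reclaim_policy="Retain",
    volume_binding_mode="WaitForFirstConsumer",
    allow_volume_expansion=True,
)
print(sc.to_dict()["provisioner"])  # kubernetes.io/no-provisioner
```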
| python | pytorch__pytorch | torch/fx/tensor_type.py | {"start": 197, "end": 1010} |
class ____:
"""
TensorType defines a type for tensors, which consists of a list of dimensions.
Example:
class M(torch.nn.Module):
def forward(self, x:TensorType((1,2,3, Dyn)), y:TensorType((1,2,3, Dyn))):
return torch.add(x, y)
"""
def __init__(self, dim):
self.__origin__ = TensorType
self.__args__ = dim
def __repr__(self):
return f"TensorType[{self.__args__}]"
def __eq__(self, other):
if isinstance(other, self.__class__):
return list(self.__args__) == list(other.__args__)
else:
return False
@staticmethod
def __class_getitem__(*args):
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
return TensorType(tuple(args))
| TensorType |
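A short sketch of the subscript syntax implemented by __class_getitem__, assuming torch.fx's tensor_type module is importable (Dyn is the dynamic-dimension marker defined alongside it):

```python
from torch.fx.tensor_type import Dyn, TensorType

t1 = TensorType((1, 2, 3, Dyn))
t2 = TensorType[1, 2, 3, Dyn]  # __class_getitem__ wraps the args into a tuple
print(t1 == t2)                # True: equality compares the dimension lists
print(t1)                      # TensorType[(1, 2, 3, Dyn)]
```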
| python | HypothesisWorks__hypothesis | hypothesis-python/tests/common/utils.py | {"start": 7217, "end": 9860} |
class ____(enum.Enum):
# Categorizing known failures, to ease later follow-up investigation.
# Some are crosshair issues, some hypothesis issues, others truly ok-to-xfail tests.
symbolic_outside_context = "CrosshairInternal error (using value outside context)"
nested_given = "nested @given decorators don't work with crosshair"
undiscovered = "crosshair may not find the failing input"
other = "reasons not elsewhere categorized"
def xfail_on_crosshair(why: Why, /, *, strict=True, as_marks=False):
# run `pytest -m xf_crosshair` to select these tests!
kw = {
"strict": strict and why != Why.undiscovered,
"reason": f"Expected failure due to: {why.value}",
"condition": settings().backend == "crosshair",
}
if as_marks: # for use with pytest.param(..., marks=xfail_on_crosshair())
return (pytest.mark.xf_crosshair, pytest.mark.xfail(**kw))
return lambda fn: pytest.mark.xf_crosshair(pytest.mark.xfail(**kw)(fn))
def skipif_threading(f):
return pytest.mark.skipif(
settings.get_current_profile_name() == "threading", reason="not thread safe"
)(f)
def xfail_if_gil_disabled(f):
try:
if not sys._is_gil_enabled(): # 3.13+
return pytest.mark.xfail(
reason="fails on free-threading build", strict=False
)(f)
except Exception:
pass
return f
# we don't monkeypatch _consistently_increment_time under threading
skipif_time_unpatched = skipif_threading
_restore_recursion_limit_lock = RLock()
@contextlib.contextmanager
def restore_recursion_limit():
with _restore_recursion_limit_lock:
original_limit = sys.getrecursionlimit()
try:
yield
finally:
sys.setrecursionlimit(original_limit)
def run_concurrently(function, *, n: int) -> None:
import pytest
if settings.get_current_profile_name() == "crosshair":
pytest.skip("crosshair is not thread safe")
if sys.platform == "emscripten":
pytest.skip("no threads on emscripten")
def run():
barrier.wait()
function()
threads = [Thread(target=run) for _ in range(n)]
barrier = Barrier(n)
for thread in threads:
thread.start()
for thread in threads:
thread.join(timeout=10)
def wait_for(condition, *, timeout=1, interval=0.01):
for _ in range(math.ceil(timeout / interval)):
if condition():
return
time_sleep(interval)
raise Exception(
f"timing out after waiting {timeout}s for condition "
f"{get_pretty_function_description(condition)}"
)
| Why |
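An illustrative, hypothetical usage sketch for these helpers, assuming they are imported from the test suite's tests.common.utils module and that a hypothesis/crosshair profile is configured:

```python
import pytest
from hypothesis import given, strategies as st

from tests.common.utils import Why, xfail_on_crosshair  # assumed import path

@xfail_on_crosshair(Why.undiscovered, strict=False)
@given(st.integers())
def test_zero_product(x):
    assert x * 0 == 0

# As marks, for use inside a parametrized case:
case = pytest.param(0, marks=xfail_on_crosshair(Why.other, as_marks=True))
```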
| python | pytorch__pytorch | torch/distributed/fsdp/api.py | {"start": 16365, "end": 17024} |
class ____(StateDictConfig):
"""
``ShardedStateDictConfig`` is a config class meant to be used with
``StateDictType.SHARDED_STATE_DICT``.
Attributes:
_use_dtensor (bool): If ``True``, then FSDP saves the state dict values
as ``DTensor``, and if ``False``, then FSDP saves them as
``ShardedTensor``. (Default: ``False``)
.. warning:: ``_use_dtensor`` is a private field of :class:`ShardedStateDictConfig`
and it is used by FSDP to determine the type of state dict values. Users should not
manually modify ``_use_dtensor``.
"""
_use_dtensor: bool = False
@dataclass
| ShardedStateDictConfig |
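A minimal sketch of constructing the config; in real use it is passed to FSDP.set_state_dict_type(...) together with StateDictType.SHARDED_STATE_DICT on an already-wrapped model, which requires an initialized process group and is omitted here:

```python
from torch.distributed.fsdp import ShardedStateDictConfig

cfg = ShardedStateDictConfig(offload_to_cpu=True)  # offload_to_cpu is inherited from StateDictConfig
print(cfg._use_dtensor)  # False by default; private, not meant to be set by users
```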
| python | getsentry__sentry | src/sentry/web/client_config.py | {"start": 5722, "end": 19353} |
class ____:
def __init__(
self,
request: Request | None = None,
org_context: RpcUserOrganizationContext | None = None,
) -> None:
self.request = request
if request is not None:
self.user: User | AnonymousUser | None = request.user
self.session: SessionBase | None = request.session
else:
self.user = None
self.session = None
self.last_org = _resolve_last_org(request, self.session, self.user, org_context)
@property
def last_org_slug(self) -> str | None:
if self.last_org is None:
return None
return self.last_org.slug
@cached_property
def customer_domain(self) -> Mapping[str, str] | None:
if self.request is None or not is_using_customer_domain(self.request):
return None
return {
"subdomain": self.request.subdomain,
"organizationUrl": generate_organization_url(self.request.subdomain),
"sentryUrl": options.get("system.url-prefix"),
}
@cached_property
def tracing_data(self) -> Mapping[str, str]:
return {
"sentry_trace": sentry_sdk.get_traceparent() or "",
"baggage": sentry_sdk.get_baggage() or "",
}
@property
def enabled_features(self) -> Iterable[str]:
if features.has("organizations:create", actor=self.user):
yield "organizations:create"
if auth.has_user_registration():
yield "auth:register"
if features.has("relocation:enabled", actor=self.user):
yield "relocation:enabled"
if features.has("system:multi-region"):
yield "system:multi-region"
# TODO @athena: remove this feature flag after development is done
        # this is a temporary hack to be able to use flagpole in a case where there's no organization
        # available on the frontend
if self.last_org and features.has(
"organizations:scoped-partner-oauth", self.last_org, actor=self.user
):
yield "system:scoped-partner-oauth"
@property
def needs_upgrade(self) -> bool:
return self.request is not None and is_active_superuser(self.request) and _needs_upgrade()
@cached_property
def public_dsn(self) -> str | None:
return _get_public_dsn()
@property
def messages(self):
if self.request is None:
return []
return get_messages(self.request)
@property
def language_code(self) -> str:
default_language_code = "en"
if self.request is None:
return default_language_code
return getattr(self.request, "LANGUAGE_CODE", default_language_code)
@property
def user_identity(self) -> Iterable[tuple[str, Any]]:
if self.request is None:
return
yield "ip_address", self.request.META["REMOTE_ADDR"]
if self.user and self.user.is_authenticated:
yield "email", self.user.email
yield "id", self.user.id
yield "isStaff", self.user.is_staff
if self.user.name:
yield "name", self.user.name
@cached_property
def allow_list(self) -> list[str]:
if settings.SENTRY_FRONTEND_WHITELIST_URLS:
return settings.SENTRY_FRONTEND_WHITELIST_URLS
if settings.ALLOWED_HOSTS == ["*"]:
return []
return list(settings.ALLOWED_HOSTS)
def _is_superuser(self) -> bool:
# Note: This intentionally does not use the "active" superuser flag as
# the frontend should only ever use this flag as a hint that the user can be a superuser
# the API will always need to check for active superuser.
#
# This is needed in the case where you access a different org and get denied, but the UI
# can open the sudo dialog if you are an "inactive" superuser
return self.request is not None and self.user is not None and self.user.is_superuser
@property
def links(self) -> Iterable[tuple[str, str | None]]:
organization_url = (
generate_organization_url(self.last_org_slug) if self.last_org_slug else None
)
region_url = None
if self.last_org:
if SiloMode.get_current_mode() == SiloMode.CONTROL:
organization_mapping = OrganizationMapping.objects.get(
organization_id=self.last_org.id
)
region_url = generate_region_url(organization_mapping.region_name)
else:
region_url = generate_region_url()
yield "organizationUrl", organization_url
yield "regionUrl", region_url
yield "sentryUrl", options.get("system.url-prefix")
if self._is_superuser() and superuser.SUPERUSER_ORG_ID is not None:
org_context = organization_service.get_organization_by_id(
id=superuser.SUPERUSER_ORG_ID,
user_id=None,
include_projects=False,
include_teams=False,
)
if org_context and org_context.organization:
yield "superuserUrl", generate_organization_url(org_context.organization.slug)
@cached_property
def user_details(self) -> Mapping[str, Any] | None:
if self.user is None or not self.user.is_authenticated:
return None
query_result = user_service.serialize_many(
filter={"user_ids": [self.user.id]},
serializer=UserSerializeType.SELF_DETAILED,
auth_context=AuthenticationContext(
auth=self.request.auth if self.request is not None else None,
user=serialize_generic_user(self.user),
),
)
if not query_result:
# this could be an empty result as the user could be deleted
return None
(user_details,) = query_result
user_details = json.loads(json.dumps(user_details))
if self._is_superuser():
user_details["isSuperuser"] = self.user.is_superuser
return user_details
@cached_property
def _member_region_names(self) -> frozenset[str]:
# If the user is not authenticated they have no region membership
if not self.user or not self.user.id:
return frozenset()
region_names = user_service.get_member_region_names(user_id=self.user.id)
return frozenset(region_names)
@staticmethod
def _serialize_regions(
region_names: Iterable[str], display_order: Callable[[Region], Any]
) -> list[Mapping[str, Any]]:
regions = [get_region_by_name(name) for name in region_names]
regions.sort(key=display_order)
return [region.api_serialize() for region in regions]
@property
def regions(self) -> list[Mapping[str, Any]]:
"""
The regions available to the current user.
This will include *all* multi-tenant regions, and if the user
has membership on any single-tenant regions those will also be included.
"""
# Only expose visible regions.
# When new regions are added they can take some work to get working correctly.
# Before they are working we need ways to bring parts of the region online without
# exposing the region to customers.
region_names = find_all_multitenant_region_names()
if not region_names:
return [{"name": "default", "url": options.get("system.url-prefix")}]
def region_display_order(region: Region) -> tuple[bool, bool, str]:
return (
not region.is_historic_monolith_region(), # default region comes first
region.category != RegionCategory.MULTI_TENANT, # multi-tenants before single
region.name, # then sort alphabetically
)
# Show all visible multi-tenant regions to unauthenticated users as they could
# create a new account. Else, ensure all regions the current user is in are
# included as there could be single tenants or hidden regions.
unique_regions = set(region_names) | self._member_region_names
return self._serialize_regions(unique_regions, region_display_order)
@property
def member_regions(self) -> list[Mapping[str, Any]]:
"""
The regions the user has membership in. Includes single-tenant regions.
"""
return self._serialize_regions(self._member_region_names, lambda r: r.name)
@property
def should_preload_data(self) -> bool:
"""
Indicates if the preload-data functionality is enabled when rendering
the preload-data.html template. This is only used when layout.html is
rendered.
"""
# Don't send requests if there is no logged in user.
if not self.user_details:
return False
# If the user is viewing the accept invitation user interface,
# we should avoid preloading the data as they might not yet have access to it,
# which could cause an error notification (403) to pop up in the user interface.
invite_route_names = (
"sentry-accept-invite",
"sentry-organization-accept-invite",
)
if (
self.request
and self.request.resolver_match
and self.request.resolver_match.url_name in invite_route_names
):
return False
return True
def get_context(self) -> Mapping[str, Any]:
return {
"initialTrace": self.tracing_data,
"customerDomain": self.customer_domain,
"singleOrganization": settings.SENTRY_SINGLE_ORGANIZATION,
"supportEmail": _get_support_mail(),
"urlPrefix": options.get("system.url-prefix"),
"version": _get_version_info(),
"features": list(self.enabled_features),
"distPrefix": get_frontend_dist_prefix(),
"needsUpgrade": self.needs_upgrade,
"dsn": self.public_dsn,
"statuspage": _get_statuspage(),
"messages": [{"message": msg.message, "level": msg.tags} for msg in self.messages],
"apmSampling": float(settings.SENTRY_FRONTEND_APM_SAMPLING or 0),
# Maintain isOnPremise key for backcompat (plugins?).
"isOnPremise": is_self_hosted(),
"isSelfHosted": is_self_hosted(),
"isSelfHostedErrorsOnly": is_self_hosted_errors_only(),
# sentryMode intends to supersede isSelfHosted,
# so we can differentiate between "SELF_HOSTED", "SINGLE_TENANT", and "SAAS".
"sentryMode": settings.SENTRY_MODE.name,
"shouldPreloadData": self.should_preload_data,
"shouldShowBeaconConsentPrompt": not self.needs_upgrade
and should_show_beacon_consent_prompt(),
"invitesEnabled": settings.SENTRY_ENABLE_INVITES,
"gravatarBaseUrl": settings.SENTRY_GRAVATAR_BASE_URL,
"termsUrl": settings.TERMS_URL,
"privacyUrl": settings.PRIVACY_URL,
# Note `lastOrganization` should not be expected to update throughout frontend app lifecycle
# It should only be used on a fresh browser nav to a path where an
# organization is not in context
"lastOrganization": self.last_org_slug,
"languageCode": self.language_code,
"userIdentity": dict(self.user_identity),
"csrfCookieName": settings.CSRF_COOKIE_NAME,
"superUserCookieName": superuser.COOKIE_NAME,
"superUserCookieDomain": superuser.COOKIE_DOMAIN,
"sentryConfig": {
"dsn": self.public_dsn,
# XXX: In the world of frontend / backend deploys being separated,
# this is likely incorrect, since the backend version may not
# match the frontend build version.
#
# This is likely to be removed sometime in the future.
"release": f"frontend@{settings.SENTRY_SDK_CONFIG['release']}",
"environment": settings.SENTRY_SDK_CONFIG["environment"],
# By default `ALLOWED_HOSTS` is [*], however the JS SDK does not support globbing
"whitelistUrls": self.allow_list,
"allowUrls": self.allow_list,
"tracePropagationTargets": settings.SENTRY_FRONTEND_TRACE_PROPAGATION_TARGETS or [],
},
"memberRegions": self.member_regions,
"regions": self.regions,
"relocationConfig": {"selectableRegions": options.get("relocation.selectable-regions")},
"demoMode": is_demo_mode_enabled() and is_demo_user(self.user),
"enableAnalytics": settings.ENABLE_ANALYTICS,
"validateSUForm": getattr(
settings, "VALIDATE_SUPERUSER_ACCESS_CATEGORY_AND_REASON", False
),
"disableU2FForSUForm": getattr(settings, "DISABLE_SU_FORM_U2F_CHECK_FOR_LOCAL", False),
"links": dict(self.links),
"user": self.user_details,
"isAuthenticated": self.user_details is not None,
}
def get_client_config(
request=None, org_context: RpcUserOrganizationContext | None = None
) -> Mapping[str, Any]:
"""
Provides initial bootstrap data needed to boot the frontend application.
"""
config = _ClientConfig(request, org_context)
if request is not None and config.last_org is None:
_delete_activeorg(config.session)
return config.get_context()
| _ClientConfig |
| python | walkccc__LeetCode | solutions/3043. Find the Length of the Longest Common Prefix/3043.py | {"start": 84, "end": 538} |
class ____:
def __init__(self):
self.root = TrieNode()
def insert(self, word: str) -> None:
node: TrieNode = self.root
for c in word:
node = node.children.setdefault(c, TrieNode())
node.isWord = True
def search(self, word: str) -> int:
prefixLength = 0
node = self.root
for c in word:
if c not in node.children:
break
node = node.children[c]
prefixLength += 1
return prefixLength
| Trie |
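A self-contained sketch of how this class is used for LeetCode 3043 (referring to the masked class above by its target name, Trie): insert the decimal digits of one array, then query prefixes of the other. The TrieNode below is an assumed minimal definition matching what the class references:

```python
class TrieNode:
    def __init__(self):
        self.children: dict[str, "TrieNode"] = {}
        self.isWord = False

trie = Trie()  # the class defined above
for x in [1, 10, 100]:
    trie.insert(str(x))
# Longest common prefix length of any pair across the two arrays:
print(max(trie.search(str(y)) for y in [1000]))  # 3, for the common prefix "100"
```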
| python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/hooks/powerbi.py | {"start": 1797, "end": 1933} |
class ____(AirflowException):
"""An exception that indicates a failure in getting the list of datasets."""
| PowerBIDatasetListException |
| python | Netflix__metaflow | metaflow/_vendor/packaging/requirements.py | {"start": 544, "end": 3264} |
class ____:
"""Parse a requirement.
Parse a given requirement string into its parts, such as name, specifier,
URL, and extras. Raises InvalidRequirement on a badly-formed requirement
string.
"""
# TODO: Can we test whether something is contained within a requirement?
# If so how do we do that? Do we need to test against the _name_ of
# the thing as well as the version? What about the markers?
# TODO: Can we normalize the name and extra name?
def __init__(self, requirement_string: str) -> None:
try:
parsed = parse_requirement(requirement_string)
except ParserSyntaxError as e:
raise InvalidRequirement(str(e)) from e
self.name: str = parsed.name
if parsed.url:
parsed_url = urllib.parse.urlparse(parsed.url)
if parsed_url.scheme == "file":
if urllib.parse.urlunparse(parsed_url) != parsed.url:
raise InvalidRequirement("Invalid URL given")
elif not (parsed_url.scheme and parsed_url.netloc) or (
not parsed_url.scheme and not parsed_url.netloc
):
raise InvalidRequirement(f"Invalid URL: {parsed.url}")
self.url: Optional[str] = parsed.url
else:
self.url = None
self.extras: Set[str] = set(parsed.extras if parsed.extras else [])
self.specifier: SpecifierSet = SpecifierSet(parsed.specifier)
self.marker: Optional[Marker] = None
if parsed.marker is not None:
self.marker = Marker.__new__(Marker)
self.marker._markers = _normalize_extra_values(parsed.marker)
def __str__(self) -> str:
parts: List[str] = [self.name]
if self.extras:
formatted_extras = ",".join(sorted(self.extras))
parts.append(f"[{formatted_extras}]")
if self.specifier:
parts.append(str(self.specifier))
if self.url:
parts.append(f"@ {self.url}")
if self.marker:
parts.append(" ")
if self.marker:
parts.append(f"; {self.marker}")
return "".join(parts)
def __repr__(self) -> str:
return f"<Requirement('{self}')>"
def __hash__(self) -> int:
return hash((self.__class__.__name__, str(self)))
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Requirement):
return NotImplemented
return (
self.name == other.name
and self.extras == other.extras
and self.specifier == other.specifier
and self.url == other.url
and self.marker == other.marker
)
| Requirement |
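A minimal usage sketch via the upstream packaging distribution (which the vendored module mirrors), assuming packaging is installed:

```python
from packaging.requirements import Requirement

req = Requirement('requests[security,tests]>=2.8.1; python_version < "2.7"')
print(req.name)            # requests
print(sorted(req.extras))  # ['security', 'tests']
print(req.specifier)       # >=2.8.1
print(req.marker)          # python_version < "2.7"
```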
| python | bokeh__bokeh | src/bokeh/sphinxext/_internal/bokeh_directive.py | {"start": 2048, "end": 2754} |
class ____(SphinxDirective):
def parse(self, rst_text, annotation):
result = ViewList()
for line in rst_text.split("\n"):
result.append(line, annotation)
node = nodes.paragraph()
node.document = self.state.document
nested_parse_with_titles(self.state, result, node)
return node.children
# -----------------------------------------------------------------------------
# Private API
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Code
# -----------------------------------------------------------------------------
| BokehDirective |
| python | walkccc__LeetCode | solutions/274. H-Index/274.py | {"start": 0, "end": 419} |
class ____:
def hIndex(self, citations: list[int]) -> int:
n = len(citations)
accumulate = 0
count = [0] * (n + 1)
for citation in citations:
count[min(citation, n)] += 1
# To find the maximum h-index, loop from the back to the front.
# i := the candidate's h-index
for i, c in reversed(list(enumerate(count))):
accumulate += c
if accumulate >= i:
return i
|
Solution
|
python
|
django__django
|
tests/admin_views/models.py
|
{
"start": 14549,
"end": 14611
}
|
class ____(Plot):
class Meta:
proxy = True
|
PlotProxy
|
python
|
walkccc__LeetCode
|
solutions/1680. Concatenation of Consecutive Binary Numbers/1680-2.py
|
{
"start": 0,
"end": 268
}
|
class ____:
def concatenatedBinary(self, n: int) -> int:
MOD = 1_000_000_007
ans = 0
numberOfBits = 0
for i in range(1, n + 1):
if i.bit_count() == 1:
numberOfBits += 1
ans = ((ans << numberOfBits) + i) % MOD
return ans
|
Solution
|
python
|
keras-team__keras
|
keras/src/saving/saving_lib_test.py
|
{
"start": 36280,
"end": 42623
}
|
class ____(testing.TestCase):
def test_custom_object_without_from_config(self):
temp_filepath = os.path.join(
self.get_temp_dir(), "custom_fn_model.keras"
)
inputs = keras.Input(shape=(4, 4))
outputs = keras.layers.Dense(1, activation=GrowthFactor(0.5))(inputs)
model = keras.Model(inputs, outputs)
model.save(temp_filepath)
with self.assertRaisesRegex(
TypeError, "Unable to reconstruct an instance"
):
_ = saving_lib.load_model(temp_filepath)
def test_complex_model_without_explicit_deserialization(self):
temp_filepath = os.path.join(self.get_temp_dir(), "complex_model.keras")
inputs = keras.Input((32,))
outputs = ComplexModel(first_layer=FactorLayer(0.5))(inputs)
model = keras.Model(inputs, outputs)
model.save(temp_filepath)
with self.assertRaisesRegex(TypeError, "are explicitly deserialized"):
_ = saving_lib.load_model(temp_filepath)
def test_redefinition_of_trackable(self):
"""Test that a trackable can be aliased under a new name."""
class NormalModel(keras.Model):
def __init__(self):
super().__init__()
self.dense = keras.layers.Dense(3)
def call(self, x):
return self.dense(x)
class WeirdModel(keras.Model):
def __init__(self):
super().__init__()
# This property will be traversed first,
# but "_dense" isn't in the saved file
# generated by NormalModel.
self.a_dense = keras.layers.Dense(3)
@property
def dense(self):
return self.a_dense
def call(self, x):
return self.dense(x)
temp_filepath = os.path.join(
self.get_temp_dir(), "normal_model.weights.h5"
)
model_a = NormalModel()
model_a(np.random.random((2, 2)))
model_a.save_weights(temp_filepath)
model_b = WeirdModel()
model_b(np.random.random((2, 2)))
model_b.load_weights(temp_filepath)
self.assertAllClose(
model_a.dense.kernel.numpy(), model_b.dense.kernel.numpy()
)
def test_normalization_legacy_h5_format(self):
temp_filepath = os.path.join(self.get_temp_dir(), "custom_model.h5")
inputs = keras.Input((32,))
normalization = keras.layers.Normalization()
outputs = normalization(inputs)
model = keras.Model(inputs, outputs)
x = np.random.random((1, 32))
normalization.adapt(x)
ref_out = model(x)
model.save(temp_filepath)
new_model = keras.saving.load_model(temp_filepath)
out = new_model(x)
self.assertAllClose(ref_out, out, atol=1e-6)
def test_legacy_h5_format(self):
temp_filepath = os.path.join(self.get_temp_dir(), "custom_model.h5")
inputs = keras.Input((32,))
x = MyDense(2)(inputs)
outputs = CustomModelX()(x)
model = keras.Model(inputs, outputs)
x = np.random.random((1, 32))
ref_out = model(x)
model.save(temp_filepath)
new_model = keras.saving.load_model(temp_filepath)
out = new_model(x)
self.assertAllClose(ref_out, out, atol=1e-6)
def test_nested_functional_model_saving(self):
def func(in_size=4, out_size=2, name=None):
inputs = keras.layers.Input(shape=(in_size,))
outputs = keras.layers.Dense(out_size)((inputs))
return keras.Model(inputs, outputs=outputs, name=name)
input_a, input_b = keras.Input((4,)), keras.Input((4,))
out_a = func(out_size=2, name="func_a")(input_a)
out_b = func(out_size=3, name="func_b")(input_b)
model = keras.Model([input_a, input_b], outputs=[out_a, out_b])
temp_filepath = os.path.join(self.get_temp_dir(), "nested_func.keras")
model.save(temp_filepath)
new_model = keras.saving.load_model(temp_filepath)
x = [np.random.random((2, 4))], np.random.random((2, 4))
ref_out = model(x)
out = new_model(x)
self.assertAllClose(ref_out[0], out[0])
self.assertAllClose(ref_out[1], out[1])
def test_nested_shared_functional_model_saving(self):
def func(in_size=4, out_size=2, name=None):
inputs = keras.layers.Input(shape=(in_size,))
outputs = keras.layers.Dense(out_size)((inputs))
return keras.Model(inputs, outputs=outputs, name=name)
inputs = [keras.Input((4,)), keras.Input((4,))]
func_shared = func(out_size=4, name="func_shared")
shared_a = func_shared(inputs[0])
shared_b = func_shared(inputs[1])
out_a = keras.layers.Dense(2)(shared_a)
out_b = keras.layers.Dense(2)(shared_b)
model = keras.Model(inputs, outputs=[out_a, out_b])
temp_filepath = os.path.join(
self.get_temp_dir(), "nested_shared_func.keras"
)
model.save(temp_filepath)
new_model = keras.saving.load_model(temp_filepath)
x = [np.random.random((2, 4))], np.random.random((2, 4))
ref_out = model(x)
out = new_model(x)
self.assertAllClose(ref_out[0], out[0])
self.assertAllClose(ref_out[1], out[1])
def test_bidirectional_lstm_saving(self):
inputs = keras.Input((3, 2))
outputs = keras.layers.Bidirectional(keras.layers.LSTM(64))(inputs)
model = keras.Model(inputs, outputs)
temp_filepath = os.path.join(self.get_temp_dir(), "bidir_lstm.keras")
model.save(temp_filepath)
new_model = keras.saving.load_model(temp_filepath)
x = np.random.random((1, 3, 2))
ref_out = model(x)
out = new_model(x)
self.assertAllClose(ref_out, out)
def test_remove_weights_only_saving_and_loading(self):
def is_remote_path(path):
return True
temp_filepath = os.path.join(self.get_temp_dir(), "model.weights.h5")
with mock.patch(
"keras.src.utils.file_utils.is_remote_path", is_remote_path
):
model = _get_basic_functional_model()
model.save_weights(temp_filepath)
model.load_weights(temp_filepath)
|
SavingBattleTest
|
python
|
getsentry__sentry
|
src/sentry/sentry_apps/utils/webhooks.py
|
{
"start": 275,
"end": 345
}
|
class ____(SentryAppActionType):
CREATED = "created"
|
ErrorActionType
|
python
|
ansible__ansible
|
lib/ansible/plugins/lookup/pipe.py
|
{
"start": 1823,
"end": 2789
}
|
class ____(LookupBase):
def run(self, terms, variables=None, **kwargs):
ret = []
for term in terms:
# https://docs.python.org/3/library/subprocess.html#popen-constructor
#
# The shell argument (which defaults to False) specifies whether to use the
# shell as the program to execute. If shell is True, it is recommended to pass
# args as a string rather than as a sequence
#
# https://github.com/ansible/ansible/issues/6550
term = str(term)
p = subprocess.Popen(term, cwd=self._templar.basedir, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode == 0:
ret.append(stdout.decode("utf-8").rstrip())
else:
raise AnsibleError("lookup_plugin.pipe(%s) returned %d" % (term, p.returncode))
return ret
|
LookupModule
|
python
|
ray-project__ray
|
python/ray/autoscaler/v2/tests/test_threaded_ray_installer.py
|
{
"start": 794,
"end": 5109
}
|
class ____(unittest.TestCase):
def setUp(self):
self.base_provider = MockProvider()
self.config = AutoscalingConfig(load_test_config("test_ray_complex.yaml"))
self.runner = MockProcessRunner()
self.ray_installer = RayInstaller(self.base_provider, self.config, self.runner)
self.instance_storage = InstanceStorage(
cluster_id="test_cluster_id",
storage=InMemoryStorage(),
)
self.error_queue = Queue()
self.threaded_ray_installer = ThreadedRayInstaller(
head_node_ip="127.0.0.1",
instance_storage=self.instance_storage,
ray_installer=self.ray_installer,
error_queue=self.error_queue,
)
def test_install_ray_on_new_node_version_mismatch(self):
self.base_provider.create_node({}, {TAG_RAY_NODE_KIND: "worker_nodes1"}, 1)
instance = Instance(
instance_id="0",
instance_type="worker_nodes1",
cloud_instance_id="0",
status=Instance.RAY_INSTALLING,
node_kind=NodeKind.WORKER,
)
        success, version = self.instance_storage.upsert_instance(instance)
assert success
self.runner.respond_to_call("json .Config.Env", ["[]" for i in range(1)])
self.threaded_ray_installer._install_ray_on_single_node(instance)
instances, _ = self.instance_storage.get_instances(
instance_ids={instance.instance_id}
)
assert instances[instance.instance_id].status == Instance.RAY_INSTALLING
        assert instances[instance.instance_id].version == version
@patch.object(RayInstaller, "install_ray")
def test_install_ray_on_new_node_install_failed(self, mock_method):
self.base_provider.create_node({}, {TAG_RAY_NODE_KIND: "worker_nodes1"}, 1)
instance = Instance(
instance_id="0",
instance_type="worker_nodes1",
cloud_instance_id="0",
status=Instance.RAY_INSTALLING,
node_kind=NodeKind.WORKER,
)
        success, version = self.instance_storage.upsert_instance(instance)
assert success
        instance.version = version
mock_method.side_effect = RuntimeError("Installation failed")
self.threaded_ray_installer._install_retry_interval = 0
self.threaded_ray_installer._max_install_attempts = 1
self.threaded_ray_installer._install_ray_on_single_node(instance)
instances, _ = self.instance_storage.get_instances(
instance_ids={instance.instance_id}
)
# Make sure the instance status is not updated by the ThreadedRayInstaller
# since it should be updated by the Reconciler.
assert instances[instance.instance_id].status == Instance.RAY_INSTALLING
# Make sure the error is added to the error queue.
error = self.error_queue.get()
assert error.im_instance_id == instance.instance_id
assert "Installation failed" in error.details
def test_install_ray_on_new_nodes(self):
self.base_provider.create_node({}, {TAG_RAY_NODE_KIND: "worker_nodes1"}, 1)
instance = Instance(
instance_id="0",
instance_type="worker_nodes1",
cloud_instance_id="0",
status=Instance.RAY_INSTALLING,
node_kind=NodeKind.WORKER,
)
        success, version = self.instance_storage.upsert_instance(instance)
assert success
        instance.version = version
self.runner.respond_to_call("json .Config.Env", ["[]" for i in range(1)])
self.threaded_ray_installer._install_ray_on_new_nodes(instance.instance_id)
self.threaded_ray_installer._ray_installation_executor.shutdown(wait=True)
instances, _ = self.instance_storage.get_instances(
instance_ids={instance.instance_id}
)
# Make sure the instance status is not updated by the ThreadedRayInstaller
# since it should be updated by the Reconciler.
assert instances[instance.instance_id].status == Instance.RAY_INSTALLING
if __name__ == "__main__":
if os.environ.get("PARALLEL_CI"):
sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
else:
sys.exit(pytest.main(["-sv", __file__]))
|
ThreadedRayInstallerTest
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/dialects/mysql/types.py
|
{
"start": 23203,
"end": 24636
}
|
class ____(_StringType, sqltypes.CHAR):
"""MySQL CHAR type, for fixed-length character data."""
__visit_name__ = "CHAR"
def __init__(self, length: Optional[int] = None, **kwargs: Any):
"""Construct a CHAR.
:param length: Maximum data length, in characters.
:param binary: Optional, use the default binary collation for the
national character set. This does not affect the type of data
stored, use a BINARY type for binary data.
:param collation: Optional, request a particular collation. Must be
compatible with the national character set.
"""
super().__init__(length=length, **kwargs)
@classmethod
def _adapt_string_for_cast(cls, type_: sqltypes.String) -> sqltypes.CHAR:
# copy the given string type into a CHAR
# for the purposes of rendering a CAST expression
type_ = sqltypes.to_instance(type_)
if isinstance(type_, sqltypes.CHAR):
return type_
elif isinstance(type_, _StringType):
return CHAR(
length=type_.length,
charset=type_.charset,
collation=type_.collation,
ascii=type_.ascii,
binary=type_.binary,
unicode=type_.unicode,
national=False, # not supported in CAST
)
else:
return CHAR(length=type_.length)
|
CHAR
|
python
|
wandb__wandb
|
wandb/sdk/artifacts/_generated/artifact_type.py
|
{
"start": 208,
"end": 284
}
|
class ____(GQLResult):
project: Optional[ArtifactTypeProject]
|
ArtifactType
|
python
|
TheAlgorithms__Python
|
data_structures/binary_tree/binary_tree_node_sum.py
|
{
"start": 400,
"end": 656
}
|
class ____:
"""
A Node has a value variable and pointers to Nodes to its left and right.
"""
def __init__(self, value: int) -> None:
self.value = value
self.left: Node | None = None
self.right: Node | None = None
|
Node
|
python
|
pytorch__pytorch
|
torch/onnx/_internal/fx/passes/type_promotion.py
|
{
"start": 50495,
"end": 61425
}
|
class ____(torch.fx.Interpreter):
"""Interpreter that inserts type promotion for each node."""
def __init__(
self,
module: torch.fx.GraphModule,
type_promotion_table: TypePromotionTable,
) -> None:
super().__init__(module)
self.type_promotion_table = type_promotion_table
def _run_node_and_set_meta(self, node) -> Any:
"""Run node and set meta according to `fx_traceback.get_current_meta()`.
This should be used on new nodes or nodes that have been modified.
By default `Interpreter.run_node` does not update `node.meta`.
Set `node.meta` to the current meta, except for `node.meta["val"]`, which is
recomputed.
"""
out = super().run_node(node)
# Update interpreter env state with new output value.
self.env[node] = out
node.meta.update(
(k, v)
for k, v in fx_traceback.get_current_meta().items()
if k not in node.meta
)
node.meta["val"] = proxy_tensor.extract_val(out)
return out
def _create_node(
self,
graph: torch.fx.Graph,
op_type: str,
target: torch.fx.node.Target,
args: tuple,
kwargs: dict,
) -> torch.fx.Node:
"""Create a node and set its metadata."""
assert op_type in (
"call_function",
"call_method",
"get_attr",
"call_module",
"placeholder",
"output",
), f"Unexpected op_type: {op_type}"
node = getattr(graph, op_type)(target, args, kwargs)
self._run_node_and_set_meta(node)
return node
def _rerun_node_after_type_promotion(
self,
node: torch.fx.Node,
expected_out_dtype: torch.dtype,
) -> None:
"""Rerun a node after type promotion and update node.meta["val"] with the output value."""
node_val = node.meta.get("val", None)
assert node_val is not None, f"Node {node} node.meta['val'] is not set."
args, kwargs = self.fetch_args_kwargs_from_env(node)
target = node.target
assert isinstance(target, torch._ops.OpOverload), (
f"Expected OpOverload, got {type(target)}"
)
node.target = find_compatible_op_overload(target.overloadpacket, args, kwargs)
new_node_val = self._run_node_and_set_meta(node)
assert isinstance(new_node_val, type(node_val)), (
f"run_node output type should not change between runs. "
f"Got {type(new_node_val)}, expect {type(node_val)}."
)
if isinstance(node_val, torch.Tensor):
prev_node_dtype = node_val.dtype
assert prev_node_dtype == expected_out_dtype, (
f"node.meta['val'].dtype({prev_node_dtype}) does not agree with "
f"type promotion rule({expected_out_dtype})."
)
if new_node_val.dtype != expected_out_dtype:
# With explicit type promotion, the expected result dtype may not be
# the same as the computation dtype. This is referred to as "op math".
# We need to explicitly cast the output back to the expected dtype.
# See more about "op math" topic at `_prims_common.elementwise_dtypes`.
graph = node.graph
with graph.inserting_after(node):
output_cast_node = self._create_node(
graph,
"call_function",
torch.ops.prims.convert_element_type.default,
(node,),
{"dtype": expected_out_dtype},
)
node.replace_all_uses_with(output_cast_node)
output_cast_node.args = (node,)
logger.info(
"Node '%s' output dtype becomes %s due to op math. "
"Cast back to %s.",
node,
new_node_val.dtype,
expected_out_dtype,
)
elif fx_type_utils.is_torch_symbolic_type(node_val):
raise NotImplementedError(
"Type promotion does not support node output of sym types."
)
elif isinstance(node_val, (list, tuple)):
raise NotImplementedError(
"Type promotion does not support node output of list or tuple."
)
else:
raise RuntimeError(f"Unexpected node output type: {type(node_val)}.")
def _maybe_promote_arg(
self,
node: torch.fx.Node,
fx_arg: torch.fx.node.Argument,
dtype: torch.dtype | None,
) -> torch.fx.node.Argument:
"""Promote fx_arg to dtype if necessary."""
if dtype is None:
logger.info(
"Argument %s is not promoted. Not mentioned by type promotion rule.",
fx_arg,
)
return fx_arg
if isinstance(fx_arg, torch.fx.Node):
arg_val = self.env[fx_arg]
if isinstance(arg_val, torch.Tensor):
if (old_dtype := arg_val.dtype) != dtype:
# Promote tensor to dtype.
graph = node.graph
with graph.inserting_before(node):
logger.info(
"Argument %s(%s) is promoted to %s.",
fx_arg,
old_dtype,
dtype,
)
return self._create_node(
graph,
"call_function",
torch.ops.prims.convert_element_type.default,
(fx_arg,),
{"dtype": dtype},
)
logger.info("Argument %s is not promoted. Already %s.", fx_arg, dtype)
return fx_arg
elif fx_type_utils.is_torch_symbolic_type(arg_val):
arg_type = type(arg_val)
equivalent_dtype = fx_type_utils.from_scalar_type_to_torch_dtype(
arg_type
)
assert equivalent_dtype is not None, f"Unexpected arg_type: {arg_type}"
if equivalent_dtype != dtype:
# Promote Sym number to tensor of dtype.
graph = node.graph
with graph.inserting_before(node):
logger.info(
"Argument %s(Scalar of equivalent dtype: %s) "
"is promoted to %s.",
fx_arg,
equivalent_dtype,
dtype,
)
return self._create_node(
graph,
"call_function",
torch.ops.aten.scalar_tensor.default,
(fx_arg,),
{"dtype": dtype},
)
logger.info("Argument %s is not promoted. Already %s.", fx_arg, dtype)
return fx_arg
elif (
equivalent_dtype := fx_type_utils.from_scalar_type_to_torch_dtype(
type(fx_arg)
)
) is not None:
if equivalent_dtype != dtype:
# Promote number to tensor of dtype.
# The op should have overload that supports tensor for this arg, otherwise
# the type promotion rule should not suggest promoting this arg.
graph = node.graph
with graph.inserting_before(node):
logger.info(
"Argument %s(Scalar of equivalent dtype: %s) "
"is promoted to %s.",
fx_arg,
equivalent_dtype,
dtype,
)
return self._create_node(
graph,
"call_function",
torch.ops.aten.scalar_tensor.default,
(fx_arg,),
{"dtype": dtype},
)
logger.info("Argument %s is not promoted. Already %s.", fx_arg, dtype)
return fx_arg
elif isinstance(fx_arg, (tuple, list)):
logger.info("Argument %s is a tuple/list. Promoting each element.", fx_arg)
return type(fx_arg)(
self._maybe_promote_arg(node, fx_arg_elem, dtype)
for fx_arg_elem in fx_arg
)
raise NotImplementedError(f"Unknown fx arg type: {type(fx_arg)}")
def _maybe_promote_node(
self,
node: torch.fx.Node,
rule: TypePromotionRule,
) -> torch.fx.Node:
"""Promote node inputs and outputs according to type promotion rule."""
args, kwargs = self.fetch_args_kwargs_from_env(node)
type_promotion_info = rule.preview_type_promotion(args, kwargs)
new_args = []
new_kwargs = {}
for i, arg in enumerate(node.args):
new_args.append(
self._maybe_promote_arg(
node, arg, type_promotion_info.args_dtypes.get(i, None)
)
)
for name, arg in node.kwargs.items():
new_kwargs[name] = self._maybe_promote_arg(
node, arg, type_promotion_info.kwargs_dtypes.get(name, None)
)
new_args = tuple(new_args)
if node.args != new_args or node.kwargs != new_kwargs:
node.args = new_args
node.kwargs = new_kwargs
self._rerun_node_after_type_promotion(node, type_promotion_info.out_dtype)
return node
def run_node(self, n: torch.fx.Node) -> Any:
"""This method is an override which inserts type promotion nodes as needed.
For each `call_function` node, an initial check is conducted to determine if a type
promotion rule is applicable. If a relevant rule exists, type casting nodes are
introduced for the corresponding arguments. The OpOverload of the node is updated
to one that accommodates the promoted types. Should the output type be different,
        a type casting node is inserted for this output.
The call `super().run_node(node)` is guaranteed to be invoked for each node.
In the case of new or modified nodes, the result of `super().run_node(node)` is
used to update its `node.meta["val"]` value.
"""
with self._set_current_node(n):
if rule := get_type_promotion_rule(n, self.type_promotion_table):
self._maybe_promote_node(n, rule)
return super().run_node(n)
|
_TypePromotionInterpreter
|
python
|
mwaskom__seaborn
|
tests/test_distributions.py
|
{
"start": 28160,
"end": 35842
}
|
class ____:
def test_long_vectors(self, long_df):
ax1 = kdeplot(data=long_df, x="x", y="y")
x = long_df["x"]
x_values = [x, x.to_numpy(), x.to_list()]
y = long_df["y"]
y_values = [y, y.to_numpy(), y.to_list()]
for x, y in zip(x_values, y_values):
f, ax2 = plt.subplots()
kdeplot(x=x, y=y, ax=ax2)
for c1, c2 in zip(ax1.collections, ax2.collections):
assert_array_equal(c1.get_offsets(), c2.get_offsets())
def test_singular_data(self):
with pytest.warns(UserWarning):
ax = dist.kdeplot(x=np.ones(10), y=np.arange(10))
assert not ax.lines
with pytest.warns(UserWarning):
ax = dist.kdeplot(x=[5], y=[6])
assert not ax.lines
with pytest.warns(UserWarning):
ax = kdeplot(x=[1929245168.06679] * 18, y=np.arange(18))
assert not ax.lines
with warnings.catch_warnings():
warnings.simplefilter("error", UserWarning)
ax = kdeplot(x=[5], y=[7], warn_singular=False)
assert not ax.lines
def test_fill_artists(self, long_df):
for fill in [True, False]:
f, ax = plt.subplots()
kdeplot(data=long_df, x="x", y="y", hue="c", fill=fill)
for c in ax.collections:
if not _version_predates(mpl, "3.8.0rc1"):
assert isinstance(c, mpl.contour.QuadContourSet)
elif fill or not _version_predates(mpl, "3.5.0b0"):
assert isinstance(c, mpl.collections.PathCollection)
else:
assert isinstance(c, mpl.collections.LineCollection)
def test_common_norm(self, rng):
hue = np.repeat(["a", "a", "a", "b"], 40)
x, y = rng.multivariate_normal([0, 0], [(.2, .5), (.5, 2)], len(hue)).T
x[hue == "a"] -= 2
x[hue == "b"] += 2
f, (ax1, ax2) = plt.subplots(ncols=2)
kdeplot(x=x, y=y, hue=hue, common_norm=True, ax=ax1)
kdeplot(x=x, y=y, hue=hue, common_norm=False, ax=ax2)
n_seg_1 = sum(len(get_contour_coords(c, True)) for c in ax1.collections)
n_seg_2 = sum(len(get_contour_coords(c, True)) for c in ax2.collections)
assert n_seg_2 > n_seg_1
def test_log_scale(self, rng):
x = rng.lognormal(0, 1, 100)
y = rng.uniform(0, 1, 100)
levels = .2, .5, 1
f, ax = plt.subplots()
kdeplot(x=x, y=y, log_scale=True, levels=levels, ax=ax)
assert ax.get_xscale() == "log"
assert ax.get_yscale() == "log"
f, (ax1, ax2) = plt.subplots(ncols=2)
kdeplot(x=x, y=y, log_scale=(10, False), levels=levels, ax=ax1)
assert ax1.get_xscale() == "log"
assert ax1.get_yscale() == "linear"
p = _DistributionPlotter()
kde = KDE()
density, (xx, yy) = kde(np.log10(x), y)
levels = p._quantile_to_level(density, levels)
ax2.contour(10 ** xx, yy, density, levels=levels)
for c1, c2 in zip(ax1.collections, ax2.collections):
assert len(get_contour_coords(c1)) == len(get_contour_coords(c2))
for arr1, arr2 in zip(get_contour_coords(c1), get_contour_coords(c2)):
assert_array_equal(arr1, arr2)
def test_bandwidth(self, rng):
n = 100
x, y = rng.multivariate_normal([0, 0], [(.2, .5), (.5, 2)], n).T
f, (ax1, ax2) = plt.subplots(ncols=2)
kdeplot(x=x, y=y, ax=ax1)
kdeplot(x=x, y=y, bw_adjust=2, ax=ax2)
for c1, c2 in zip(ax1.collections, ax2.collections):
seg1, seg2 = get_contour_coords(c1), get_contour_coords(c2)
if seg1 + seg2:
x1 = seg1[0][:, 0]
x2 = seg2[0][:, 0]
assert np.abs(x2).max() > np.abs(x1).max()
def test_weights(self, rng):
n = 100
x, y = rng.multivariate_normal([1, 3], [(.2, .5), (.5, 2)], n).T
hue = np.repeat([0, 1], n // 2)
weights = rng.uniform(0, 1, n)
f, (ax1, ax2) = plt.subplots(ncols=2)
kdeplot(x=x, y=y, hue=hue, ax=ax1)
kdeplot(x=x, y=y, hue=hue, weights=weights, ax=ax2)
for c1, c2 in zip(ax1.collections, ax2.collections):
if get_contour_coords(c1) and get_contour_coords(c2):
seg1 = np.concatenate(get_contour_coords(c1), axis=0)
seg2 = np.concatenate(get_contour_coords(c2), axis=0)
assert not np.array_equal(seg1, seg2)
def test_hue_ignores_cmap(self, long_df):
with pytest.warns(UserWarning, match="cmap parameter ignored"):
ax = kdeplot(data=long_df, x="x", y="y", hue="c", cmap="viridis")
assert_colors_equal(get_contour_color(ax.collections[0]), "C0")
def test_contour_line_colors(self, long_df):
color = (.2, .9, .8, 1)
ax = kdeplot(data=long_df, x="x", y="y", color=color)
for c in ax.collections:
assert_colors_equal(get_contour_color(c), color)
def test_contour_line_cmap(self, long_df):
color_list = color_palette("Blues", 12)
cmap = mpl.colors.ListedColormap(color_list)
ax = kdeplot(data=long_df, x="x", y="y", cmap=cmap)
for c in ax.collections:
for color in get_contour_color(c):
assert to_rgb(color) in color_list
def test_contour_fill_colors(self, long_df):
n = 6
color = (.2, .9, .8, 1)
ax = kdeplot(
data=long_df, x="x", y="y", fill=True, color=color, levels=n,
)
cmap = light_palette(color, reverse=True, as_cmap=True)
lut = cmap(np.linspace(0, 1, 256))
for c in ax.collections:
for color in c.get_facecolor():
assert color in lut
def test_colorbar(self, long_df):
ax = kdeplot(data=long_df, x="x", y="y", fill=True, cbar=True)
assert len(ax.figure.axes) == 2
def test_levels_and_thresh(self, long_df):
f, (ax1, ax2) = plt.subplots(ncols=2)
n = 8
thresh = .1
plot_kws = dict(data=long_df, x="x", y="y")
kdeplot(**plot_kws, levels=n, thresh=thresh, ax=ax1)
kdeplot(**plot_kws, levels=np.linspace(thresh, 1, n), ax=ax2)
for c1, c2 in zip(ax1.collections, ax2.collections):
assert len(get_contour_coords(c1)) == len(get_contour_coords(c2))
for arr1, arr2 in zip(get_contour_coords(c1), get_contour_coords(c2)):
assert_array_equal(arr1, arr2)
with pytest.raises(ValueError):
kdeplot(**plot_kws, levels=[0, 1, 2])
ax1.clear()
ax2.clear()
kdeplot(**plot_kws, levels=n, thresh=None, ax=ax1)
kdeplot(**plot_kws, levels=n, thresh=0, ax=ax2)
for c1, c2 in zip(ax1.collections, ax2.collections):
assert len(get_contour_coords(c1)) == len(get_contour_coords(c2))
for arr1, arr2 in zip(get_contour_coords(c1), get_contour_coords(c2)):
assert_array_equal(arr1, arr2)
for c1, c2 in zip(ax1.collections, ax2.collections):
assert_array_equal(c1.get_facecolors(), c2.get_facecolors())
def test_quantile_to_level(self, rng):
x = rng.uniform(0, 1, 100000)
isoprop = np.linspace(.1, 1, 6)
levels = _DistributionPlotter()._quantile_to_level(x, isoprop)
for h, p in zip(levels, isoprop):
assert (x[x <= h].sum() / x.sum()) == pytest.approx(p, abs=1e-4)
def test_input_checking(self, long_df):
with pytest.raises(TypeError, match="The x variable is categorical,"):
kdeplot(data=long_df, x="a", y="y")
|
TestKDEPlotBivariate
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_chart_area02.py
|
{
"start": 315,
"end": 1422
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_area02.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "area", "subtype": "stacked"})
chart.axis_ids = [62813312, 62814848]
data = [
[1, 2, 3, 4, 5],
[8, 7, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{"categories": "=Sheet1!$A$1:$A$5", "values": "=Sheet1!$B$1:$B$5"}
)
chart.add_series(
{"categories": "=Sheet1!$A$1:$A$5", "values": "=Sheet1!$C$1:$C$5"}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
django-guardian__django-guardian
|
guardian/models/models.py
|
{
"start": 6946,
"end": 7210
}
|
class ____(GroupObjectPermissionBase, BaseGenericObjectPermission):
class Meta(GroupObjectPermissionBase.Meta, BaseGenericObjectPermission.Meta):
abstract = True
unique_together = ["group", "permission", "object_pk"]
|
GroupObjectPermissionAbstract
|
python
|
joke2k__faker
|
tests/providers/test_automotive.py
|
{
"start": 6507,
"end": 6670
}
|
class ____(_SimpleAutomotiveTestMixin):
"""Test hu_HU automotive provider methods"""
license_plate_pattern: Pattern = re.compile(r"[A-Z]{3}-\d{3}")
|
TestHuHu
|
python
|
getsentry__sentry
|
tests/sentry/replays/conftest.py
|
{
"start": 89,
"end": 529
}
|
class ____:
def save(self, data: dict[str, Any]) -> None:
request_url = settings.SENTRY_SNUBA + "/tests/entities/replays/insert"
response = requests.post(request_url, json=[data])
assert response.status_code == 200
return None
@pytest.fixture
def replay_store() -> ReplayStore:
assert requests.post(settings.SENTRY_SNUBA + "/tests/replays/drop").status_code == 200
return ReplayStore()
|
ReplayStore
|
python
|
pallets__werkzeug
|
src/werkzeug/_internal.py
|
{
"start": 303,
"end": 1364
}
|
class ____:
def __repr__(self) -> str:
return "no value"
def __reduce__(self) -> str:
return "_missing"
_missing = _Missing()
def _wsgi_decoding_dance(s: str) -> str:
return s.encode("latin1").decode(errors="replace")
def _wsgi_encoding_dance(s: str) -> str:
return s.encode().decode("latin1")
def _get_environ(obj: WSGIEnvironment | Request) -> WSGIEnvironment:
env = getattr(obj, "environ", obj)
assert isinstance(env, dict), (
f"{type(obj).__name__!r} is not a WSGI environment (has to be a dict)"
)
return env
def _has_level_handler(logger: logging.Logger) -> bool:
"""Check if there is a handler in the logging chain that will handle
the given logger's effective level.
"""
level = logger.getEffectiveLevel()
current = logger
while current:
if any(handler.level <= level for handler in current.handlers):
return True
if not current.propagate:
break
current = current.parent # type: ignore
return False
|
_Missing
|
python
|
python-pillow__Pillow
|
src/PIL/PngImagePlugin.py
|
{
"start": 3163,
"end": 3768
}
|
class ____(IntEnum):
OP_NONE = 0
"""
No disposal is done on this frame before rendering the next frame.
See :ref:`Saving APNG sequences<apng-saving>`.
"""
OP_BACKGROUND = 1
"""
This frame’s modified region is cleared to fully transparent black before rendering
the next frame.
See :ref:`Saving APNG sequences<apng-saving>`.
"""
OP_PREVIOUS = 2
"""
This frame’s modified region is reverted to the previous frame’s contents before
rendering the next frame.
See :ref:`Saving APNG sequences<apng-saving>`.
"""
# APNG frame blend modes
|
Disposal
|
python
|
gevent__gevent
|
src/greentest/3.10/test_socket.py
|
{
"start": 22165,
"end": 22414
}
|
class ____(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
|
SCTPStreamBase
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 42332,
"end": 42495
}
|
class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("SECRET", "VISIBLE")
|
TeamPrivacy
|
python
|
realpython__materials
|
python-doctest/user.py
|
{
"start": 451,
"end": 910
}
|
class ____:
def __init__(self, name, favorite_colors):
self.name = name
self._favorite_colors = set(favorite_colors)
@property
def favorite_colors(self):
"""Return the user's favorite colors.
Usage examples:
>>> john = User("John", {"#797EF6", "#4ADEDE", "#1AA7EC"})
>>> sorted(john.favorite_colors)
['#1AA7EC', '#4ADEDE', '#797EF6']
"""
return self._favorite_colors
|
User_Two
|
python
|
getsentry__sentry-python
|
sentry_sdk/integrations/unraisablehook.py
|
{
"start": 284,
"end": 1753
}
|
class ____(Integration):
identifier = "unraisablehook"
@staticmethod
def setup_once():
# type: () -> None
sys.unraisablehook = _make_unraisable(sys.unraisablehook)
def _make_unraisable(old_unraisablehook):
# type: (Callable[[sys.UnraisableHookArgs], Any]) -> Callable[[sys.UnraisableHookArgs], Any]
def sentry_sdk_unraisablehook(unraisable):
# type: (sys.UnraisableHookArgs) -> None
integration = sentry_sdk.get_client().get_integration(UnraisablehookIntegration)
# Note: If we replace this with ensure_integration_enabled then
# we break the exceptiongroup backport;
# See: https://github.com/getsentry/sentry-python/issues/3097
if integration is None:
return old_unraisablehook(unraisable)
if unraisable.exc_value and unraisable.exc_traceback:
with capture_internal_exceptions():
event, hint = event_from_exception(
(
unraisable.exc_type,
unraisable.exc_value,
unraisable.exc_traceback,
),
client_options=sentry_sdk.get_client().options,
mechanism={"type": "unraisablehook", "handled": False},
)
sentry_sdk.capture_event(event, hint=hint)
return old_unraisablehook(unraisable)
return sentry_sdk_unraisablehook
|
UnraisablehookIntegration
|
python
|
spyder-ide__spyder
|
spyder/plugins/explorer/widgets/explorer.py
|
{
"start": 3100,
"end": 3231
}
|
class ____:
Context = 'context_menu'
Header = 'header_menu'
New = 'new_menu'
OpenWith = 'open_with_menu'
|
DirViewMenus
|
python
|
mkdocs__mkdocs
|
mkdocs/livereload/__init__.py
|
{
"start": 2257,
"end": 12888
}
|
class ____(socketserver.ThreadingMixIn, wsgiref.simple_server.WSGIServer):
daemon_threads = True
poll_response_timeout = 60
def __init__(
self,
builder: Callable[[], None],
host: str,
port: int,
root: str,
mount_path: str = "/",
polling_interval: float = 0.5,
shutdown_delay: float = 0.25,
) -> None:
self.builder = builder
try:
if isinstance(ipaddress.ip_address(host), ipaddress.IPv6Address):
self.address_family = socket.AF_INET6
except Exception:
pass
self.root = os.path.abspath(root)
self.mount_path = _normalize_mount_path(mount_path)
self.url = _serve_url(host, port, mount_path)
self.build_delay = 0.1
self.shutdown_delay = shutdown_delay
# To allow custom error pages.
self.error_handler: Callable[[int], bytes | None] = lambda code: None
super().__init__((host, port), _Handler, bind_and_activate=False)
self.set_app(self.serve_request)
self._wanted_epoch = _timestamp() # The version of the site that started building.
self._visible_epoch = self._wanted_epoch # Latest fully built version of the site.
self._epoch_cond = threading.Condition() # Must be held when accessing _visible_epoch.
self._want_rebuild: bool = False
self._rebuild_cond = threading.Condition() # Must be held when accessing _want_rebuild.
self._shutdown = False
self.serve_thread = threading.Thread(target=lambda: self.serve_forever(shutdown_delay))
self.observer = watchdog.observers.polling.PollingObserver(timeout=polling_interval)
self._watched_paths: dict[str, int] = {}
self._watch_refs: dict[str, Any] = {}
def watch(self, path: str, func: None = None, *, recursive: bool = True) -> None:
"""Add the 'path' to watched paths, call the function and reload when any file changes under it."""
path = os.path.abspath(path)
if not (func is None or func is self.builder): # type: ignore[unreachable]
raise TypeError("Plugins can no longer pass a 'func' parameter to watch().")
if path in self._watched_paths:
self._watched_paths[path] += 1
return
self._watched_paths[path] = 1
def callback(event):
if event.is_directory:
return
log.debug(str(event))
with self._rebuild_cond:
self._want_rebuild = True
self._rebuild_cond.notify_all()
handler = watchdog.events.FileSystemEventHandler()
handler.on_any_event = callback # type: ignore[method-assign]
log.debug(f"Watching '{path}'")
self._watch_refs[path] = self.observer.schedule(handler, path, recursive=recursive)
def unwatch(self, path: str) -> None:
"""Stop watching file changes for path. Raises if there was no corresponding `watch` call."""
path = os.path.abspath(path)
self._watched_paths[path] -= 1
if self._watched_paths[path] <= 0:
self._watched_paths.pop(path)
self.observer.unschedule(self._watch_refs.pop(path))
def serve(self, *, open_in_browser=False):
self.server_bind()
self.server_activate()
if self._watched_paths:
self.observer.start()
paths_str = ", ".join(f"'{_try_relativize_path(path)}'" for path in self._watched_paths)
log.info(f"Watching paths for changes: {paths_str}")
if open_in_browser:
log.info(f"Serving on {self.url} and opening it in a browser")
else:
log.info(f"Serving on {self.url}")
self.serve_thread.start()
if open_in_browser:
webbrowser.open(self.url)
self._build_loop()
def _build_loop(self):
while True:
with self._rebuild_cond:
while not self._rebuild_cond.wait_for(
lambda: self._want_rebuild or self._shutdown, timeout=self.shutdown_delay
):
# We could have used just one wait instead of a loop + timeout, but we need
# occasional breaks, otherwise on Windows we can't receive KeyboardInterrupt.
pass
if self._shutdown:
break
log.info("Detected file changes")
while self._rebuild_cond.wait(timeout=self.build_delay):
log.debug("Waiting for file changes to stop happening")
self._wanted_epoch = _timestamp()
self._want_rebuild = False
try:
self.builder()
except Exception as e:
if isinstance(e, SystemExit):
print(e, file=sys.stderr) # noqa: T201
else:
traceback.print_exc()
log.error(
"An error happened during the rebuild. The server will appear stuck until build errors are resolved."
)
continue
with self._epoch_cond:
log.info("Reloading browsers")
self._visible_epoch = self._wanted_epoch
self._epoch_cond.notify_all()
def shutdown(self, wait=False) -> None:
self.observer.stop()
with self._rebuild_cond:
self._shutdown = True
self._rebuild_cond.notify_all()
if self.serve_thread.is_alive():
super().shutdown()
self.server_close()
if wait:
self.serve_thread.join()
self.observer.join()
def serve_request(self, environ, start_response) -> Iterable[bytes]:
try:
result = self._serve_request(environ, start_response)
except Exception:
code = 500
msg = "500 Internal Server Error"
log.exception(msg)
else:
if result is not None:
return result
code = 404
msg = "404 Not Found"
error_content = None
try:
error_content = self.error_handler(code)
except Exception:
log.exception("Failed to render an error message!")
if error_content is None:
error_content = msg.encode()
start_response(msg, [("Content-Type", "text/html")])
return [error_content]
def _serve_request(self, environ, start_response) -> Iterable[bytes] | None:
# https://bugs.python.org/issue16679
# https://github.com/bottlepy/bottle/blob/f9b1849db4/bottle.py#L984
path = environ["PATH_INFO"].encode("latin-1").decode("utf-8", "ignore")
if path.startswith("/livereload/"):
if m := re.fullmatch(r"/livereload/([0-9]+)/[0-9]+", path):
epoch = int(m[1])
start_response("200 OK", [("Content-Type", "text/plain")])
def condition():
return self._visible_epoch > epoch
with self._epoch_cond:
if not condition():
# Stall the browser, respond as soon as there's something new.
# If there's not, respond anyway after a minute.
self._log_poll_request(environ.get("HTTP_REFERER"), request_id=path)
self._epoch_cond.wait_for(condition, timeout=self.poll_response_timeout)
return [b"%d" % self._visible_epoch]
if (path + "/").startswith(self.mount_path):
rel_file_path = path[len(self.mount_path) :]
if path.endswith("/"):
rel_file_path += "index.html"
# Prevent directory traversal - normalize the path.
rel_file_path = posixpath.normpath("/" + rel_file_path).lstrip("/")
file_path = os.path.join(self.root, rel_file_path)
elif path == "/":
start_response("302 Found", [("Location", urllib.parse.quote(self.mount_path))])
return []
else:
return None # Not found
# Wait until the ongoing rebuild (if any) finishes, so we're not serving a half-built site.
with self._epoch_cond:
self._epoch_cond.wait_for(lambda: self._visible_epoch == self._wanted_epoch)
epoch = self._visible_epoch
try:
file: BinaryIO = open(file_path, "rb")
except OSError:
if not path.endswith("/") and os.path.isfile(os.path.join(file_path, "index.html")):
start_response("302 Found", [("Location", urllib.parse.quote(path) + "/")])
return []
return None # Not found
if self._watched_paths and file_path.endswith(".html"):
with file:
content = file.read()
content = self._inject_js_into_html(content, epoch)
file = io.BytesIO(content)
content_length = len(content)
else:
content_length = os.path.getsize(file_path)
content_type = self._guess_type(file_path)
start_response(
"200 OK", [("Content-Type", content_type), ("Content-Length", str(content_length))]
)
return wsgiref.util.FileWrapper(file)
def _inject_js_into_html(self, content, epoch):
try:
body_end = content.rindex(b"</body>")
except ValueError:
body_end = len(content)
# The page will reload if the livereload poller returns a newer epoch than what it knows.
# The other timestamp becomes just a unique identifier for the initiating page.
script = _SCRIPT_TEMPLATE.substitute(epoch=epoch, request_id=_timestamp())
return b"%b<script>%b</script>%b" % (
content[:body_end],
script.encode(),
content[body_end:],
)
@classmethod
@functools.lru_cache # "Cache" to not repeat the same message for the same browser tab.
def _log_poll_request(cls, url, request_id):
log.info(f"Browser connected: {url}")
@classmethod
def _guess_type(cls, path):
# MkDocs only ensures a few common types (as seen in livereload_tests.py::test_mime_types).
# Other uncommon types will not be accepted.
if path.endswith((".js", ".JS", ".mjs")):
return "application/javascript"
if path.endswith(".gz"):
return "application/gzip"
guess, _ = mimetypes.guess_type(path)
if guess:
return guess
return "application/octet-stream"
|
LiveReloadServer
|
python
|
davidhalter__parso
|
parso/parser.py
|
{
"start": 1594,
"end": 2112
}
|
class ____(Exception):
"""
Exception to signal the parser is stuck and error recovery didn't help.
Basically this shouldn't happen. It's a sign that something is really
wrong.
"""
def __init__(self, msg, type_, value, start_pos):
Exception.__init__(self, "%s: type=%r, value=%r, start_pos=%r" %
(msg, type_.name, value, start_pos))
self.msg = msg
self.type = type
self.value = value
self.start_pos = start_pos
|
InternalParseError
|
python
|
pydantic__pydantic
|
pydantic/types.py
|
{
"start": 35528,
"end": 35822
}
|
class ____(BaseModel):
uuid3: UUID3
Model(uuid3=uuid.uuid3(uuid.NAMESPACE_DNS, 'pydantic.org'))
```
"""
UUID4 = Annotated[UUID, UuidVersion(4)]
"""A [UUID](https://docs.python.org/3/library/uuid.html) that must be version 4.
```python
import uuid
from pydantic import UUID4, BaseModel
|
Model
|
python
|
django__django
|
tests/fixtures_regress/models.py
|
{
"start": 7758,
"end": 7930
}
|
class ____(BaseNKModel):
a = models.ForeignKey(M2MComplexCircular1A, models.CASCADE)
b = models.ForeignKey(M2MComplexCircular1B, models.CASCADE)
|
M2MCircular1ThroughAB
|
python
|
bottlepy__bottle
|
test/test_resources.py
|
{
"start": 336,
"end": 3129
}
|
class ____(unittest.TestCase):
def test_path_normalize(self):
for test in TEST_PATHS:
rm = ResourceManager()
rm.add_path(test)
self.assertEqual(rm.path, EXPECTED)
def test_path_create(self):
import shutil
import tempfile
tempdir = tempfile.mkdtemp()
try:
rm = ResourceManager()
exists = rm.add_path('./test/', base=tempdir)
self.assertEqual(exists, False)
exists = rm.add_path('./test2/', base=tempdir, create=True)
self.assertEqual(exists, True)
finally:
shutil.rmtree(tempdir)
def test_path_absolutize(self):
if sys.platform == 'win32':
tests = ('.\\foo\\bar\\', '.\\foo\\bar\\baz', '.\\foo\\baz\\..\\bar\\blub')
abspath = os.path.abspath('.\\foo\\bar\\') + os.sep
else:
tests = ('./foo/bar/', './foo/bar/baz', './foo/baz/../bar/blub')
abspath = os.path.abspath('./foo/bar/') + os.sep
for test in tests:
rm = ResourceManager()
rm.add_path(test)
self.assertEqual(rm.path, [abspath])
for test in tests:
rm = ResourceManager()
rm.add_path(test[2:])
self.assertEqual(rm.path, [abspath])
def test_path_unique(self):
rm = ResourceManager()
[rm.add_path(test) for test in TEST_PATHS]
self.assertEqual(rm.path, EXPECTED)
def test_root_path(self):
if sys.platform == 'win32':
expected = ['C:\\foo\\bar\\baz\\']
else:
expected = ['/foo/bar/baz/']
for test in TEST_PATHS:
rm = ResourceManager()
rm.add_path('./baz/', test)
self.assertEqual(rm.path, expected)
for test in TEST_PATHS:
rm = ResourceManager()
rm.add_path('baz/', test)
self.assertEqual(rm.path, expected)
def test_path_order(self):
rm = ResourceManager()
rm.add_path('/middle/')
rm.add_path('/first/', index=0)
rm.add_path('/last/')
if sys.platform == 'win32':
self.assertEqual(rm.path, ['C:\\first\\', 'C:\\middle\\', 'C:\\last\\'])
else:
self.assertEqual(rm.path, ['/first/', '/middle/', '/last/'])
def test_get(self):
rm = ResourceManager()
rm.add_path('/first/')
rm.add_path(__file__)
rm.add_path('/last/')
self.assertEqual(None, rm.lookup('notexist.txt'))
self.assertEqual(__file__, rm.lookup(os.path.basename(__file__)))
def test_open(self):
rm = ResourceManager()
rm.add_path(__file__)
fp = rm.open(__file__)
self.assertEqual(fp.read(), open(__file__).read())
|
TestResourceManager
|
python
|
huggingface__transformers
|
src/transformers/models/vilt/modeling_vilt.py
|
{
"start": 16340,
"end": 16980
}
|
class ____(nn.Module):
"""
The residual connection is defined in ViltLayer instead of here (as is the case with other models), due to the
layernorm applied before each block.
"""
def __init__(self, config: ViltConfig):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
|
ViltSelfOutput
|
python
|
pypa__pip
|
tests/unit/test_network_auth.py
|
{
"start": 10537,
"end": 11908
}
|
class ____:
"""Represents the current supported API of keyring"""
class Credential:
def __init__(self, username: str, password: str) -> None:
self.username = username
self.password = password
def get_password(self, system: str, username: str) -> None:
pytest.fail("get_password should not ever be called")
def get_credential(self, system: str, username: str) -> Credential | None:
if system == "http://example.com/path2/":
return self.Credential("username", "url")
if system == "example.com":
return self.Credential("username", "netloc")
return None
@pytest.mark.parametrize(
"url, expect",
[
("http://example.com/path1", ("username", "netloc")),
("http://example.com/path2/path3", ("username", "url")),
("http://user2@example.com/path2/path3", ("username", "url")),
],
)
def test_keyring_get_credential(
monkeypatch: pytest.MonkeyPatch, url: str, expect: tuple[str, str]
) -> None:
monkeypatch.setitem(sys.modules, "keyring", KeyringModuleV2())
auth = MultiDomainBasicAuth(
index_urls=["http://example.com/path1", "http://example.com/path2"],
keyring_provider="import",
)
assert (
auth._get_new_credentials(url, allow_netrc=False, allow_keyring=True) == expect
)
|
KeyringModuleV2
|
python
|
Unity-Technologies__ml-agents
|
ml-agents-envs/mlagents_envs/exception.py
|
{
"start": 504,
"end": 632
}
|
class ____(UnityException):
"""
Related to errors with receiving observations.
"""
pass
|
UnityObservationException
|
python
|
getsentry__sentry
|
src/sentry/snuba/outcomes.py
|
{
"start": 2729,
"end": 3397
}
|
class ____(Field):
def get_snuba_columns(self, raw_groupby: Sequence[str] | None = None) -> list[str]:
return ["times_seen"]
def extract_from_row(
self, row: Mapping[str, Any] | None, group: Mapping[str, Any] | None = None
) -> int:
if row is None:
return 0
return int(row["times_seen"])
def select_params(self, dataset: Dataset) -> Function:
if dataset == Dataset.Outcomes:
return Function("sum", [Column("times_seen")], "times_seen")
else:
            # RawOutcomes doesn't have times_seen, do a count instead
return Function("count", [], "times_seen")
|
TimesSeenField
|
python
|
pytorch__pytorch
|
torch/_dynamo/variables/higher_order_ops.py
|
{
"start": 110732,
"end": 114241
}
|
class ____(TorchHigherOrderOperatorVariable):
supports_input_mutation = True
supports_aliasing = True
# TODO - Go through all subclasses of WrapHigherOrderVariable to see if
# restore_side_effects can be ignored. For now, this is conservative.
restore_side_effects = True
def install_subgraph_in_output_graph(
self, tx, fn_vt, fn_args_vt, kwargs, body_gmod, attr_name="wrap_body"
):
return tx.output.install_subgraph(
f"{attr_name}",
body_gmod,
)
def create_wrapped_node(
self,
tx: "InstructionTranslator",
fn_vt,
fn_args_vt,
kwargs,
description,
under_activation_checkpoint=False,
*,
subgraph_name="wrap_body",
):
# See NOTE [HigherOrderOperator tracing design] for more details
(
body_r,
body_graph,
body_lifted_freevars,
body_graph_output_vts,
) = speculate_subgraph_with_auto_output_flattening(
tx,
fn_vt,
fn_args_vt,
kwargs,
description,
source_target=self.value,
restore_side_effects=self.restore_side_effects,
under_activation_checkpoint=under_activation_checkpoint,
supports_input_mutation=self.supports_input_mutation,
supports_aliasing=self.supports_aliasing,
)
body_gmod = torch.fx.GraphModule(tx.output.nn_modules, body_graph)
body_name = self.install_subgraph_in_output_graph(
tx,
fn_vt,
fn_args_vt,
kwargs,
body_gmod,
attr_name=subgraph_name,
)
body_node = make_attr(tx, body_name)
        # Since we call `speculate_subgraph` with `set_subgraph_inputs="automatic"`,
# all the arguments are lifted.
lifted_args = tuple(arg for arg in body_lifted_freevars)
proxy_args = (body_node,) + lifted_args
example_value = pytree.tree_map_only(
torch.fx.Node,
lambda a: a.meta["example_value"],
body_graph.find_nodes(op="output")[0].args[0],
)
return (
proxy_args,
{},
example_value,
body_r,
body_gmod,
body_name,
body_graph_output_vts,
)
def _call_function(
self,
tx: "InstructionTranslator",
args: "list[VariableTracker]",
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
# This flattens the kwargs into lifted args
(
p_args,
p_kwargs,
_example_value,
body_r,
_,
_,
body_graph_output_vts,
) = self.create_wrapped_node(tx, args[0], args[1:], kwargs, "wrap")
if len(p_kwargs) > 0:
unimplemented(
gb_type="WrapHigherOrderVariable: kwargs unexpected",
context=f"args: {args}, kwargs: {kwargs}",
explanation="kwargs should have been flattened into lifted args.",
hints=[
*graph_break_hints.DYNAMO_BUG,
],
)
return _call_function_with_auto_output_flattening(
tx,
self.value,
tuple(p_args),
p_kwargs,
_example_value,
body_r,
body_graph_output_vts,
)
|
WrapHigherOrderVariable
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/utils/waiter.py
|
{
"start": 3544,
"end": 4136
}
|
class ____(str, Enum):
"""
Used to control the waiting behaviour within EMRClusterJobFlowOperator.
Choices:
- WAIT_FOR_COMPLETION - Will wait for the cluster to report "Running" state
- WAIT_FOR_STEPS_COMPLETION - Will wait for the cluster to report "Terminated" state
"""
WAIT_FOR_COMPLETION = "wait_for_completion"
WAIT_FOR_STEPS_COMPLETION = "wait_for_steps_completion"
WAITER_POLICY_NAME_MAPPING: dict[WaitPolicy, str] = {
WaitPolicy.WAIT_FOR_COMPLETION: "job_flow_waiting",
WaitPolicy.WAIT_FOR_STEPS_COMPLETION: "job_flow_terminated",
}
|
WaitPolicy
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_9/models.py
|
{
"start": 66505,
"end": 67385
}
|
class ____(Request):
"""
Gets model information
:param task: Task id
:type task: str
"""
_service = "models"
_action = "get_by_task_id"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {"task": {"description": "Task id", "type": ["string", "null"]}},
"type": "object",
}
def __init__(self, task: Optional[str] = None, **kwargs: Any) -> None:
super(GetByTaskIdRequest, self).__init__(**kwargs)
self.task = task
@schema_property("task")
def task(self) -> Optional[str]:
return self._property_task
@task.setter
def task(self, value: Optional[str]) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
|
GetByTaskIdRequest
|
python
|
tensorflow__tensorflow
|
tensorflow/python/util/nest_util.py
|
{
"start": 4569,
"end": 63404
}
|
class ____(object):
__slots__ = []
def __str__(self):
return "."
def __repr__(self):
return "."
_DOT = _DotString()
def is_nested(modality, structure):
"""Returns true if its input is a nested structure.
For Modality.CORE refer to
[tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest)
for the definition of a nested structure.
Args:
modality: enum value of supported modality [Modality.CORE or Modality.DATA]
structure: the value to test.
Returns:
True if the input is a nested structure.
"""
if modality == Modality.CORE:
return _tf_core_is_nested(structure)
elif modality == Modality.DATA:
return _tf_data_is_nested(structure)
else:
raise ValueError(
"Unknown modality used {} for nested structure".format(modality)
)
# TODO(b/225045380): Move to a "leaf" library to use in trace_type.
def is_namedtuple(instance, strict=False):
"""Returns True iff `instance` is a `namedtuple`.
Args:
instance: An instance of a Python object.
strict: If True, `instance` is considered to be a `namedtuple` only if it is
a "plain" namedtuple. For instance, a class inheriting from a `namedtuple`
will be considered to be a `namedtuple` iff `strict=False`.
Returns:
True if `instance` is a `namedtuple`.
"""
return _pywrap_utils.IsNamedtuple(instance, strict)
def sequence_like(instance, args):
"""Converts the sequence `args` to the same type as `instance`.
Args:
instance: an instance of `tuple`, `list`, `namedtuple`, `dict`,
`collections.OrderedDict`, or `composite_tensor.Composite_Tensor` or
`type_spec.TypeSpec`.
args: items to be converted to the `instance` type.
Returns:
`args` with the type of `instance`.
"""
if _is_mutable_mapping(instance):
# Pack dictionaries in a deterministic order by sorting the keys.
# Notice this means that we ignore the original order of `OrderedDict`
# instances. This is intentional, to avoid potential bugs caused by mixing
# ordered and plain dicts (e.g., flattening a dict but using a
# corresponding `OrderedDict` to pack it back).
result = dict(zip(_tf_core_sorted(instance), args))
instance_type = type(instance)
if instance_type == _collections.defaultdict:
d = _collections.defaultdict(instance.default_factory)
else:
d = instance_type()
for key in instance:
d[key] = result[key]
return d
elif _is_mapping(instance):
result = dict(zip(_tf_core_sorted(instance), args))
instance_type = type(instance)
if not getattr(instance_type, "__supported_by_tf_nest__", False):
tf_logging.log_first_n(
tf_logging.WARN,
"Mapping types may not work well with tf.nest. "
"Prefer using MutableMapping for {}".format(instance_type),
1,
)
try:
return instance_type((key, result[key]) for key in instance)
except TypeError as err:
# pylint: disable=raise-missing-from
raise TypeError(
"Error creating an object of type {} like {}. Note that "
"it must accept a single positional argument "
"representing an iterable of key-value pairs, in "
"addition to self. Cause: {}".format(type(instance), instance, err)
)
elif _is_mapping_view(instance):
# We can't directly construct mapping views, so we create a list instead
return list(args)
elif is_namedtuple(instance) or _is_attrs(instance):
if isinstance(instance, _wrapt.ObjectProxy):
instance_type = type(instance.__wrapped__)
else:
instance_type = type(instance)
return instance_type(*args)
elif _is_composite_tensor(instance):
assert len(args) == 1
spec = instance._type_spec # pylint: disable=protected-access
return spec._from_components(args[0]) # pylint: disable=protected-access
elif _is_type_spec(instance):
# Pack a CompositeTensor's components according to a TypeSpec.
assert len(args) == 1
return instance._from_components(args[0]) # pylint: disable=protected-access
elif isinstance(instance, range):
return sequence_like(list(instance), args)
elif isinstance(instance, _wrapt.ObjectProxy):
# For object proxies, first create the underlying type and then re-wrap it
# in the proxy type.
return type(instance)(sequence_like(instance.__wrapped__, args))
elif isinstance(instance, CustomNestProtocol):
metadata = instance.__tf_flatten__()[0]
return instance.__tf_unflatten__(metadata, tuple(args))
else:
# Not a namedtuple
return type(instance)(args)
def _get_attrs_items(obj):
"""Returns a list of (name, value) pairs from an attrs instance.
TODO(b/268078256): check if this comment is valid, and if so, ensure it's
handled in the function below.
The list will be sorted by name.
Args:
obj: an object.
Returns:
A list of (attr_name, attr_value) pairs, sorted by attr_name.
"""
attrs = getattr(obj.__class__, "__attrs_attrs__")
attr_names = (a.name for a in attrs)
return [(attr_name, getattr(obj, attr_name)) for attr_name in attr_names]
def _tf_core_sorted(dict_):
"""Returns a sorted list of the dict keys, with error if keys not sortable."""
try:
return sorted(dict_.keys())
except TypeError:
# pylint: disable=raise-missing-from
raise TypeError("nest only supports dicts with sortable keys.")
def _tf_data_sorted(dict_):
"""Returns a sorted list of the dict keys, with error if keys not sortable."""
try:
return sorted(list(dict_))
except TypeError as e:
# pylint: disable=raise-missing-from
raise TypeError(
f"nest only supports dicts with sortable keys. Error: {e.message}"
)
def yield_value(modality, iterable):
"""Yield elements of `iterable` in a deterministic order.
Args:
modality: enum value of supported modality [Modality.CORE or Modality.DATA]
iterable: an iterable.
Yields:
The iterable elements in a deterministic order.
"""
if modality == Modality.CORE:
yield from _tf_core_yield_value(iterable)
elif modality == Modality.DATA:
yield from _tf_data_yield_value(iterable)
else:
raise ValueError(
"Unknown modality used {} for nested structure".format(modality)
)
def _tf_core_yield_value(iterable):
for _, v in _tf_core_yield_sorted_items(iterable):
yield v
def yield_sorted_items(modality, iterable):
if modality == Modality.CORE:
return _tf_core_yield_sorted_items(iterable)
else:
raise ValueError(
"Unknown modality used {} for nested structure".format(modality)
)
def _tf_core_yield_sorted_items(iterable):
"""Yield (key, value) pairs for `iterable` in a deterministic order.
For Sequences, the key will be an int, the array index of a value.
For Mappings, the key will be the dictionary key.
For objects (e.g. namedtuples), the key will be the attribute name.
In all cases, the keys will be iterated in sorted order.
Args:
iterable: an iterable.
Yields:
The iterable's (key, value) pairs, in order of sorted keys.
"""
# Ordered to check common structure types (list, tuple, dict) first.
if isinstance(iterable, list):
for item in enumerate(iterable):
yield item
# namedtuples handled separately to avoid expensive namedtuple check.
elif type(iterable) == tuple: # pylint: disable=unidiomatic-typecheck
for item in enumerate(iterable):
yield item
elif isinstance(iterable, (dict, _collections_abc.Mapping)):
# Iterate through dictionaries in a deterministic order by sorting the
# keys. Notice this means that we ignore the original order of `OrderedDict`
# instances. This is intentional, to avoid potential bugs caused by mixing
# ordered and plain dicts (e.g., flattening a dict but using a
# corresponding `OrderedDict` to pack it back).
for key in _tf_core_sorted(iterable):
yield key, iterable[key]
elif _is_attrs(iterable):
for item in _get_attrs_items(iterable):
yield item
elif is_namedtuple(iterable):
for field in iterable._fields:
yield field, getattr(iterable, field)
elif _is_composite_tensor(iterable):
type_spec = iterable._type_spec # pylint: disable=protected-access
yield type_spec.value_type.__name__, type_spec._to_components(iterable) # pylint: disable=protected-access
elif _is_type_spec(iterable):
# Note: to allow CompositeTensors and their TypeSpecs to have matching
# structures, we need to use the same key string here.
yield iterable.value_type.__name__, iterable._component_specs # pylint: disable=protected-access
elif isinstance(iterable, CustomNestProtocol):
flat_component = iterable.__tf_flatten__()[1]
assert isinstance(flat_component, tuple)
yield from enumerate(flat_component)
else:
for item in enumerate(iterable):
yield item
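# Editor's illustrative sketch (not part of the original module): the
# (key, value) pairs yielded for the most common structure types.
def _example_yield_sorted_items():  # pragma: no cover - documentation only
  import collections
  Pair = collections.namedtuple("Pair", ["left", "right"])
  # Sequences yield (index, element); dicts yield (key, value) in sorted-key
  # order; namedtuples yield (field_name, value).
  assert list(_tf_core_yield_sorted_items([10, 20])) == [(0, 10), (1, 20)]
  assert list(_tf_core_yield_sorted_items({"b": 2, "a": 1})) == [("a", 1), ("b", 2)]
  assert list(_tf_core_yield_sorted_items(Pair(1, 2))) == [("left", 1), ("right", 2)]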
def _tf_data_yield_value(iterable):
"""Yield elements of `iterable` in a deterministic order.
Args:
iterable: an iterable.
Yields:
The iterable elements in a deterministic order.
"""
# pylint: disable=protected-access
if isinstance(iterable, _collections_abc.Mapping):
# Iterate through dictionaries in a deterministic order by sorting the
# keys. Notice this means that we ignore the original order of `OrderedDict`
# instances. This is intentional, to avoid potential bugs caused by mixing
# ordered and plain dicts (e.g., flattening a dict but using a
# corresponding `OrderedDict` to pack it back).
for key in _tf_data_sorted(iterable):
yield iterable[key]
  # sparse_tensor depends on tensorflow/python/util/nest.py transitively, so
  # importing it here would create a circular import. To avoid that, we check
  # the class name rather than using `isinstance`.
elif iterable.__class__.__name__ == "SparseTensorValue":
yield iterable
elif _is_attrs(iterable):
for _, attr in _get_attrs_items(iterable):
yield attr
elif isinstance(iterable, CustomNestProtocol):
flat_component = iterable.__tf_flatten__()[1]
assert isinstance(flat_component, tuple)
yield from flat_component
else:
for value in iterable:
yield value
def assert_same_structure(
modality, nest1, nest2, check_types=True, expand_composites=False
):
"""Asserts that two structures are nested in the same way.
For Modality.CORE refer to
[tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest)
for the definition of a structure. Note the method does not check the types of
atoms inside the structures.
Examples:
* These atom vs. atom comparisons will pass:
>>> tf.nest.assert_same_structure(1.5, tf.Variable(1, tf.uint32))
>>> tf.nest.assert_same_structure("abc", np.array([1, 2]))
* These nested structure vs. nested structure comparisons will pass:
>>> structure1 = (((1, 2), 3), 4, (5, 6))
>>> structure2 = ((("foo1", "foo2"), "foo3"), "foo4", ("foo5", "foo6"))
>>> structure3 = [(("a", "b"), "c"), "d", ["e", "f"]]
>>> tf.nest.assert_same_structure(structure1, structure2)
>>> tf.nest.assert_same_structure(structure1, structure3, check_types=False)
>>> import collections
>>> tf.nest.assert_same_structure(
... collections.namedtuple("bar", "a b")(1, 2),
... collections.namedtuple("foo", "a b")(2, 3),
... check_types=False)
>>> tf.nest.assert_same_structure(
... collections.namedtuple("bar", "a b")(1, 2),
... { "a": 1, "b": 2 },
... check_types=False)
>>> tf.nest.assert_same_structure(
... { "a": 1, "b": 2, "c": 3 },
... { "c": 6, "b": 5, "a": 4 })
>>> ragged_tensor1 = tf.RaggedTensor.from_row_splits(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_splits=[0, 4, 4, 7, 8, 8])
>>> ragged_tensor2 = tf.RaggedTensor.from_row_splits(
... values=[3, 1, 4],
... row_splits=[0, 3])
>>> tf.nest.assert_same_structure(
... ragged_tensor1,
... ragged_tensor2,
... expand_composites=True)
* These examples will raise exceptions:
>>> tf.nest.assert_same_structure([0, 1], np.array([0, 1]))
Traceback (most recent call last):
...
ValueError: The two structures don't have the same nested structure
>>> tf.nest.assert_same_structure(
... collections.namedtuple('bar', 'a b')(1, 2),
... collections.namedtuple('foo', 'a b')(2, 3))
Traceback (most recent call last):
...
TypeError: The two structures don't have the same nested structure
For Modality.DATA, nested structures are treated differently than
Modality.CORE. Please refer to class Modality's documentation above to read up
on these differences.
Args:
modality: enum value of supported modality [Modality.CORE or Modality.DATA]
nest1: an atom or a nested structure.
nest2: an atom or a nested structure.
check_types: - For Modality.CORE: if `True` (default) types of structures
are checked as well, including the keys of dictionaries. If set to
`False`, for example a list and a tuple of objects will look the same if
they have the same size. Note that namedtuples with identical name and
fields are always considered to have the same shallow structure. Two types
will also be considered the same if they are both list subtypes (which
allows "list" and "_ListWrapper" from trackable dependency tracking to
compare equal). `check_types=True` only checks type of sub-structures. The
types of atoms are not checked. - For Modality.DATA: if `True` (default)
types of sequences should be same as well. For dictionary, "type" of
dictionary is considered to include its keys. In other words, two
dictionaries with different keys are considered to have a different
"type". If set to `False`, two iterables are considered same as long as
they yield the elements that have same structures.
expand_composites: Arg only valid for Modality.CORE. If true, then composite
tensors such as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are
expanded into their component tensors.
Raises:
ValueError: If the two structures do not have the same number of atoms or
if the two structures are not nested in the same way.
TypeError: If the two structures differ in the type of sequence in any of
their substructures. Only possible if `check_types` is `True`.
"""
if modality == Modality.CORE:
_tf_core_assert_same_structure(nest1, nest2, check_types, expand_composites)
elif modality == Modality.DATA:
_tf_data_assert_same_structure(nest1, nest2, check_types)
else:
raise ValueError(
"Unknown modality used {} for nested structure".format(modality)
)
# pylint: disable=missing-function-docstring
def _tf_core_assert_same_structure(
nest1, nest2, check_types=True, expand_composites=False
):
  # Convert to bool explicitly, as otherwise pybind will not be able to handle
  # the type mismatch message correctly. See GitHub issue 42329 for details.
check_types = bool(check_types)
expand_composites = bool(expand_composites)
try:
_pywrap_utils.AssertSameStructure(
nest1, nest2, check_types, expand_composites
)
except (ValueError, TypeError) as e:
str1 = str(_tf_core_map_structure(lambda _: _DOT, nest1))
str2 = str(_tf_core_map_structure(lambda _: _DOT, nest2))
raise type(e)(
"%s\nEntire first structure:\n%s\nEntire second structure:\n%s"
% (str(e), str1, str2)
)
def _tf_data_assert_same_structure(nest1, nest2, check_types=True):
_pywrap_utils.AssertSameStructureForData(nest1, nest2, check_types)
def _tf_core_packed_nest_with_indices(
structure, flat, index, is_nested_fn, sequence_fn=None
):
"""Helper function for pack_sequence_as.
Args:
structure: structure to mimic.
flat: Flattened values to output substructure for.
index: Index at which to start reading from flat.
is_nested_fn: Function used to test if a value should be treated as a nested
structure.
sequence_fn: Function used to generate a new structure instance.
Returns:
The tuple (new_index, child), where:
* new_index - the updated index into `flat` having processed `structure`.
* packed - the subset of `flat` corresponding to `structure`,
having started at `index`, and packed into the same nested
format.
Raises:
ValueError: if `structure` contains more atoms than `flat`
(assuming indexing starts from `index`).
"""
packed = []
sequence_fn = sequence_fn or sequence_like
for s in _tf_core_yield_value(structure):
if is_nested_fn(s):
new_index, child = _tf_core_packed_nest_with_indices(
s, flat, index, is_nested_fn, sequence_fn
)
packed.append(sequence_fn(s, child))
index = new_index
else:
packed.append(flat[index])
index += 1
return index, packed
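# Editor's illustrative sketch (not part of the original module): the helper
# consumes values from `flat` starting at `index` while mirroring `structure`,
# and reports how far it got so the caller can detect leftover values. Assumes
# `_tf_core_is_nested` (defined earlier in this file) is available.
def _example_packed_nest_with_indices():  # pragma: no cover - documentation only
  new_index, packed = _tf_core_packed_nest_with_indices(
      structure=[(0, 0), 0], flat=[1, 2, 3, 4], index=0,
      is_nested_fn=_tf_core_is_nested,
  )
  assert new_index == 3           # three atoms consumed; flat[3] is left over
  assert packed == [(1, 2), 3]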
def _tf_data_packed_nest_with_indices(structure, flat, index):
"""Helper function for pack_nest_as.
Args:
structure: Substructure (tuple of elements and/or tuples) to mimic
flat: Flattened values to output substructure for.
index: Index at which to start reading from flat.
Returns:
The tuple (new_index, child), where:
* new_index - the updated index into `flat` having processed `structure`.
* packed - the subset of `flat` corresponding to `structure`,
having started at `index`, and packed into the same nested
format.
Raises:
ValueError: if `structure` contains more elements than `flat`
(assuming indexing starts from `index`).
"""
packed = []
for s in _tf_data_yield_value(structure):
if _tf_data_is_nested(s):
new_index, child = _tf_data_packed_nest_with_indices(s, flat, index)
packed.append(sequence_like(s, child)) # pylint: disable=protected-access
index = new_index
else:
packed.append(flat[index])
index += 1
return index, packed
def flatten(modality, structure, expand_composites=False):
"""Flattens a nested structure.
- For Modality.CORE: refer to
[tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest)
for the definition of a structure.
If the structure is an atom, then returns a single-item list: [structure].
This is the inverse of the `nest.pack_sequence_as` method that takes in a
flattened list and re-packs it into the nested structure.
In the case of dict instances, the sequence consists of the values, sorted by
key to ensure deterministic behavior. This is true also for OrderedDict
instances: their sequence order is ignored, the sorting order of keys is used
instead. The same convention is followed in `nest.pack_sequence_as`. This
correctly repacks dicts and OrderedDicts after they have been flattened, and
also allows flattening an OrderedDict and then repacking it back using a
corresponding plain dict, or vice-versa. Dictionaries with non-sortable keys
cannot be flattened.
Users must not modify any collections used in nest while this function is
running.
Examples:
1. Python dict (ordered by key):
>>> dict = { "key3": "value3", "key1": "value1", "key2": "value2" }
>>> tf.nest.flatten(dict)
['value1', 'value2', 'value3']
2. For a nested python tuple:
>>> tuple = ((1.0, 2.0), (3.0, 4.0, 5.0), 6.0)
>>> tf.nest.flatten(tuple)
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
3. For a nested dictionary of dictionaries:
>>> dict = { "key3": {"c": (1.0, 2.0), "a": (3.0)},
... "key1": {"m": "val1", "g": "val2"} }
>>> tf.nest.flatten(dict)
['val2', 'val1', 3.0, 1.0, 2.0]
4. Numpy array (will not flatten):
>>> array = np.array([[1, 2], [3, 4]])
>>> tf.nest.flatten(array)
[array([[1, 2],
[3, 4]])]
5. `tf.Tensor` (will not flatten):
>>> tensor = tf.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
>>> tf.nest.flatten(tensor)
[<tf.Tensor: shape=(3, 3), dtype=float32, numpy=
array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]], dtype=float32)>]
  6. `tf.RaggedTensor`: This is a composite tensor whose representation consists
of a flattened list of 'values' and a list of 'row_splits' which indicate how
to chop up the flattened list into different rows. For more details on
`tf.RaggedTensor`, please visit
https://www.tensorflow.org/api_docs/python/tf/RaggedTensor.
  With `expand_composites=False`, we just return the RaggedTensor as is.
>>> tensor = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2]])
>>> tf.nest.flatten(tensor, expand_composites=False)
[<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2]]>]
  With `expand_composites=True`, we return the component Tensors that make up
  the RaggedTensor representation (the values and row_splits tensors).
>>> tensor = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2]])
>>> tf.nest.flatten(tensor, expand_composites=True)
[<tf.Tensor: shape=(7,), dtype=int32, numpy=array([3, 1, 4, 1, 5, 9, 2],
dtype=int32)>,
<tf.Tensor: shape=(4,), dtype=int64, numpy=array([0, 4, 4, 7])>]
Args:
modality: enum value of supported modality [Modality.CORE or Modality.DATA]
structure: an atom or a nested structure. Note, numpy arrays are considered
atoms and are not flattened.
expand_composites: Arg valid for Modality.CORE only. If true, then composite
tensors such as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are
expanded into their component tensors.
Returns:
A Python list, the flattened version of the input.
Raises:
TypeError: The nest is or contains a dict with non-sortable keys.
"""
if modality == Modality.CORE:
return _tf_core_flatten(structure, expand_composites)
elif modality == Modality.DATA:
return _tf_data_flatten(structure)
else:
raise ValueError(
"Unknown modality used {} for nested structure".format(modality)
)
def _tf_core_flatten(structure, expand_composites=False):
"""See comments for flatten() in tensorflow/python/util/nest.py."""
if structure is None:
return [None]
expand_composites = bool(expand_composites)
return _pywrap_utils.Flatten(structure, expand_composites)
def pack_sequence_as(
modality, structure, flat_sequence, expand_composites, sequence_fn=None
):
"""Returns a given flattened sequence packed into a given structure.
- For Modality.CORE: Refer to
[tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest)
for the definition of a structure.
If `structure` is an atom, `flat_sequence` must be a single-item list;
in this case the return value is `flat_sequence[0]`.
If `structure` is or contains a dict instance, the keys will be sorted to
pack the flat sequence in deterministic order. This is true also for
`OrderedDict` instances: their sequence order is ignored, the sorting order of
keys is used instead. The same convention is followed in `flatten`.
This correctly repacks dicts and `OrderedDict`s after they have been
flattened, and also allows flattening an `OrderedDict` and then repacking it
back using a corresponding plain dict, or vice-versa.
Dictionaries with non-sortable keys cannot be flattened.
Examples:
1. Python dict:
>>> structure = { "key3": "", "key1": "", "key2": "" }
>>> flat_sequence = ["value1", "value2", "value3"]
>>> tf.nest.pack_sequence_as(structure, flat_sequence)
{'key3': 'value3', 'key1': 'value1', 'key2': 'value2'}
2. For a nested python tuple:
>>> structure = (('a','b'), ('c','d','e'), 'f')
>>> flat_sequence = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
>>> tf.nest.pack_sequence_as(structure, flat_sequence)
((1.0, 2.0), (3.0, 4.0, 5.0), 6.0)
3. For a nested dictionary of dictionaries:
>>> structure = { "key3": {"c": ('alpha', 'beta'), "a": ('gamma')},
... "key1": {"e": "val1", "d": "val2"} }
>>> flat_sequence = ['val2', 'val1', 3.0, 1.0, 2.0]
>>> tf.nest.pack_sequence_as(structure, flat_sequence)
{'key3': {'c': (1.0, 2.0), 'a': 3.0}, 'key1': {'e': 'val1', 'd': 'val2'}}
4. Numpy array (considered a scalar):
>>> structure = ['a']
>>> flat_sequence = [np.array([[1, 2], [3, 4]])]
>>> tf.nest.pack_sequence_as(structure, flat_sequence)
[array([[1, 2],
[3, 4]])]
5. tf.Tensor (considered a scalar):
>>> structure = ['a']
>>> flat_sequence = [tf.constant([[1., 2., 3.], [4., 5., 6.]])]
>>> tf.nest.pack_sequence_as(structure, flat_sequence)
[<tf.Tensor: shape=(2, 3), dtype=float32,
numpy= array([[1., 2., 3.], [4., 5., 6.]], dtype=float32)>]
  6. `tf.RaggedTensor`: This is a composite tensor whose representation consists
of a flattened list of 'values' and a list of 'row_splits' which indicate how
to chop up the flattened list into different rows. For more details on
`tf.RaggedTensor`, please visit
https://www.tensorflow.org/api_docs/python/tf/RaggedTensor.
With `expand_composites=False`, we treat RaggedTensor as a scalar.
>>> structure = { "foo": tf.ragged.constant([[1, 2], [3]]),
... "bar": tf.constant([[5]]) }
>>> flat_sequence = [ "one", "two" ]
>>> tf.nest.pack_sequence_as(structure, flat_sequence,
... expand_composites=False)
{'foo': 'two', 'bar': 'one'}
With `expand_composites=True`, we expect that the flattened input contains
the tensors making up the ragged tensor i.e. the values and row_splits
tensors.
>>> structure = { "foo": tf.ragged.constant([[1., 2.], [3.]]),
... "bar": tf.constant([[5.]]) }
>>> tensors = tf.nest.flatten(structure, expand_composites=True)
>>> print(tensors)
[<tf.Tensor: shape=(1, 1), dtype=float32, numpy=array([[5.]],
dtype=float32)>,
<tf.Tensor: shape=(3,), dtype=float32, numpy=array([1., 2., 3.],
dtype=float32)>,
<tf.Tensor: shape=(3,), dtype=int64, numpy=array([0, 2, 3])>]
>>> verified_tensors = [tf.debugging.check_numerics(t, 'invalid tensor: ')
... if t.dtype==tf.float32 else t
... for t in tensors]
>>> tf.nest.pack_sequence_as(structure, verified_tensors,
... expand_composites=True)
{'foo': <tf.RaggedTensor [[1.0, 2.0], [3.0]]>,
'bar': <tf.Tensor: shape=(1, 1), dtype=float32, numpy=array([[5.]],
dtype=float32)>}
- For Modality.DATA: If `structure` is a scalar, `flat_sequence` must be a
single-element list;
in this case the return value is `flat_sequence[0]`.
Args:
modality: enum value of supported modality [Modality.CORE or Modality.DATA]
structure: - For Modality.CORE: Nested structure, whose structure is given
by nested lists, tuples, and dicts. Note: numpy arrays and strings are
considered scalars. - For Modality.DATA: tuple or list constructed of
scalars and/or other tuples/lists, or a scalar. Note: numpy arrays are
considered scalars.
flat_sequence: flat sequence to pack.
expand_composites: Arg valid for Modality.CORE only. If true, then composite
tensors such as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are
expanded into their component tensors.
sequence_fn: Arg valid for Modality.CORE only.
Returns:
packed: `flat_sequence` converted to have the same recursive structure as
`structure`.
Raises:
ValueError: If `flat_sequence` and `structure` have different
atom counts.
TypeError: For Modality.CORE only. `structure` is or contains a dict with
non-sortable keys.
"""
if modality == Modality.CORE:
return _tf_core_pack_sequence_as(
structure, flat_sequence, expand_composites, sequence_fn
)
elif modality == Modality.DATA:
return _tf_data_pack_sequence_as(structure, flat_sequence)
else:
raise ValueError(
"Unknown modality used {} for nested structure".format(modality)
)
def _tf_core_pack_sequence_as(
structure, flat_sequence, expand_composites, sequence_fn=None
):
"""Implements sequence packing, with the option to alter the structure."""
is_nested_fn = (
_is_nested_or_composite if expand_composites else _tf_core_is_nested
)
sequence_fn = sequence_fn or sequence_like
def truncate(value, length):
value_str = str(value)
return value_str[:length] + (value_str[length:] and "...")
if not is_nested_fn(flat_sequence):
raise TypeError(
"Attempted to pack value:\n {}\ninto a structure, but found "
"incompatible type `{}` instead.".format(
truncate(flat_sequence, 100), type(flat_sequence)
)
)
if not is_nested_fn(structure):
if len(flat_sequence) != 1:
raise ValueError(
"The target structure is of type `{}`\n {}\nHowever the input "
"is a sequence ({}) of length {}.\n {}\nnest cannot "
"guarantee that it is safe to map one to the other.".format(
type(structure),
truncate(structure, 100),
type(flat_sequence),
len(flat_sequence),
truncate(flat_sequence, 100),
)
)
return flat_sequence[0]
try:
final_index, packed = _tf_core_packed_nest_with_indices(
structure, flat_sequence, 0, is_nested_fn, sequence_fn
)
if final_index < len(flat_sequence):
raise IndexError
except IndexError:
flat_structure = _tf_core_flatten(
structure, expand_composites=expand_composites
)
if len(flat_structure) != len(flat_sequence):
# pylint: disable=raise-missing-from
raise ValueError(
"Could not pack sequence. Structure had %d atoms, but "
"flat_sequence had %d items. Structure: %s, flat_sequence: %s."
% (len(flat_structure), len(flat_sequence), structure, flat_sequence)
)
return sequence_fn(structure, packed)
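# Editor's illustrative sketch (not part of the original module): packing fails
# loudly when the number of flat values does not match the number of atoms.
def _example_pack_sequence_as_mismatch():  # pragma: no cover - documentation only
  try:
    _tf_core_pack_sequence_as(
        structure=[0, [0, 0]], flat_sequence=[1, 2], expand_composites=False
    )
  except ValueError:
    pass  # "Structure had 3 atoms, but flat_sequence had 2 items."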
def _tf_data_pack_sequence_as(structure, flat_sequence):
"""Returns a given flattened sequence packed into a nest.
If `structure` is a scalar, `flat_sequence` must be a single-element list;
in this case the return value is `flat_sequence[0]`.
Args:
structure: tuple or list constructed of scalars and/or other tuples/lists,
or a scalar. Note: numpy arrays are considered scalars.
flat_sequence: flat sequence to pack.
Returns:
packed: `flat_sequence` converted to have the same recursive structure as
`structure`.
Raises:
ValueError: If nest and structure have different element counts.
"""
if not (_tf_data_is_nested(flat_sequence) or isinstance(flat_sequence, list)):
raise TypeError(
"Argument `flat_sequence` must be a sequence. Got "
f"'{type(flat_sequence).__name__}'."
)
if not _tf_data_is_nested(structure):
if len(flat_sequence) != 1:
raise ValueError(
"Argument `structure` is a scalar but "
f"`len(flat_sequence)`={len(flat_sequence)} > 1"
)
return flat_sequence[0]
flat_structure = _tf_data_flatten(structure)
if len(flat_structure) != len(flat_sequence):
raise ValueError(
"Could not pack sequence. Argument `structure` had "
f"{len(flat_structure)} elements, but argument `flat_sequence` had "
f"{len(flat_sequence)} elements. Received structure: "
f"{structure}, flat_sequence: {flat_sequence}."
)
_, packed = _tf_data_packed_nest_with_indices(structure, flat_sequence, 0)
return sequence_like(structure, packed) # pylint: disable=protected-access
def map_structure(modality, func, *structure, **kwargs):
"""Creates a new structure by applying `func` to each atom in `structure`.
- For Modality.CORE: Refer to
[tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest)
for the definition of a structure.
Applies `func(x[0], x[1], ...)` where x[i] enumerates all atoms in
`structure[i]`. All items in `structure` must have the same arity,
and the return value will contain results with the same structure layout.
Examples:
* A single Python dict:
>>> a = {"hello": 24, "world": 76}
>>> tf.nest.map_structure(lambda p: p * 2, a)
{'hello': 48, 'world': 152}
* Multiple Python dictionaries:
>>> d1 = {"hello": 24, "world": 76}
>>> d2 = {"hello": 36, "world": 14}
>>> tf.nest.map_structure(lambda p1, p2: p1 + p2, d1, d2)
{'hello': 60, 'world': 90}
* A single Python list:
>>> a = [24, 76, "ab"]
>>> tf.nest.map_structure(lambda p: p * 2, a)
[48, 152, 'abab']
* Scalars:
>>> tf.nest.map_structure(lambda x, y: x + y, 3, 4)
7
* Empty structures:
>>> tf.nest.map_structure(lambda x: x + 1, ())
()
* Check the types of iterables:
>>> s1 = (((1, 2), 3), 4, (5, 6))
>>> s1_list = [[[1, 2], 3], 4, [5, 6]]
>>> tf.nest.map_structure(lambda x, y: None, s1, s1_list)
Traceback (most recent call last):
...
TypeError: The two structures don't have the same nested structure
* Type check is set to False:
>>> s1 = (((1, 2), 3), 4, (5, 6))
>>> s1_list = [[[1, 2], 3], 4, [5, 6]]
>>> tf.nest.map_structure(lambda x, y: None, s1, s1_list, check_types=False)
(((None, None), None), None, (None, None))
- For Modality.DATA: Applies `func(x[0], x[1], ...)` where x[i] is an entry in
`structure[i]`. All structures in `structure` must have the same arity,
and the return value will contain the results in the same structure.
Args:
modality: enum value of supported modality [Modality.CORE or Modality.DATA]
func: A callable that accepts as many arguments as there are structures.
*structure: - For Modality.CORE: atom or nested structure. - For
Modality.DATA: scalar, or tuple or list of constructed scalars and/or
other tuples/lists, or scalars. Note: numpy arrays are considered
scalars.
**kwargs: Valid keyword args are: * `check_types`: - For Modality.CORE: If
set to `True` (default) the types of iterables within the structures have
to be same (e.g. `map_structure(func, [1], (1,))` raises a `TypeError`
exception). To allow this set this argument to `False`. Note that
namedtuples with identical name and fields are always considered to have
the same shallow structure. - For Modality.DATA: only valid keyword
argument is `check_types`. If set to `True` (default) the types of
iterables within the structures have to be same (e.g. `map_structure(func,
[1], (1,))` raises a `TypeError` exception). To allow this set this
argument to `False`. * `expand_composites`: Valid for Modality.CORE only.
If set to `True`, then composite tensors such as `tf.sparse.SparseTensor`
and `tf.RaggedTensor` are expanded into their component tensors. If
`False` (the default), then composite tensors are not expanded.
Returns:
A new structure with the same arity as `structure[0]`, whose atoms
correspond to `func(x[0], x[1], ...)` where `x[i]` is the atom in the
corresponding location in `structure[i]`. If there are different structure
types and `check_types` is `False` the structure types of the first
structure will be used.
Raises:
TypeError: If `func` is not callable or if the structures do not match
each other by depth tree.
ValueError: If no structure is provided or if the structures do not match
each other by type.
ValueError: If wrong keyword arguments are provided.
"""
if modality == Modality.CORE:
return _tf_core_map_structure(func, *structure, **kwargs)
elif modality == Modality.DATA:
return _tf_data_map_structure(func, *structure, **kwargs)
else:
raise ValueError(
"Unknown modality used {} for nested structure".format(modality)
)
# pylint: disable=missing-function-docstring
def _tf_core_map_structure(func, *structure, **kwargs):
if not callable(func):
raise TypeError("func must be callable, got: %s" % func)
if not structure:
raise ValueError("Must provide at least one structure")
check_types = kwargs.pop("check_types", True)
expand_composites = kwargs.pop("expand_composites", False)
if kwargs:
raise ValueError(
"Only valid keyword arguments are `check_types` and "
"`expand_composites`, not: `%s`"
% "`, `".join(kwargs.keys())
)
for other in structure[1:]:
_tf_core_assert_same_structure(
structure[0],
other,
check_types=check_types,
expand_composites=expand_composites,
)
flat_structure = (_tf_core_flatten(s, expand_composites) for s in structure)
entries = zip(*flat_structure)
return _tf_core_pack_sequence_as(
structure[0],
[func(*x) for x in entries],
expand_composites=expand_composites,
)
# pylint: disable=missing-function-docstring
def _tf_data_map_structure(func, *structure, **check_types_dict):
if not callable(func):
raise TypeError(f"Argument `func` must be callable, got: {func}")
if not structure:
raise ValueError("Must provide at least one structure")
if check_types_dict:
if "check_types" not in check_types_dict or len(check_types_dict) > 1:
raise ValueError(
"Only valid keyword argument for `check_types_dict` is "
f"'check_types'. Got {check_types_dict}."
)
check_types = check_types_dict["check_types"]
else:
check_types = True
for other in structure[1:]:
_tf_data_assert_same_structure(structure[0], other, check_types=check_types)
flat_structure = (_tf_data_flatten(s) for s in structure)
entries = zip(*flat_structure)
return _tf_data_pack_sequence_as(structure[0], [func(*x) for x in entries])
def yield_flat_up_to(modality, shallow_tree, input_tree, is_nested_fn, path=()):
"""Yields (path, value) pairs of input_tree flattened up to shallow_tree.
- For Modality.CORE: See comments for _tf_core_yield_flat_up_to() below
- For Modality.DATA: See comments for _tf_data_yield_flat_up_to() below
Args:
modality: enum value of supported modality [Modality.CORE or Modality.DATA]
shallow_tree: Nested structure. Traverse no further than its leaf nodes.
input_tree: Nested structure. Return the paths and values from this tree.
Must have the same upper structure as shallow_tree.
is_nested_fn: Arg valid for Modality.CORE only. Function used to test if a
value should be treated as a nested structure.
path: Arg valid for Modality.CORE only. Tuple. Optional argument, only used
when recursing. The path from the root of the original shallow_tree, down
to the root of the shallow_tree arg of this recursive call.
Yields:
    Pairs of (path, value), where path is the tuple path of a leaf node in
shallow_tree, and value is the value of the corresponding node in
input_tree.
"""
if modality == Modality.CORE:
yield from _tf_core_yield_flat_up_to(
shallow_tree, input_tree, is_nested_fn, path
)
elif modality == Modality.DATA:
yield from _tf_data_yield_flat_up_to(shallow_tree, input_tree)
else:
raise ValueError(
"Unknown modality used {} for nested structure".format(modality)
)
def _tf_core_yield_flat_up_to(shallow_tree, input_tree, is_nested_fn, path=()):
"""Yields (path, value) pairs of input_tree flattened up to shallow_tree.
Args:
shallow_tree: Nested structure. Traverse no further than its leaf nodes.
input_tree: Nested structure. Return the paths and values from this tree.
Must have the same upper structure as shallow_tree.
is_nested_fn: Function used to test if a value should be treated as a nested
structure.
path: Tuple. Optional argument, only used when recursing. The path from the
root of the original shallow_tree, down to the root of the shallow_tree
arg of this recursive call.
Yields:
    Pairs of (path, value), where path is the tuple path of a leaf node in
shallow_tree, and value is the value of the corresponding node in
input_tree.
"""
if not is_nested_fn(shallow_tree):
yield (path, input_tree)
else:
input_tree = dict(_tf_core_yield_sorted_items(input_tree))
for (
shallow_key,
shallow_subtree,
) in _tf_core_yield_sorted_items(shallow_tree):
subpath = path + (shallow_key,)
input_subtree = input_tree[shallow_key]
for leaf_path, leaf_value in _tf_core_yield_flat_up_to(
shallow_subtree, input_subtree, is_nested_fn, path=subpath
):
yield (leaf_path, leaf_value)
def _tf_data_yield_flat_up_to(shallow_tree, input_tree):
"""Yields elements `input_tree` partially flattened up to `shallow_tree`."""
if _tf_data_is_nested(shallow_tree):
for shallow_branch, input_branch in zip(
_tf_data_yield_value(shallow_tree), _tf_data_yield_value(input_tree)
):
for input_leaf in _tf_data_yield_flat_up_to(shallow_branch, input_branch):
yield input_leaf
else:
yield input_tree
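# Editor's illustrative sketch (not part of the original module): paths are
# tuples of the keys/indices traversed in `shallow_tree`; values come from the
# corresponding positions of `input_tree`, which may keep extra depth.
def _example_yield_flat_up_to():  # pragma: no cover - documentation only
  shallow = {"a": None, "b": None}
  deep = {"a": [1, 2], "b": {"c": 3}}
  pairs = list(_tf_core_yield_flat_up_to(shallow, deep, _tf_core_is_nested))
  assert pairs == [(("a",), [1, 2]), (("b",), {"c": 3})]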
def assert_shallow_structure(
modality,
shallow_tree,
input_tree,
check_types=True,
expand_composites=False,
):
"""Asserts that `shallow_tree` is a shallow structure of `input_tree`.
This function tests if the `input_tree` structure can be created from
the `shallow_tree` structure by replacing its leaf nodes with deeper
tree structures.
Examples:
The following code will raise an exception:
```python
shallow_tree = {"a": "A", "b": "B"}
input_tree = {"a": 1, "c": 2}
assert_shallow_structure(shallow_tree, input_tree)
```
The following code will raise an exception:
```python
shallow_tree = ["a", "b"]
input_tree = ["c", ["d", "e"], "f"]
assert_shallow_structure(shallow_tree, input_tree)
```
Args:
modality: enum value of supported modality [Modality.CORE or Modality.DATA]
shallow_tree: an arbitrarily nested structure.
input_tree: an arbitrarily nested structure.
check_types: if `True` (default) the sequence types of `shallow_tree` and
`input_tree` have to be the same. Note that even with check_types==True,
this function will consider two different namedtuple classes with the same
name and _fields attribute to be the same class.
expand_composites: Valid for Modality.CORE only. If true, then composite
tensors such as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are
expanded into their component tensors.
Raises:
TypeError: If `shallow_tree` is a sequence but `input_tree` is not.
TypeError: If the sequence types of `shallow_tree` are different from
`input_tree`. Only raised if `check_types` is `True`.
ValueError: If the sequence lengths of `shallow_tree` are different from
`input_tree`.
"""
if modality == Modality.CORE:
_tf_core_assert_shallow_structure(
shallow_tree, input_tree, check_types, expand_composites
)
elif modality == Modality.DATA:
_tf_data_assert_shallow_structure(shallow_tree, input_tree, check_types)
else:
raise ValueError(
"Unknown modality used {} for nested structure".format(modality)
)
# pylint: disable=missing-function-docstring
def _tf_core_assert_shallow_structure(
shallow_tree, input_tree, check_types=True, expand_composites=False
):
is_nested_fn = (
_is_nested_or_composite if expand_composites else _tf_core_is_nested
)
if is_nested_fn(shallow_tree):
if not is_nested_fn(input_tree):
raise TypeError(
"If shallow structure is a sequence, input must also be a sequence. "
"Input has type: %s."
% type(input_tree)
)
if isinstance(shallow_tree, _wrapt.ObjectProxy):
shallow_type = type(shallow_tree.__wrapped__)
else:
shallow_type = type(shallow_tree)
if check_types and not isinstance(input_tree, shallow_type):
# Duck-typing means that nest should be fine with two different
# namedtuples with identical name and fields.
shallow_is_namedtuple = is_namedtuple(shallow_tree, False)
input_is_namedtuple = is_namedtuple(input_tree, False)
if shallow_is_namedtuple and input_is_namedtuple:
if not same_namedtuples(shallow_tree, input_tree):
raise TypeError(
STRUCTURES_HAVE_MISMATCHING_TYPES.format(
input_type=type(input_tree), shallow_type=type(shallow_tree)
)
)
elif isinstance(shallow_tree, list) and isinstance(input_tree, list):
# List subclasses are considered the same,
# e.g. python list vs. _ListWrapper.
pass
elif (
_is_composite_tensor(shallow_tree) or _is_type_spec(shallow_tree)
) and (_is_composite_tensor(input_tree) or _is_type_spec(input_tree)):
pass # Compatibility will be checked below.
elif not (
isinstance(shallow_tree, _collections_abc.Mapping)
and isinstance(input_tree, _collections_abc.Mapping)
):
raise TypeError(
STRUCTURES_HAVE_MISMATCHING_TYPES.format(
input_type=type(input_tree), shallow_type=type(shallow_tree)
)
)
if _is_composite_tensor(shallow_tree) or _is_composite_tensor(input_tree):
if not (
(_is_composite_tensor(input_tree) or _is_type_spec(input_tree))
and (
_is_composite_tensor(shallow_tree) or _is_type_spec(shallow_tree)
)
):
raise TypeError(
STRUCTURES_HAVE_MISMATCHING_TYPES.format(
input_type=type(input_tree), shallow_type=type(shallow_tree)
)
)
# pylint: disable=protected-access
type_spec_1 = (
shallow_tree
if _is_type_spec(shallow_tree)
else shallow_tree._type_spec
)._without_tensor_names()
type_spec_2 = (
input_tree if _is_type_spec(input_tree) else input_tree._type_spec
)._without_tensor_names()
# TODO(b/246356867): Replace the most_specific_common_supertype below
# with get_structure.
if hasattr(type_spec_1, "_get_structure") and hasattr(
type_spec_2, "_get_structure"
):
result = (
type_spec_1._get_structure() == type_spec_2._get_structure() or None
)
else:
result = type_spec_1.most_specific_common_supertype([type_spec_2])
if result is None:
raise ValueError(
"Incompatible CompositeTensor TypeSpecs: %s vs. %s"
% (type_spec_1, type_spec_2)
)
# pylint: enable=protected-access
elif _is_type_spec(shallow_tree):
if not _is_type_spec(input_tree):
raise TypeError(
"If shallow structure is a TypeSpec, input must also "
"be a TypeSpec. Input has type: %s."
% type(input_tree)
)
else:
if len(input_tree) != len(shallow_tree):
raise ValueError(
STRUCTURES_HAVE_MISMATCHING_LENGTHS.format(
input_length=len(input_tree), shallow_length=len(shallow_tree)
)
)
elif len(input_tree) < len(shallow_tree):
raise ValueError(
INPUT_TREE_SMALLER_THAN_SHALLOW_TREE.format(
input_size=len(input_tree), shallow_size=len(shallow_tree)
)
)
if isinstance(shallow_tree, _collections_abc.Mapping):
absent_keys = set(shallow_tree) - set(input_tree)
if absent_keys:
raise ValueError(
SHALLOW_TREE_HAS_INVALID_KEYS.format(sorted(absent_keys))
)
for shallow_branch, input_branch in zip(
_tf_core_yield_value(shallow_tree),
_tf_core_yield_value(input_tree),
):
_tf_core_assert_shallow_structure(
shallow_branch,
input_branch,
check_types=check_types,
expand_composites=expand_composites,
)
# pylint: disable=missing-function-docstring
def _tf_data_assert_shallow_structure(
shallow_tree, input_tree, check_types=True
):
if _tf_data_is_nested(shallow_tree):
if not _tf_data_is_nested(input_tree):
raise TypeError(
"If shallow structure is a sequence, input must also be a sequence. "
f"Input has type: '{type(input_tree).__name__}'."
)
if check_types and not isinstance(input_tree, type(shallow_tree)):
raise TypeError(
"The two structures don't have the same sequence type. Input "
f"structure has type '{type(input_tree).__name__}', while shallow "
f"structure has type '{type(shallow_tree).__name__}'."
)
if len(input_tree) != len(shallow_tree):
raise ValueError(
"The two structures don't have the same sequence length. Input "
f"structure has length {len(input_tree)}, while shallow structure "
f"has length {len(shallow_tree)}."
)
if check_types and isinstance(shallow_tree, _collections_abc.Mapping):
if set(input_tree) != set(shallow_tree):
raise ValueError(
"The two structures don't have the same keys. Input "
f"structure has keys {list(input_tree)}, while shallow structure "
f"has keys {list(shallow_tree)}."
)
input_tree = sorted(input_tree.items())
shallow_tree = sorted(shallow_tree.items())
for shallow_branch, input_branch in zip(shallow_tree, input_tree):
_tf_data_assert_shallow_structure(
shallow_branch, input_branch, check_types=check_types
)
def flatten_up_to(
modality,
shallow_tree,
input_tree,
check_types=True,
expand_composites=False,
):
# pylint: disable=g-doc-return-or-yield,g-doc-args
"""Flattens `input_tree` up to `shallow_tree`.
- For Modality.CORE: refer to
[tf.nest](https://www.tensorflow.org/api_docs/python/tf/nest)
for the definition of a structure.
Any further depth in structure in `input_tree` is retained as structures in
  the partially flattened output.
If `shallow_tree` and `input_tree` are atoms, this returns a
single-item list: `[input_tree]`.
Use Case:
Sometimes we may wish to partially flatten a structure, retaining some
of the nested structure. We achieve this by specifying a shallow structure,
`shallow_tree`, we wish to flatten up to.
The input, `input_tree`, can be thought of as having the same structure layout
as `shallow_tree`, but with leaf nodes that are themselves tree structures.
Examples:
```python
input_tree = [[[2, 2], [3, 3]], [[4, 9], [5, 5]]]
shallow_tree = [[True, True], [False, True]]
flattened_input_tree = flatten_up_to(shallow_tree, input_tree)
flattened_shallow_tree = flatten_up_to(shallow_tree, shallow_tree)
# Output is:
# [[2, 2], [3, 3], [4, 9], [5, 5]]
# [True, True, False, True]
```
```python
input_tree = [[('a', 1), [('b', 2), [('c', 3), [('d', 4)]]]]]
shallow_tree = [['level_1', ['level_2', ['level_3', ['level_4']]]]]
input_tree_flattened_as_shallow_tree = flatten_up_to(shallow_tree, input_tree)
input_tree_flattened = flatten(input_tree)
# Output is:
# [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
# ['a', 1, 'b', 2, 'c', 3, 'd', 4]
```
Edge Cases:
```python
flatten_up_to(0, 0) # Output: [0]
flatten_up_to(0, [0, 1, 2]) # Output: [[0, 1, 2]]
flatten_up_to([0, 1, 2], 0) # Output: TypeError
flatten_up_to([0, 1, 2], [0, 1, 2]) # Output: [0, 1, 2]
```
Args:
modality: enum value of supported modality [Modality.CORE or Modality.DATA]
shallow_tree: a possibly pruned structure of input_tree.
input_tree: an atom or a nested structure. Note, numpy arrays are considered
atoms.
check_types: bool. If True, check that each node in shallow_tree has the
same type as the corresponding node in input_tree.
expand_composites: Arg valid for Modality.CORE only. If true, then composite
tensors such as `tf.sparse.SparseTensor` and `tf.RaggedTensor` are
expanded into their component tensors.
Returns:
A Python list, the partially flattened version of `input_tree` according to
the structure of `shallow_tree`.
Raises:
TypeError: If `shallow_tree` is a nested structure but `input_tree` is not.
TypeError: If the structure types of `shallow_tree` are different from
`input_tree`.
ValueError: If the structure lengths of `shallow_tree` are different from
`input_tree`.
"""
if modality == Modality.CORE:
return _tf_core_flatten_up_to(
shallow_tree, input_tree, check_types, expand_composites
)
elif modality == Modality.DATA:
return _tf_data_flatten_up_to(shallow_tree, input_tree)
else:
raise ValueError(
"Unknown modality used {} for nested structure".format(modality)
)
def _tf_core_flatten_up_to(
shallow_tree, input_tree, check_types=True, expand_composites=False
):
is_nested_fn = (
_is_nested_or_composite if expand_composites else _tf_core_is_nested
)
_tf_core_assert_shallow_structure(
shallow_tree,
input_tree,
check_types=check_types,
expand_composites=expand_composites,
)
# Discard paths returned by nest_util._tf_core_yield_flat_up_to.
return [
v
for _, v in _tf_core_yield_flat_up_to(
shallow_tree, input_tree, is_nested_fn
)
]
def _tf_data_flatten_up_to(shallow_tree, input_tree):
_tf_data_assert_shallow_structure(shallow_tree, input_tree)
return list(_tf_data_yield_flat_up_to(shallow_tree, input_tree))
def map_structure_up_to(modality, shallow_tree, func, *inputs, **kwargs):
"""Applies a function or op to a number of partially flattened inputs.
The `inputs` are flattened up to `shallow_tree` before being mapped.
Use Case:
Sometimes we wish to apply a function to a partially flattened
structure (for example when the function itself takes structure inputs). We
achieve this by specifying a shallow structure, `shallow_tree` we wish to
flatten up to.
  The `inputs` can be thought of as having the same structure layout as
`shallow_tree`, but with leaf nodes that are themselves tree structures.
This function therefore will return something with the same base structure as
`shallow_tree`.
Examples:
```python
shallow_tree = [None, None]
  inp_val = [1, 2]
out = map_structure_up_to(shallow_tree, lambda x: 2 * x, inp_val)
# Output is: [2, 4]
```
```python
ab_tuple = collections.namedtuple("ab_tuple", "a, b")
op_tuple = collections.namedtuple("op_tuple", "add, mul")
inp_val = ab_tuple(a=2, b=3)
inp_ops = ab_tuple(a=op_tuple(add=1, mul=2), b=op_tuple(add=2, mul=3))
out = map_structure_up_to(inp_val, lambda val, ops: (val + ops.add) * ops.mul,
inp_val, inp_ops)
# Output is: ab_tuple(a=6, b=15)
```
```python
data_list = [[2, 4, 6, 8], [[1, 3, 5, 7, 9], [3, 5, 7]]]
name_list = ['evens', ['odds', 'primes']]
out = map_structure_up_to(
name_list,
lambda name, sec: "first_{}_{}".format(len(sec), name),
name_list, data_list)
# Output is: ['first_4_evens', ['first_5_odds', 'first_3_primes']]
```
Args:
modality: enum value of supported modality [Modality.CORE or Modality.DATA]
shallow_tree: a shallow structure, common to all the inputs.
func: callable which will be applied to each input individually.
*inputs: structures that are compatible with shallow_tree. The function
`func` is applied to corresponding structures due to partial flattening of
each input, so the function must support arity of `len(inputs)`.
**kwargs: Arg valid for Modality.CORE only. kwargs to feed to func().
Special kwarg `check_types` is not passed to func, but instead determines
whether the types of iterables within the structures have to be same (e.g.
`map_structure(func, [1], (1,))` raises a `TypeError` exception). To allow
this set this argument to `False`.
Raises:
TypeError: If `shallow_tree` is a nested structure but `input_tree` is not.
TypeError: If the structure types of `shallow_tree` are different from
`input_tree`.
ValueError: If the structure lengths of `shallow_tree` are different from
`input_tree`.
Returns:
result of repeatedly applying `func`, with the same structure layout as
`shallow_tree`.
"""
  if modality == Modality.CORE:
    # The tuple-paths variant passes the path as the first argument to `func`;
    # wrap `func` so the path is discarded and the documented signature holds.
    return _tf_core_map_structure_with_tuple_paths_up_to(
        shallow_tree, lambda _, *values: func(*values), *inputs, **kwargs
    )
elif modality == Modality.DATA:
return _tf_data_map_structure_up_to(shallow_tree, func, *inputs)
else:
raise ValueError(
"Unknown modality used {} for nested structure".format(modality)
)
def _tf_core_map_structure_with_tuple_paths_up_to(
shallow_tree, func, *inputs, **kwargs
):
"""See comments for map_structure_with_tuple_paths_up_to() in tensorflow/python/util/nest.py."""
if not inputs:
raise ValueError("Cannot map over no sequences")
check_types = kwargs.pop("check_types", True)
expand_composites = kwargs.pop("expand_composites", False)
is_nested_fn = (
_is_nested_or_composite if expand_composites else _tf_core_is_nested
)
for input_tree in inputs:
_tf_core_assert_shallow_structure(
shallow_tree,
input_tree,
check_types=check_types,
expand_composites=expand_composites,
)
# Flatten each input separately, apply the function to corresponding items,
# then repack based on the structure of the first input.
flat_value_gen = (
_tf_core_flatten_up_to( # pylint: disable=g-complex-comprehension
shallow_tree,
input_tree,
check_types,
expand_composites=expand_composites,
)
for input_tree in inputs
)
flat_path_gen = (
path
for path, _ in _tf_core_yield_flat_up_to(
shallow_tree, inputs[0], is_nested_fn
)
)
results = [
func(*args, **kwargs) for args in zip(flat_path_gen, *flat_value_gen)
]
return _tf_core_pack_sequence_as(
structure=shallow_tree,
flat_sequence=results,
expand_composites=expand_composites,
)
# pylint: disable=missing-function-docstring
def _tf_data_map_structure_up_to(shallow_tree, func, *inputs):
if not inputs:
raise ValueError(
"Argument `inputs` is empty. Cannot map over no sequences."
)
for input_tree in inputs:
_tf_data_assert_shallow_structure(shallow_tree, input_tree)
# Flatten each input separately, apply the function to corresponding elements,
# then repack based on the structure of the first input.
all_flattened_up_to = (
_tf_data_flatten_up_to(shallow_tree, input_tree) for input_tree in inputs
)
results = [func(*tensors) for tensors in zip(*all_flattened_up_to)]
return _tf_data_pack_sequence_as(
structure=shallow_tree, flat_sequence=results
)
|
_DotString
|
python
|
getsentry__sentry-python
|
sentry_sdk/_lru_cache.py
|
{
"start": 104,
"end": 1229
}
|
class ____:
def __init__(self, max_size):
# type: (int) -> None
if max_size <= 0:
raise AssertionError(f"invalid max_size: {max_size}")
self.max_size = max_size
self._data = {} # type: dict[Any, Any]
self.hits = self.misses = 0
self.full = False
def set(self, key, value):
# type: (Any, Any) -> None
current = self._data.pop(key, _SENTINEL)
if current is not _SENTINEL:
self._data[key] = value
elif self.full:
self._data.pop(next(iter(self._data)))
self._data[key] = value
else:
self._data[key] = value
self.full = len(self._data) >= self.max_size
def get(self, key, default=None):
# type: (Any, Any) -> Any
try:
ret = self._data.pop(key)
except KeyError:
self.misses += 1
ret = default
else:
self.hits += 1
self._data[key] = ret
return ret
def get_all(self):
# type: () -> list[tuple[Any, Any]]
return list(self._data.items())
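# Editor's illustrative usage sketch (not part of the original file). The
# masked class above is a small LRU cache (the row's target names it
# `LRUCache`, which is assumed below): `set` evicts the least-recently-used
# entry once `max_size` is reached, and `get` refreshes an entry's recency.
#
#   cache = LRUCache(max_size=2)
#   cache.set("a", 1)
#   cache.set("b", 2)
#   cache.get("a")        # touch "a" -> "b" becomes the oldest entry
#   cache.set("c", 3)     # cache is full, so "b" is evicted
#   cache.get_all()       # [("a", 1), ("c", 3)]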
|
LRUCache
|
python
|
realpython__materials
|
python-iterators-iterables/sequence_iter.py
|
{
"start": 380,
"end": 535
}
|
class ____:
def __init__(self, sequence):
self.sequence = sequence
def __iter__(self):
return SequenceIterator(self.sequence)
|
Iterable
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_excel2003_style03.py
|
{
"start": 315,
"end": 1363
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("excel2003_style03.xlsx")
self.ignore_files = [
"xl/printerSettings/printerSettings1.bin",
"xl/worksheets/_rels/sheet1.xml.rels",
]
self.ignore_elements = {
"[Content_Types].xml": ['<Default Extension="bin"'],
"xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"],
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename, {"excel2003_style": True})
worksheet = workbook.add_worksheet()
worksheet.set_paper(9)
worksheet.set_header("Page &P")
worksheet.set_footer("&A")
bold = workbook.add_format({"bold": 1})
worksheet.write("A1", "Foo")
worksheet.write("A2", "Bar", bold)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
openai__openai-python
|
src/openai/types/responses/response_apply_patch_tool_call.py
|
{
"start": 400,
"end": 618
}
|
class ____(BaseModel):
diff: str
"""Diff to apply."""
path: str
"""Path of the file to create."""
type: Literal["create_file"]
"""Create a new file with the provided diff."""
|
OperationCreateFile
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/protocol22.py
|
{
"start": 942,
"end": 1126
}
|
class ____(Protocol[_T1, _T2]):
def m1(self, a: _T1, b: _T2) -> _T1 | _T2: ...
# This is right, as `_T1` and `_T2` are both covariant with the
# argument type and the return type.
|
P3
|
python
|
ray-project__ray
|
python/ray/tests/unit/test_runtime_env_validation.py
|
{
"start": 3499,
"end": 5696
}
|
class ____:
def test_validate_not_a_list(self):
with pytest.raises(TypeError, match="must be a list of strings"):
parse_and_validate_py_modules(".")
def test_validate_bad_path(self):
with pytest.raises(ValueError, match="a valid path"):
parse_and_validate_py_modules(["/does/not/exist"])
def test_validate_bad_uri(self):
with pytest.raises(ValueError, match="a valid URI"):
parse_and_validate_py_modules(["unknown://abc"])
def test_validate_invalid_type(self):
with pytest.raises(TypeError):
parse_and_validate_py_modules([1])
def test_validate_remote_invalid_extension(self):
uris = [
"https://some_domain.com/path/file",
"s3://bucket/file",
"gs://bucket/file",
]
with pytest.raises(
ValueError, match="Only .zip or .whl files supported for remote URIs."
):
parse_and_validate_py_modules(uris)
def test_validate_remote_valid_input(self):
uris = [
"https://some_domain.com/path/file.zip",
"s3://bucket/file.zip",
"gs://bucket/file.zip",
"https://some_domain.com/path/file.whl",
"s3://bucket/file.whl",
"gs://bucket/file.whl",
]
py_modules = parse_and_validate_py_modules(uris)
assert py_modules == uris
def test_validate_path_valid_input(self, test_directory):
test_dir, _, _, _ = test_directory
paths = [str(test_dir)]
py_modules = parse_and_validate_py_modules(paths)
assert py_modules == paths
def test_validate_path_and_uri_valid_input(self, test_directory):
test_dir, _, _, _ = test_directory
uris_and_paths = [
str(test_dir),
"https://some_domain.com/path/file.zip",
"s3://bucket/file.zip",
"gs://bucket/file.zip",
"https://some_domain.com/path/file.whl",
"s3://bucket/file.whl",
"gs://bucket/file.whl",
]
py_modules = parse_and_validate_py_modules(uris_and_paths)
assert py_modules == uris_and_paths
|
TestValidatePyModules
|
python
|
huggingface__transformers
|
src/transformers/models/encodec/modeling_encodec.py
|
{
"start": 9983,
"end": 11199
}
|
class ____(nn.Module):
"""
Residual block from SEANet model as used by EnCodec.
"""
def __init__(self, config: EncodecConfig, dim: int, dilations: list[int]):
super().__init__()
kernel_sizes = (config.residual_kernel_size, 1)
if len(kernel_sizes) != len(dilations):
raise ValueError("Number of kernel sizes should match number of dilations")
hidden = dim // config.compress
block = []
for i, (kernel_size, dilation) in enumerate(zip(kernel_sizes, dilations)):
in_chs = dim if i == 0 else hidden
out_chs = dim if i == len(kernel_sizes) - 1 else hidden
block += [nn.ELU()]
block += [EncodecConv1d(config, in_chs, out_chs, kernel_size, dilation=dilation)]
self.block = nn.ModuleList(block)
if config.use_conv_shortcut:
self.shortcut = EncodecConv1d(config, dim, dim, kernel_size=1)
else:
self.shortcut = nn.Identity()
def forward(self, hidden_states):
residual = hidden_states
for layer in self.block:
hidden_states = layer(hidden_states)
return self.shortcut(residual) + hidden_states
|
EncodecResnetBlock
|
python
|
PrefectHQ__prefect
|
src/prefect/utilities/collections.py
|
{
"start": 6363,
"end": 23626
}
|
class ____(BaseException):
"""
A special exception used to stop recursive visits in `visit_collection`.
When raised, the expression is returned without modification and recursive visits
in that path will end.
"""
@overload
def visit_collection(
expr: Any,
visit_fn: Callable[[Any, dict[str, VT]], Any],
*,
return_data: Literal[True] = ...,
max_depth: int = ...,
context: dict[str, VT] = ...,
remove_annotations: bool = ...,
_seen: Optional[dict[int, Any]] = ...,
) -> Any: ...
@overload
def visit_collection(
expr: Any,
visit_fn: Callable[[Any], Any],
*,
return_data: Literal[True] = ...,
max_depth: int = ...,
context: None = None,
remove_annotations: bool = ...,
_seen: Optional[dict[int, Any]] = ...,
) -> Any: ...
@overload
def visit_collection(
expr: Any,
visit_fn: Callable[[Any, dict[str, VT]], Any],
*,
return_data: bool = ...,
max_depth: int = ...,
context: dict[str, VT] = ...,
remove_annotations: bool = ...,
_seen: Optional[dict[int, Any]] = ...,
) -> Optional[Any]: ...
@overload
def visit_collection(
expr: Any,
visit_fn: Callable[[Any], Any],
*,
return_data: bool = ...,
max_depth: int = ...,
context: None = None,
remove_annotations: bool = ...,
_seen: Optional[dict[int, Any]] = ...,
) -> Optional[Any]: ...
@overload
def visit_collection(
expr: Any,
visit_fn: Callable[[Any, dict[str, VT]], Any],
*,
return_data: Literal[False] = False,
max_depth: int = ...,
context: dict[str, VT] = ...,
remove_annotations: bool = ...,
_seen: Optional[dict[int, Any]] = ...,
) -> None: ...
def visit_collection(
expr: Any,
visit_fn: Union[Callable[[Any, dict[str, VT]], Any], Callable[[Any], Any]],
*,
return_data: bool = False,
max_depth: int = -1,
context: Optional[dict[str, VT]] = None,
remove_annotations: bool = False,
_seen: Optional[dict[int, Any]] = None,
) -> Optional[Any]:
"""
Visits and potentially transforms every element of an arbitrary Python collection.
If an element is a Python collection, it will be visited recursively. If an element
is not a collection, `visit_fn` will be called with the element. The return value of
`visit_fn` can be used to alter the element if `return_data` is set to `True`.
Note:
- When `return_data` is `True`, a copy of each collection is created only if
`visit_fn` modifies an element within that collection. This approach minimizes
performance penalties by avoiding unnecessary copying.
- When `return_data` is `False`, no copies are created, and only side effects from
`visit_fn` are applied. This mode is faster and should be used when no transformation
of the collection is required, because it never has to copy any data.
Supported types:
- List (including iterators)
- Tuple
- Set
- Dict (note: keys are also visited recursively)
- Dataclass
- Pydantic model
- Prefect annotations
Note that visit_collection will not consume generators or async generators, as it would prevent
the caller from iterating over them.
Args:
expr (Any): A Python object or expression.
visit_fn (Callable[[Any, Optional[dict]], Any] or Callable[[Any], Any]): A function
that will be applied to every non-collection element of `expr`. The function can
accept one or two arguments. If two arguments are accepted, the second argument
will be the context dictionary.
return_data (bool): If `True`, a copy of `expr` containing data modified by `visit_fn`
will be returned. This is slower than `return_data=False` (the default).
max_depth (int): Controls the depth of recursive visitation. If set to zero, no
recursion will occur. If set to a positive integer `N`, visitation will only
descend to `N` layers deep. If set to any negative integer, no limit will be
enforced and recursion will continue until terminal items are reached. By
default, recursion is unlimited.
context (Optional[dict]): An optional dictionary. If passed, the context will be sent
to each call to the `visit_fn`. The context can be mutated by each visitor and
will be available for later visits to expressions at the given depth. Values
will not be available "up" a level from a given expression.
The context will be automatically populated with an 'annotation' key when
visiting collections within a `BaseAnnotation` type. This requires the caller to
pass `context={}` and will not be activated by default.
remove_annotations (bool): If set, annotations will be replaced by their contents. By
default, annotations are preserved but their contents are visited.
        _seen (Optional[dict[int, Any]]): A mapping of already-visited object ids to their
            transformed results. This prevents infinite recursion when visiting recursive
            data structures.
Returns:
Any: The modified collection if `return_data` is `True`, otherwise `None`.
"""
if _seen is None:
_seen = {}
if context is not None:
_callback = cast(Callable[[Any, dict[str, VT]], Any], visit_fn)
def visit_nested(expr: Any) -> Optional[Any]:
return visit_collection(
expr,
_callback,
return_data=return_data,
remove_annotations=remove_annotations,
max_depth=max_depth - 1,
# Copy the context on nested calls so it does not "propagate up"
context=context.copy(),
_seen=_seen,
)
def visit_expression(expr: Any) -> Any:
return _callback(expr, context)
else:
_callback = cast(Callable[[Any], Any], visit_fn)
def visit_nested(expr: Any) -> Optional[Any]:
# Utility for a recursive call, preserving options and updating the depth.
return visit_collection(
expr,
_callback,
return_data=return_data,
remove_annotations=remove_annotations,
max_depth=max_depth - 1,
_seen=_seen,
)
def visit_expression(expr: Any) -> Any:
return _callback(expr)
# --- 1. Visit every expression
try:
result = visit_expression(expr)
except StopVisiting:
max_depth = 0
result = expr
if return_data:
# Only mutate the root expression if the user indicated we're returning data,
# otherwise the function could return null and we have no collection to check
expr = result
# --- 2. Visit every child of the expression recursively
# If we have reached the maximum depth or we have already visited this object,
# return the result if we are returning data, otherwise return None
obj_id = id(expr)
if max_depth == 0:
return result if return_data else None
elif obj_id in _seen:
# Return the cached transformed result
return _seen[obj_id] if return_data else None
# Mark this object as being processed to handle circular references
# We'll update with the actual result later
_seen[obj_id] = expr
# Then visit every item in the expression if it is a collection
# presume that the result is the original expression.
# in each of the following cases, we will update the result if we need to.
result = expr
# --- Generators
if isinstance(expr, (types.GeneratorType, types.AsyncGeneratorType)):
# Do not attempt to iterate over generators, as it will exhaust them
pass
# --- Mocks
elif isinstance(expr, Mock):
# Do not attempt to recurse into mock objects
pass
# --- Annotations (unmapped, quote, etc.)
elif isinstance(expr, BaseAnnotation):
annotated = cast(BaseAnnotation[Any], expr)
if context is not None:
context["annotation"] = cast(VT, annotated)
unwrapped = annotated.unwrap()
value = visit_nested(unwrapped)
if return_data:
# if we are removing annotations, return the value
if remove_annotations:
result = value
# if the value was modified, rewrap it
elif value is not unwrapped:
result = annotated.rewrap(value)
# otherwise return the expr
# --- Sequences
elif isinstance(expr, (list, tuple, set)):
seq = cast(Union[list[Any], tuple[Any], set[Any]], expr)
items = [visit_nested(o) for o in seq]
if return_data:
modified = any(item is not orig for item, orig in zip(items, seq))
if modified:
result = type(seq)(items)
# --- Dictionaries
elif isinstance(expr, (dict, OrderedDict)):
mapping = cast(dict[Any, Any], expr)
items = [(visit_nested(k), visit_nested(v)) for k, v in mapping.items()]
if return_data:
modified = any(
k1 is not k2 or v1 is not v2
for (k1, v1), (k2, v2) in zip(items, mapping.items())
)
if modified:
result = type(mapping)(items)
# --- Dataclasses
elif is_dataclass(expr) and not isinstance(expr, type):
expr_fields = fields(expr)
values = [visit_nested(getattr(expr, f.name)) for f in expr_fields]
if return_data:
modified = any(
getattr(expr, f.name) is not v for f, v in zip(expr_fields, values)
)
if modified:
result = replace(
expr, **{f.name: v for f, v in zip(expr_fields, values)}
)
# --- Pydantic models
elif isinstance(expr, pydantic.BaseModel):
# when extra=allow, fields not in model_fields may be in model_fields_set
original_data = dict(expr)
updated_data = {
field: visit_nested(value) for field, value in original_data.items()
}
if return_data:
modified = any(
original_data[field] is not updated_data[field]
for field in updated_data
)
if modified:
# Use construct to avoid validation and handle immutability
model_instance = expr.model_construct(
_fields_set=expr.model_fields_set, **updated_data
)
for private_attr in expr.__private_attributes__:
setattr(model_instance, private_attr, getattr(expr, private_attr))
result = model_instance
# Update the cache with the final transformed result
if return_data:
_seen[obj_id] = result
if return_data:
return result
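# --- Editor's illustrative sketch (not part of the original module): one way
# `visit_collection` might be exercised. The helper below is hypothetical and relies on
# the module's existing imports (e.g. `Any` from `typing`).
def _example_visit_collection_usage() -> None:
    def _double_ints(value: Any) -> Any:
        # Double integer leaves; return everything else unchanged.
        return value * 2 if isinstance(value, int) else value
    data = {"a": [1, 2, {"b": 3}], "c": "unchanged"}
    # With return_data=True, modified elements are copied into a new structure.
    assert visit_collection(data, _double_ints, return_data=True) == {
        "a": [2, 4, {"b": 6}],
        "c": "unchanged",
    }
    # With return_data=False (the default), only side effects apply and None is returned.
    assert visit_collection(data, _double_ints) is None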
@overload
def remove_nested_keys(
keys_to_remove: list[HashableT], obj: NestedDict[HashableT, VT]
) -> NestedDict[HashableT, VT]: ...
@overload
def remove_nested_keys(keys_to_remove: list[HashableT], obj: Any) -> Any: ...
def remove_nested_keys(
keys_to_remove: list[HashableT], obj: Union[NestedDict[HashableT, VT], Any]
) -> Union[NestedDict[HashableT, VT], Any]:
"""
    Recurses a dictionary and returns a copy without any keys that match an entry in
    `keys_to_remove`. Returns `obj` unchanged if it is not a dictionary.
    Args:
        keys_to_remove: A list of keys to remove from `obj`.
        obj: The object to remove keys from.
Returns:
`obj` without keys matching an entry in `keys_to_remove` if `obj` is a
dictionary. `obj` if `obj` is not a dictionary.
"""
if not isinstance(obj, dict):
return obj
return {
key: remove_nested_keys(keys_to_remove, value)
for key, value in cast(NestedDict[HashableT, VT], obj).items()
if key not in keys_to_remove
}
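# --- Editor's illustrative sketch (not part of the original module): `remove_nested_keys`
# strips matching keys at every nesting level and passes non-dict values through. The
# sample data is hypothetical.
def _example_remove_nested_keys() -> None:
    config = {"name": "demo", "secret": "xyz", "nested": {"secret": "abc", "keep": 1}}
    assert remove_nested_keys(["secret"], config) == {"name": "demo", "nested": {"keep": 1}}
    # Non-dictionary input is returned unchanged.
    assert remove_nested_keys(["secret"], "not a dict") == "not a dict"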
@overload
def distinct(
iterable: Iterable[HashableT], key: None = None
) -> Iterator[HashableT]: ...
@overload
def distinct(iterable: Iterable[T], key: Callable[[T], Hashable]) -> Iterator[T]: ...
def distinct(
iterable: Iterable[Union[T, HashableT]],
key: Optional[Callable[[T], Hashable]] = None,
) -> Iterator[Union[T, HashableT]]:
def _key(__i: Any) -> Hashable:
return __i
if key is not None:
_key = cast(Callable[[Any], Hashable], key)
seen: set[Hashable] = set()
for item in iterable:
if _key(item) in seen:
continue
seen.add(_key(item))
yield item
@overload
def get_from_dict(
dct: NestedDict[str, VT], keys: Union[str, list[str]], default: None = None
) -> Optional[VT]: ...
@overload
def get_from_dict(
dct: NestedDict[str, VT], keys: Union[str, list[str]], default: R
) -> Union[VT, R]: ...
def get_from_dict(
dct: NestedDict[str, VT], keys: Union[str, list[str]], default: Optional[R] = None
) -> Union[VT, R, None]:
"""
Fetch a value from a nested dictionary or list using a sequence of keys.
    This function allows you to fetch a value from a deeply nested structure
of dictionaries and lists using either a dot-separated string or a list
of keys. If a requested key does not exist, the function returns the
provided default value.
Args:
dct: The nested dictionary or list from which to fetch the value.
keys: The sequence of keys to use for access. Can be a
dot-separated string or a list of keys. List indices can be included
in the sequence as either integer keys or as string indices in square
brackets.
default: The default value to return if the requested key path does not
exist. Defaults to None.
Returns:
The fetched value if the key exists, or the default value if it does not.
Examples:
```python
get_from_dict({'a': {'b': {'c': [1, 2, 3, 4]}}}, 'a.b.c[1]') # 2
get_from_dict({'a': {'b': [0, {'c': [1, 2]}]}}, ['a', 'b', 1, 'c', 1]) # 2
get_from_dict({'a': {'b': [0, {'c': [1, 2]}]}}, 'a.b.1.c.2', 'default') # 'default'
```
"""
if isinstance(keys, str):
keys = keys.replace("[", ".").replace("]", "").split(".")
value = dct
try:
for key in keys:
try:
# Try to cast to int to handle list indices
key = int(key)
except ValueError:
# If it's not an int, use the key as-is
# for dict lookup
pass
value = value[key] # type: ignore
return cast(VT, value)
except (TypeError, KeyError, IndexError):
return default
def set_in_dict(
dct: NestedDict[str, VT], keys: Union[str, list[str]], value: VT
) -> None:
"""
Sets a value in a nested dictionary using a sequence of keys.
    This function allows you to set a value in a deeply nested structure
of dictionaries and lists using either a dot-separated string or a list
of keys. If a requested key does not exist, the function will create it as
a new dictionary.
Args:
dct: The dictionary to set the value in.
keys: The sequence of keys to use for access. Can be a
dot-separated string or a list of keys.
value: The value to set in the dictionary.
    Returns:
        None. `dct` is modified in place.
    Raises:
        TypeError: If an intermediate key in the path exists and its value is not a dictionary.
"""
if isinstance(keys, str):
keys = keys.replace("[", ".").replace("]", "").split(".")
for k in keys[:-1]:
if not isinstance(dct.get(k, {}), dict):
raise TypeError(f"Key path exists and contains a non-dict value: {keys}")
if k not in dct:
dct[k] = {}
dct = cast(NestedDict[str, VT], dct[k])
dct[keys[-1]] = value
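# --- Editor's illustrative sketch (not part of the original module): `set_in_dict` with a
# dot-separated key path. Missing intermediate keys become new dictionaries and the update
# happens in place; the settings dict below is hypothetical.
def _example_set_in_dict() -> None:
    settings: dict[str, Any] = {"logging": {"level": "INFO"}}
    set_in_dict(settings, "logging.handlers.console", "stdout")
    assert settings == {"logging": {"level": "INFO", "handlers": {"console": "stdout"}}}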
def deep_merge(
dct: NestedDict[str, VT1], merge: NestedDict[str, VT2]
) -> NestedDict[str, Union[VT1, VT2]]:
"""
Recursively merges `merge` into `dct`.
Args:
dct: The dictionary to merge into.
merge: The dictionary to merge from.
Returns:
A new dictionary with the merged contents.
"""
result: dict[str, Any] = dct.copy() # Start with keys and values from `dct`
for key, value in merge.items():
if key in result and isinstance(result[key], dict) and isinstance(value, dict):
# If both values are dictionaries, merge them recursively
result[key] = deep_merge(
cast(NestedDict[str, VT1], result[key]),
cast(NestedDict[str, VT2], value),
)
else:
# Otherwise, overwrite with the new value
result[key] = cast(Union[VT2, NestedDict[str, VT2]], value)
return result
def deep_merge_dicts(*dicts: NestedDict[str, Any]) -> NestedDict[str, Any]:
"""
Recursively merges multiple dictionaries.
Args:
dicts: The dictionaries to merge.
Returns:
A new dictionary with the merged contents.
"""
result: NestedDict[str, Any] = {}
for dictionary in dicts:
result = deep_merge(result, dictionary)
return result
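# --- Editor's illustrative sketch (not part of the original module): `deep_merge` merges
# nested dictionaries recursively, while non-dict values are overwritten by the right-hand
# side; `deep_merge_dicts` folds any number of dictionaries left to right. Sample data is
# hypothetical.
def _example_deep_merge() -> None:
    defaults = {"db": {"host": "localhost", "port": 5432}, "debug": False}
    overrides = {"db": {"port": 5433}, "debug": True}
    assert deep_merge(defaults, overrides) == {
        "db": {"host": "localhost", "port": 5433},
        "debug": True,
    }
    assert deep_merge_dicts(defaults, overrides, {"extra": 1})["extra"] == 1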
|
StopVisiting
|
python
|
openai__openai-python
|
src/openai/types/realtime/realtime_mcp_approval_response_param.py
|
{
"start": 260,
"end": 755
}
|
class ____(TypedDict, total=False):
id: Required[str]
"""The unique ID of the approval response."""
approval_request_id: Required[str]
"""The ID of the approval request being answered."""
approve: Required[bool]
"""Whether the request was approved."""
type: Required[Literal["mcp_approval_response"]]
"""The type of the item. Always `mcp_approval_response`."""
reason: Optional[str]
"""Optional reason for the decision."""
|
RealtimeMcpApprovalResponseParam
|
python
|
apache__airflow
|
providers/microsoft/azure/src/airflow/providers/microsoft/azure/log/wasb_task_handler.py
|
{
"start": 7047,
"end": 10173
}
|
class ____(FileTaskHandler, LoggingMixin):
"""
WasbTaskHandler is a python log handler that handles and reads task instance logs.
It extends airflow FileTaskHandler and uploads to and reads from Wasb remote storage.
"""
trigger_should_wrap = True
def __init__(
self,
base_log_folder: str,
wasb_log_folder: str,
wasb_container: str,
max_bytes: int = 0,
backup_count: int = 0,
delay: bool = False,
**kwargs,
) -> None:
# support log file size handling of FileTaskHandler
super().__init__(
base_log_folder=base_log_folder, max_bytes=max_bytes, backup_count=backup_count, delay=delay
)
self.handler: logging.FileHandler | None = None
self.log_relative_path = ""
self.closed = False
self.upload_on_close = True
self.io = WasbRemoteLogIO(
base_log_folder=base_log_folder,
remote_base=wasb_log_folder,
wasb_container=wasb_container,
delete_local_copy=kwargs.get(
"delete_local_copy", conf.getboolean("logging", "delete_local_logs")
),
)
def set_context(self, ti: TaskInstance, *, identifier: str | None = None) -> None:
super().set_context(ti, identifier=identifier)
# Local location and remote location is needed to open and
# upload local log file to Wasb remote storage.
if TYPE_CHECKING:
assert self.handler is not None
self.ti = ti
full_path = self.handler.baseFilename
self.log_relative_path = Path(full_path).relative_to(self.local_base).as_posix()
is_trigger_log_context = getattr(ti, "is_trigger_log_context", False)
self.upload_on_close = is_trigger_log_context or not getattr(ti, "raw", None)
def close(self) -> None:
"""Close and upload local log file to remote storage Wasb."""
# When application exit, system shuts down all handlers by
# calling close method. Here we check if logger is already
# closed to prevent uploading the log to remote storage multiple
# times when `logging.shutdown` is called.
if self.closed:
return
super().close()
if not self.upload_on_close:
return
if hasattr(self, "ti"):
self.io.upload(self.log_relative_path, self.ti)
# Mark closed so we don't double write if close is called twice
self.closed = True
def _read_remote_logs(self, ti, try_number, metadata=None) -> tuple[LogSourceInfo, LogMessages]:
# Explicitly getting log relative path is necessary as the given
# task instance might be different than task instance passed in
# in set_context method.
worker_log_rel_path = self._render_filename(ti, try_number)
messages, logs = self.io.read(worker_log_rel_path, ti)
if logs is None:
logs = []
if not AIRFLOW_V_3_0_PLUS:
messages.append(f"No logs found in WASB; ti={ti}")
return messages, logs
|
WasbTaskHandler
|
python
|
python-attrs__attrs
|
tests/dataclass_transform_example.py
|
{
"start": 172,
"end": 357
}
|
class ____:
with_converter: int = attr.field(converter=int)
reveal_type(DefineConverter.__init__) # noqa: F821
DefineConverter(with_converter=b"42")
@attr.frozen()
|
DefineConverter
|
python
|
pytorch__pytorch
|
torch/_dynamo/variables/dicts.py
|
{
"start": 56935,
"end": 58192
}
|
class ____(SetVariable):
def debug_repr(self) -> str:
if not self.items:
return "dict_keys([])"
else:
return (
"dict_keys([" + ",".join(k.vt.debug_repr() for k in self.items) + "])"
)
def install_dict_keys_match_guard(self) -> None:
# Already EQUALS_MATCH guarded
pass
def install_dict_contains_guard(
self, tx: "InstructionTranslator", args: list[VariableTracker]
) -> None:
# Already EQUALS_MATCH guarded
pass
@property
def set_items(self) -> Any:
return self.items
def python_type(self) -> type:
return dict_keys
def as_python_constant(self) -> Any:
return dict.fromkeys(
{k.vt.as_python_constant() for k in self.set_items}, None
).keys()
def call_method(
self,
tx: "InstructionTranslator",
name: str,
args: list[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
if name in ["add", "pop", "update", "remove", "discard", "clear"]:
raise RuntimeError(f"Illegal call_method {name} on a dict_keys")
return super().call_method(tx, name, args, kwargs)
|
DictKeySetVariable
|
python
|
conda__conda
|
conda/notices/types.py
|
{
"start": 1593,
"end": 3615
}
|
class ____(NamedTuple):
url: str
name: str
json_data: dict | None
@property
def notices(self) -> Sequence[ChannelNotice]:
if self.json_data:
notices = self.json_data.get("notices", ())
return tuple(
ChannelNotice(
id=str(notice.get("id", UNDEFINED_MESSAGE_ID)),
channel_name=self.name,
message=notice.get("message"),
level=self._parse_notice_level(notice.get("level")),
created_at=self._parse_iso_timestamp(notice.get("created_at")),
expired_at=self._parse_iso_timestamp(notice.get("expired_at")),
interval=notice.get("interval"),
)
for notice in notices
)
# Default value
return ()
@staticmethod
def _parse_notice_level(level: str | None) -> NoticeLevel:
"""
We use this to validate notice levels and provide reasonable defaults
if any are invalid.
"""
try:
return NoticeLevel(level)
except ValueError:
# If we get an invalid value, rather than fail, we simply use a reasonable default
return NoticeLevel(NoticeLevel.INFO)
@staticmethod
def _parse_iso_timestamp(iso_timestamp: str | None) -> datetime | None:
"""Parse ISO timestamp and fail over to a default value of none."""
if iso_timestamp is None:
return None
try:
return datetime.fromisoformat(iso_timestamp)
except ValueError:
return None
@classmethod
def get_cache_key(cls, url: str, cache_dir: Path) -> Path:
"""Returns where this channel response will be cached by hashing the URL."""
bytes_filename = url.encode()
sha256_hash = hashlib.sha256(bytes_filename)
cache_filename = f"{sha256_hash.hexdigest()}.json"
return cache_dir.joinpath(cache_filename)
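# --- Editor's illustrative sketch (not part of the original module): the cache-key
# derivation used above, reproduced with only the standard library and this module's
# existing `hashlib` and `Path` imports. The URL and cache directory are hypothetical;
# a given URL always hashes to the same ".json" filename.
def _example_cache_key_derivation() -> None:
    url = "https://repo.example.com/notices.json"
    cache_dir = Path("/tmp/notices-cache")
    cache_file = cache_dir.joinpath(f"{hashlib.sha256(url.encode()).hexdigest()}.json")
    assert cache_file.parent == cache_dir
    assert cache_file.suffix == ".json"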
|
ChannelNoticeResponse
|
python
|
pyca__cryptography
|
tests/hazmat/primitives/test_sm4.py
|
{
"start": 2688,
"end": 3198
}
|
class ____:
test_cfb = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "SM4"),
["draft-ribose-cfrg-sm4-10-ctr.txt"],
lambda key, **kwargs: algorithms.SM4(binascii.unhexlify(key)),
lambda iv, **kwargs: modes.CTR(binascii.unhexlify(iv)),
)
@pytest.mark.supported(
only_if=lambda backend: backend.cipher_supported(
algorithms.SM4(b"\x00" * 16), modes.GCM(b"\x00" * 16)
),
skip_message="Does not support SM4 GCM",
)
|
TestSM4ModeCTR
|
python
|
run-llama__llama_index
|
llama-index-integrations/vector_stores/llama-index-vector-stores-kdbai/llama_index/vector_stores/kdbai/base.py
|
{
"start": 652,
"end": 8086
}
|
class ____(BasePydanticVectorStore):
"""
The KDBAI Vector Store.
In this vector store we store the text, its embedding and
its metadata in a KDBAI vector store table. This implementation
allows the use of an already existing table.
Args:
table kdbai.Table: The KDB.AI table to use as storage.
batch (int, optional): batch size to insert data.
Default is 100.
Returns:
KDBAIVectorStore: Vectorstore that supports add and query.
"""
stores_text: bool = True
flat_metadata: bool = True
hybrid_search: bool = False
batch_size: int
_table: Any = PrivateAttr()
_sparse_encoder: Optional[Callable] = PrivateAttr()
def __init__(
self,
table: Any = None,
hybrid_search: bool = False,
sparse_encoder: Optional[Callable] = None,
batch_size: int = DEFAULT_BATCH_SIZE,
**kwargs: Any,
) -> None:
"""Initialize params."""
try:
import kdbai_client as kdbai
logger.info("KDBAI client version: " + kdbai.__version__)
except ImportError:
raise ValueError(
"Could not import kdbai_client package."
"Please add it to the dependencies."
)
super().__init__(batch_size=batch_size, hybrid_search=hybrid_search)
if table is None:
raise ValueError("Must provide an existing KDB.AI table.")
else:
self._table = table
if hybrid_search:
if sparse_encoder is None:
self._sparse_encoder = default_sparse_encoder
else:
self._sparse_encoder = sparse_encoder
@property
def client(self) -> Any:
"""Return KDB.AI client."""
return self._table
@classmethod
def class_name(cls) -> str:
return "KDBAIVectorStore"
def add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
"""
Add nodes to the KDBAI Vector Store.
Args:
nodes (List[BaseNode]): List of nodes to be added.
Returns:
List[str]: List of document IDs that were added.
"""
try:
import kdbai_client as kdbai
logger.info("KDBAI client version: " + kdbai.__version__)
except ImportError:
raise ValueError(
"Could not import kdbai_client package."
"Please add it to the dependencies."
)
df = pd.DataFrame()
docs = []
schema = self._table.schema
if self.hybrid_search:
schema = [item for item in schema if item["name"] != "sparseVectors"]
try:
for node in nodes:
doc = {
"document_id": node.node_id.encode("utf-8"),
"text": node.text.encode("utf-8"),
"embeddings": node.embedding,
}
if self.hybrid_search:
doc["sparseVectors"] = self._sparse_encoder(node.get_content())
# handle metadata columns
if len(schema) > len(DEFAULT_COLUMN_NAMES):
for column in [
item
for item in schema
if item["name"] not in DEFAULT_COLUMN_NAMES
]:
try:
doc[column["name"]] = node.metadata[column["name"]]
except Exception as e:
logger.error(
f"Error writing column {column['name']} as type {column['type']}: {e}."
)
docs.append(doc)
df = pd.DataFrame(docs)
for i in range((len(df) - 1) // self.batch_size + 1):
batch = df.iloc[i * self.batch_size : (i + 1) * self.batch_size]
try:
self._table.insert(batch)
logger.info(f"inserted batch {i}")
except Exception as e:
logger.exception(
f"Failed to insert batch {i} of documents into the datastore: {e}"
)
return [x.decode("utf-8") for x in df["document_id"].tolist()]
except Exception as e:
logger.error(f"Error preparing data for KDB.AI: {e}.")
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
try:
import kdbai_client as kdbai
logger.info("KDBAI client version: " + kdbai.__version__)
except ImportError:
raise ValueError(
"Could not import kdbai_client package."
"Please add it to the dependencies."
)
if query.alpha:
raise ValueError(
"Could not run hybrid search. "
"Please remove alpha and provide KDBAI weights for the two indexes though the vector_store_kwargs."
)
if query.filters:
filter = query.filters
if kwargs.get("filter"):
filter.extend(kwargs.pop("filter"))
kwargs["filter"] = filter
if kwargs.get("index"):
index = kwargs.pop("index")
if self.hybrid_search:
indexSparse = kwargs.pop("indexSparse", None)
indexWeight = kwargs.pop("indexWeight", None)
indexSparseWeight = kwargs.pop("indexSparseWeight", None)
if not all([indexSparse, indexWeight, indexSparseWeight]):
raise ValueError(
"Could not run hybrid search. "
"Please provide KDBAI sparse index name and weights."
)
else:
raise ValueError(
"Could not run the search. Please provide KDBAI index name."
)
if self.hybrid_search:
sparse_vectors = [self._sparse_encoder(query.query_str)]
qry = {index: [query.query_embedding], indexSparse: sparse_vectors}
index_params = {
index: {"weight": indexWeight},
indexSparse: {"weight": indexSparseWeight},
}
results = self._table.search(
vectors=qry,
index_params=index_params,
n=query.similarity_top_k,
**kwargs,
)[0]
else:
results = self._table.search(
vectors={index: [query.query_embedding]},
n=query.similarity_top_k,
**kwargs,
)[0]
top_k_nodes = []
top_k_ids = []
top_k_scores = []
for result in results.to_dict(orient="records"):
metadata = {x: result[x] for x in result if x not in DEFAULT_COLUMN_NAMES}
node = TextNode(
text=result["text"], id_=result["document_id"], metadata=metadata
)
top_k_ids.append(result["document_id"])
top_k_nodes.append(node)
top_k_scores.append(result["__nn_distance"])
return VectorStoreQueryResult(
nodes=top_k_nodes, similarities=top_k_scores, ids=top_k_ids
)
def delete(self, **delete_kwargs: Any) -> None:
raise Exception("Not implemented.")
|
KDBAIVectorStore
|
python
|
django-haystack__django-haystack
|
test_haystack/test_indexes.py
|
{
"start": 22572,
"end": 22730
}
|
class ____(indexes.ModelSearchIndex, indexes.Indexable):
class Meta:
model = MockModel
excludes = ["author", "foo"]
|
ExcludesModelSearchIndex
|
python
|
redis__redis-py
|
tests/test_http/test_http_client.py
|
{
"start": 315,
"end": 854
}
|
class ____:
def __init__(
self, *, status: int, headers: Dict[str, str], url: str, content: bytes
):
self.status = status
self.headers = headers
self._url = url
self._content = content
def read(self) -> bytes:
return self._content
def geturl(self) -> str:
return self._url
# Support context manager used by urlopen
def __enter__(self) -> "FakeResponse":
return self
def __exit__(self, exc_type, exc, tb) -> None:
return None
|
FakeResponse
|
python
|
huggingface__transformers
|
src/transformers/trainer_pt_utils.py
|
{
"start": 52079,
"end": 53963
}
|
class ____(LRScheduler):
"""
    For layer-wise optimizers such as the GaLoRE optimizer, the optimization and scheduling
    steps are already done through the post-gradient hooks. Therefore, the trick is to
    create a dummy scheduler that accepts arbitrary args and kwargs and is a no-op during
    training.
"""
def __init__(self, *args, **kwargs):
self.default_lr = kwargs["lr"]
optimizer = LayerWiseDummyOptimizer(**kwargs)
last_epoch = -1
super().__init__(optimizer, last_epoch)
def get_lr(self):
# default value
lrs = [self.default_lr]
# we take each lr in the parameters if they exist, assumes the optimizer to be the `LayerWiseDummyOptimizer`
if self.optimizer is not None:
param_wise_lrs = [
[group["lr"] for group in optim.param_groups] for optim in self.optimizer.optimizer_dict.values()
]
lrs = list(chain(*param_wise_lrs))
return lrs
def _get_closed_form_lr(self):
return self.base_lrs
def set_rng_state_for_device(device_name, device_module, checkpoint_rng_state, is_distributed):
"""Helper to set RNG state for a specific device type (CUDA, NPU, MLU, MUSA)"""
device_state_key = device_name.lower()
err_template = "Didn't manage to set back the RNG states of the {backend} because of the following error:\n {exception}\nThis won't yield the same results as if the training had not been interrupted."
try:
if is_distributed:
device_module.random.set_rng_state_all(checkpoint_rng_state[device_state_key])
else:
device_module.random.set_rng_state(checkpoint_rng_state[device_state_key])
except Exception as e:
# Log error if setting RNG state fails
logger.error(err_template.format(backend=device_name, exception=e))
|
LayerWiseDummyScheduler
|
python
|
numba__numba
|
numba/core/compiler_machinery.py
|
{
"start": 2920,
"end": 3625
}
|
class ____(object):
"""This looks and behaves like LLVM's AnalysisUsage because its like that.
"""
def __init__(self):
self._required = set()
self._preserved = set()
def get_required_set(self):
return self._required
def get_preserved_set(self):
return self._preserved
def add_required(self, pss):
self._required.add(pss)
def add_preserved(self, pss):
self._preserved.add(pss)
def __str__(self):
return "required: %s\n" % self._required
_DEBUG = False
def debug_print(*args, **kwargs):
if _DEBUG:
print(*args, **kwargs)
pass_timings = namedtuple('pass_timings', 'init run finalize')
|
AnalysisUsage
|
python
|
apache__airflow
|
providers/ydb/src/airflow/providers/ydb/hooks/ydb.py
|
{
"start": 1558,
"end": 3139
}
|
class ____:
"""YDB cursor wrapper."""
def __init__(self, delegatee: DbApiCursor, is_ddl: bool):
self.delegatee: DbApiCursor = delegatee
self.is_ddl: bool = is_ddl
def execute(self, sql: str, parameters: Mapping[str, Any] | None = None):
if parameters is not None:
raise AirflowException("parameters is not supported yet")
if self.is_ddl:
return self.delegatee.execute_scheme(sql, parameters)
return self.delegatee.execute(sql, parameters)
def executemany(self, sql: str, seq_of_parameters: Sequence[Mapping[str, Any]]):
for parameters in seq_of_parameters:
self.execute(sql, parameters)
def executescript(self, script):
return self.execute(script)
def fetchone(self):
return self.delegatee.fetchone()
def fetchmany(self, size=None):
return self.delegatee.fetchmany(size=size)
def fetchall(self):
return self.delegatee.fetchall()
def nextset(self):
return self.delegatee.nextset()
def setinputsizes(self, sizes):
return self.delegatee.setinputsizes(sizes)
def setoutputsize(self, column=None):
return self.delegatee.setoutputsize(column)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
return self.delegatee.close()
@property
def rowcount(self):
return self.delegatee.rowcount
@property
def description(self):
return self.delegatee.description
|
YDBCursor
|
python
|
ray-project__ray
|
python/ray/serve/tests/test_list_outbound_deployments.py
|
{
"start": 842,
"end": 1288
}
|
class ____:
def __init__(self, handles_dict: dict, handles_list: list):
self.handles = handles_dict # {"a": handle_a, "b": handle_b}
self.handle_list = handles_list # [handle_a, handle_b]
async def __call__(self, x: int) -> int:
result_a = await self.handles["a"].remote(x)
result_b = await self.handles["b"].process.remote(x)
return result_a + result_b
@serve.deployment
|
UpstreamWithNestedHandles
|
python
|
gevent__gevent
|
src/greentest/3.10/test_socket.py
|
{
"start": 250362,
"end": 252901
}
|
class ____(unittest.TestCase):
def test_address(self):
port = socket_helper.find_unused_port()
with socket.create_server(("127.0.0.1", port)) as sock:
self.assertEqual(sock.getsockname()[0], "127.0.0.1")
self.assertEqual(sock.getsockname()[1], port)
if socket_helper.IPV6_ENABLED:
with socket.create_server(("::1", port),
family=socket.AF_INET6) as sock:
self.assertEqual(sock.getsockname()[0], "::1")
self.assertEqual(sock.getsockname()[1], port)
def test_family_and_type(self):
with socket.create_server(("127.0.0.1", 0)) as sock:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
if socket_helper.IPV6_ENABLED:
with socket.create_server(("::1", 0), family=socket.AF_INET6) as s:
self.assertEqual(s.family, socket.AF_INET6)
self.assertEqual(sock.type, socket.SOCK_STREAM)
def test_reuse_port(self):
if not hasattr(socket, "SO_REUSEPORT"):
with self.assertRaises(ValueError):
socket.create_server(("localhost", 0), reuse_port=True)
else:
with socket.create_server(("localhost", 0)) as sock:
opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
self.assertEqual(opt, 0)
with socket.create_server(("localhost", 0), reuse_port=True) as sock:
opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
self.assertNotEqual(opt, 0)
@unittest.skipIf(not hasattr(_socket, 'IPPROTO_IPV6') or
not hasattr(_socket, 'IPV6_V6ONLY'),
"IPV6_V6ONLY option not supported")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_ipv6_only_default(self):
with socket.create_server(("::1", 0), family=socket.AF_INET6) as sock:
assert sock.getsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY)
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_dualstack_ipv6_family(self):
with socket.create_server(("::1", 0), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.assertEqual(sock.family, socket.AF_INET6)
|
CreateServerTest
|
python
|
ray-project__ray
|
python/ray/data/_internal/logical/operators/map_operator.py
|
{
"start": 7180,
"end": 8747
}
|
class ____(AbstractUDFMap):
"""Logical operator for map_batches."""
def __init__(
self,
input_op: LogicalOperator,
fn: UserDefinedFunction,
batch_size: Optional[int] = None,
batch_format: str = "default",
zero_copy_batch: bool = True,
fn_args: Optional[Iterable[Any]] = None,
fn_kwargs: Optional[Dict[str, Any]] = None,
fn_constructor_args: Optional[Iterable[Any]] = None,
fn_constructor_kwargs: Optional[Dict[str, Any]] = None,
min_rows_per_bundled_input: Optional[int] = None,
compute: Optional[ComputeStrategy] = None,
udf_modifying_row_count: bool = False,
ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None,
ray_remote_args: Optional[Dict[str, Any]] = None,
):
super().__init__(
"MapBatches",
input_op,
fn,
fn_args=fn_args,
fn_kwargs=fn_kwargs,
fn_constructor_args=fn_constructor_args,
fn_constructor_kwargs=fn_constructor_kwargs,
min_rows_per_bundled_input=min_rows_per_bundled_input,
compute=compute,
ray_remote_args_fn=ray_remote_args_fn,
ray_remote_args=ray_remote_args,
)
self._batch_size = batch_size
self._batch_format = batch_format
self._zero_copy_batch = zero_copy_batch
self._udf_modifying_row_count = udf_modifying_row_count
def can_modify_num_rows(self) -> bool:
return self._udf_modifying_row_count
|
MapBatches
|
python
|
streamlit__streamlit
|
lib/streamlit/runtime/media_file_storage.py
|
{
"start": 736,
"end": 892
}
|
class ____(Enum):
# st.image, st.video, st.audio files
MEDIA = "media"
# st.download_button files
DOWNLOADABLE = "downloadable"
|
MediaFileKind
|
python
|
getsentry__sentry
|
src/sentry/snuba/entity_subscription.py
|
{
"start": 5308,
"end": 7052
}
|
class ____(ABC, _EntitySubscription):
"""
An abstraction layer for all different entity subscriptions. It is important to note that
this abstraction layer was added because the subscription logic was too coupled to the
events and transactions entities, which was fine initially but now as we are adding more
entities to support subscriptions (alerts), we need to decouple this logic.
"""
def __init__(
self,
aggregate: str,
time_window: int,
extra_fields: _EntitySpecificParams | None = None,
):
pass
@abstractmethod
def get_entity_extra_params(self) -> Mapping[str, Any]:
raise NotImplementedError
@abstractmethod
def aggregate_query_results(
self, data: list[dict[str, Any]], alias: str | None = None
) -> list[dict[str, Any]]:
"""
Method that serves the purpose of receiving query results and applying any necessary
aggregations on them
"""
raise NotImplementedError
def build_query_builder(
self,
query: str,
project_ids: list[int],
environment: Environment | None,
params: ParamsType | None = None,
skip_field_validation_for_entity_subscription_deletion: bool = False,
) -> BaseQueryBuilder:
raise NotImplementedError
def build_rpc_request(
self,
query: str,
project_ids: list[int],
environment: Environment | None,
params: ParamsType | None = None,
skip_field_validation_for_entity_subscription_deletion: bool = False,
referrer: str = Referrer.API_ALERTS_ALERT_RULE_CHART.value,
) -> TimeSeriesRequest:
raise NotImplementedError
|
BaseEntitySubscription
|
python
|
scrapy__scrapy
|
tests/test_loader.py
|
{
"start": 2030,
"end": 5950
}
|
class ____:
item_class: type | None = None
def test_keep_single_value(self):
"""Loaded item should contain values from the initial item"""
input_item = self.item_class(name="foo")
il = ItemLoader(item=input_item)
loaded_item = il.load_item()
assert isinstance(loaded_item, self.item_class)
assert ItemAdapter(loaded_item).asdict() == {"name": ["foo"]}
def test_keep_list(self):
"""Loaded item should contain values from the initial item"""
input_item = self.item_class(name=["foo", "bar"])
il = ItemLoader(item=input_item)
loaded_item = il.load_item()
assert isinstance(loaded_item, self.item_class)
assert ItemAdapter(loaded_item).asdict() == {"name": ["foo", "bar"]}
def test_add_value_singlevalue_singlevalue(self):
"""Values added after initialization should be appended"""
input_item = self.item_class(name="foo")
il = ItemLoader(item=input_item)
il.add_value("name", "bar")
loaded_item = il.load_item()
assert isinstance(loaded_item, self.item_class)
assert ItemAdapter(loaded_item).asdict() == {"name": ["foo", "bar"]}
def test_add_value_singlevalue_list(self):
"""Values added after initialization should be appended"""
input_item = self.item_class(name="foo")
il = ItemLoader(item=input_item)
il.add_value("name", ["item", "loader"])
loaded_item = il.load_item()
assert isinstance(loaded_item, self.item_class)
assert ItemAdapter(loaded_item).asdict() == {"name": ["foo", "item", "loader"]}
def test_add_value_list_singlevalue(self):
"""Values added after initialization should be appended"""
input_item = self.item_class(name=["foo", "bar"])
il = ItemLoader(item=input_item)
il.add_value("name", "qwerty")
loaded_item = il.load_item()
assert isinstance(loaded_item, self.item_class)
assert ItemAdapter(loaded_item).asdict() == {"name": ["foo", "bar", "qwerty"]}
def test_add_value_list_list(self):
"""Values added after initialization should be appended"""
input_item = self.item_class(name=["foo", "bar"])
il = ItemLoader(item=input_item)
il.add_value("name", ["item", "loader"])
loaded_item = il.load_item()
assert isinstance(loaded_item, self.item_class)
assert ItemAdapter(loaded_item).asdict() == {
"name": ["foo", "bar", "item", "loader"]
}
def test_get_output_value_singlevalue(self):
"""Getting output value must not remove value from item"""
input_item = self.item_class(name="foo")
il = ItemLoader(item=input_item)
assert il.get_output_value("name") == ["foo"]
loaded_item = il.load_item()
assert isinstance(loaded_item, self.item_class)
assert ItemAdapter(loaded_item).asdict() == {"name": ["foo"]}
def test_get_output_value_list(self):
"""Getting output value must not remove value from item"""
input_item = self.item_class(name=["foo", "bar"])
il = ItemLoader(item=input_item)
assert il.get_output_value("name") == ["foo", "bar"]
loaded_item = il.load_item()
assert isinstance(loaded_item, self.item_class)
assert ItemAdapter(loaded_item).asdict() == {"name": ["foo", "bar"]}
def test_values_single(self):
"""Values from initial item must be added to loader._values"""
input_item = self.item_class(name="foo")
il = ItemLoader(item=input_item)
assert il._values.get("name") == ["foo"]
def test_values_list(self):
"""Values from initial item must be added to loader._values"""
input_item = self.item_class(name=["foo", "bar"])
il = ItemLoader(item=input_item)
assert il._values.get("name") == ["foo", "bar"]
|
InitializationTestMixin
|
python
|
django-extensions__django-extensions
|
django_extensions/admin/__init__.py
|
{
"start": 7064,
"end": 7179
}
|
class ____(
ForeignKeyAutocompleteAdminMixin, admin.TabularInline
):
pass
|
ForeignKeyAutocompleteTabularInline
|
python
|
ray-project__ray
|
python/ray/serve/_private/common.py
|
{
"start": 26225,
"end": 26325
}
|
class ____(str, Enum):
UNDEFINED = "UNDEFINED"
HTTP = "HTTP"
GRPC = "gRPC"
|
RequestProtocol
|
python
|
sqlalchemy__sqlalchemy
|
test/sql/test_sequences.py
|
{
"start": 979,
"end": 4750
}
|
class ____(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = "default"
__sparse_driver_backend__ = True
@testing.combinations(
(Sequence("foo_seq"), ""),
(Sequence("foo_seq", start=5), "START WITH 5"),
(Sequence("foo_seq", increment=2), "INCREMENT BY 2"),
(
Sequence("foo_seq", increment=2, start=5),
"INCREMENT BY 2 START WITH 5",
),
(
Sequence("foo_seq", increment=2, start=0, minvalue=0),
"INCREMENT BY 2 START WITH 0 MINVALUE 0",
),
(
Sequence("foo_seq", increment=2, start=1, maxvalue=5),
"INCREMENT BY 2 START WITH 1 MAXVALUE 5",
),
(
Sequence("foo_seq", increment=2, start=1, nomaxvalue=True),
"INCREMENT BY 2 START WITH 1 NO MAXVALUE",
),
(
Sequence("foo_seq", increment=2, start=0, nominvalue=True),
"INCREMENT BY 2 START WITH 0 NO MINVALUE",
),
(
Sequence("foo_seq", start=1, maxvalue=10, cycle=True),
"START WITH 1 MAXVALUE 10 CYCLE",
),
(
Sequence("foo_seq", cache=1000),
"CACHE 1000",
),
(Sequence("foo_seq", minvalue=42), "MINVALUE 42"),
(Sequence("foo_seq", minvalue=-42), "MINVALUE -42"),
(
Sequence("foo_seq", minvalue=42, increment=2),
"INCREMENT BY 2 MINVALUE 42",
),
(
Sequence("foo_seq", minvalue=-42, increment=2),
"INCREMENT BY 2 MINVALUE -42",
),
(
Sequence("foo_seq", minvalue=42, increment=-2),
"INCREMENT BY -2 MINVALUE 42",
),
(
Sequence("foo_seq", minvalue=-42, increment=-2),
"INCREMENT BY -2 MINVALUE -42",
),
(Sequence("foo_seq", maxvalue=99), "MAXVALUE 99"),
(Sequence("foo_seq", maxvalue=-99), "MAXVALUE -99"),
(
Sequence("foo_seq", maxvalue=99, increment=2),
"INCREMENT BY 2 MAXVALUE 99",
),
(
Sequence("foo_seq", maxvalue=99, increment=-2),
"INCREMENT BY -2 MAXVALUE 99",
),
(
Sequence("foo_seq", maxvalue=-99, increment=-2),
"INCREMENT BY -2 MAXVALUE -99",
),
(
Sequence("foo_seq", minvalue=42, maxvalue=99),
"MINVALUE 42 MAXVALUE 99",
),
(
Sequence("foo_seq", minvalue=42, maxvalue=99, increment=2),
"INCREMENT BY 2 MINVALUE 42 MAXVALUE 99",
),
(
Sequence("foo_seq", minvalue=-42, maxvalue=-9, increment=2),
"INCREMENT BY 2 MINVALUE -42 MAXVALUE -9",
),
(
Sequence("foo_seq", minvalue=42, maxvalue=99, increment=-2),
"INCREMENT BY -2 MINVALUE 42 MAXVALUE 99",
),
(
Sequence("foo_seq", minvalue=-42, maxvalue=-9, increment=-2),
"INCREMENT BY -2 MINVALUE -42 MAXVALUE -9",
),
)
def test_create_ddl(self, sequence, sql):
before = sequence.start
self.assert_compile(
CreateSequence(sequence),
("CREATE SEQUENCE foo_seq " + sql).strip(),
)
eq_(sequence.start, before)
def test_drop_ddl(self):
self.assert_compile(
CreateSequence(Sequence("foo_seq"), if_not_exists=True),
"CREATE SEQUENCE IF NOT EXISTS foo_seq",
)
self.assert_compile(
DropSequence(Sequence("foo_seq")), "DROP SEQUENCE foo_seq"
)
self.assert_compile(
DropSequence(Sequence("foo_seq"), if_exists=True),
"DROP SEQUENCE IF EXISTS foo_seq",
)
|
SequenceDDLTest
|
python
|
Textualize__textual
|
tests/snapshot_tests/snapshot_apps/bindings_screen_overrides_show.py
|
{
"start": 329,
"end": 679
}
|
class ____(App):
"""Regression test for https://github.com/Textualize/textual/issues/4382"""
BINDINGS = [
Binding("p", "app.pop_screen", "Binding hidden", show=False),
]
def on_mount(self) -> None:
self.push_screen(ShowBindingScreen())
if __name__ == "__main__":
app = HideBindingApp()
app.run()
|
HideBindingApp
|
python
|
apache__airflow
|
airflow-core/tests/unit/charts/log_groomer.py
|
{
"start": 914,
"end": 10387
}
|
class ____:
obj_name: str = ""
folder: str = ""
def test_log_groomer_collector_default_enabled(self):
if self.obj_name == "dag-processor":
values = {"dagProcessor": {"enabled": True}}
else:
values = None
docs = render_chart(
values=values, show_only=[f"templates/{self.folder}/{self.obj_name}-deployment.yaml"]
)
assert len(jmespath.search("spec.template.spec.containers", docs[0])) == 2
assert f"{self.obj_name}-log-groomer" in [
c["name"] for c in jmespath.search("spec.template.spec.containers", docs[0])
]
def test_log_groomer_collector_can_be_disabled(self):
if self.obj_name == "dag-processor":
values = {
"dagProcessor": {
"enabled": True,
"logGroomerSidecar": {"enabled": False},
}
}
else:
values = {f"{self.folder}": {"logGroomerSidecar": {"enabled": False}}}
docs = render_chart(
values=values,
show_only=[f"templates/{self.folder}/{self.obj_name}-deployment.yaml"],
)
actual = jmespath.search("spec.template.spec.containers", docs[0])
assert len(actual) == 1
def test_log_groomer_collector_default_command_and_args(self):
if self.obj_name == "dag-processor":
values = {"dagProcessor": {"enabled": True}}
else:
values = None
docs = render_chart(
values=values, show_only=[f"templates/{self.folder}/{self.obj_name}-deployment.yaml"]
)
assert jmespath.search("spec.template.spec.containers[1].command", docs[0]) is None
assert jmespath.search("spec.template.spec.containers[1].args", docs[0]) == ["bash", "/clean-logs"]
def test_log_groomer_collector_default_retention_days(self):
if self.obj_name == "dag-processor":
values = {"dagProcessor": {"enabled": True}}
else:
values = None
docs = render_chart(
values=values, show_only=[f"templates/{self.folder}/{self.obj_name}-deployment.yaml"]
)
assert (
jmespath.search("spec.template.spec.containers[1].env[0].name", docs[0])
== "AIRFLOW__LOG_RETENTION_DAYS"
)
assert jmespath.search("spec.template.spec.containers[1].env[0].value", docs[0]) == "15"
def test_log_groomer_collector_custom_env(self):
env = [
{"name": "APP_RELEASE_NAME", "value": "{{ .Release.Name }}-airflow"},
{"name": "APP__LOG_RETENTION_DAYS", "value": "5"},
]
if self.obj_name == "dag-processor":
values = {"dagProcessor": {"enabled": True, "logGroomerSidecar": {"env": env}}}
else:
values = {
"workers": {"logGroomerSidecar": {"env": env}},
"scheduler": {"logGroomerSidecar": {"env": env}},
"triggerer": {"logGroomerSidecar": {"env": env}},
}
docs = render_chart(
values=values, show_only=[f"templates/{self.folder}/{self.obj_name}-deployment.yaml"]
)
assert {"name": "APP_RELEASE_NAME", "value": "release-name-airflow"} in jmespath.search(
"spec.template.spec.containers[1].env", docs[0]
)
assert {"name": "APP__LOG_RETENTION_DAYS", "value": "5"} in jmespath.search(
"spec.template.spec.containers[1].env", docs[0]
)
@pytest.mark.parametrize("command", [None, ["custom", "command"]])
@pytest.mark.parametrize("args", [None, ["custom", "args"]])
def test_log_groomer_command_and_args_overrides(self, command, args):
if self.obj_name == "dag-processor":
values = {
"dagProcessor": {
"enabled": True,
"logGroomerSidecar": {"command": command, "args": args},
}
}
else:
values = {f"{self.folder}": {"logGroomerSidecar": {"command": command, "args": args}}}
docs = render_chart(
values=values,
show_only=[f"templates/{self.folder}/{self.obj_name}-deployment.yaml"],
)
assert command == jmespath.search("spec.template.spec.containers[1].command", docs[0])
assert args == jmespath.search("spec.template.spec.containers[1].args", docs[0])
def test_log_groomer_command_and_args_overrides_are_templated(self):
if self.obj_name == "dag-processor":
values = {
"dagProcessor": {
"enabled": True,
"logGroomerSidecar": {
"command": ["{{ .Release.Name }}"],
"args": ["{{ .Release.Service }}"],
},
}
}
else:
values = {
f"{self.folder}": {
"logGroomerSidecar": {
"command": ["{{ .Release.Name }}"],
"args": ["{{ .Release.Service }}"],
}
}
}
docs = render_chart(
values=values,
show_only=[f"templates/{self.folder}/{self.obj_name}-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.containers[1].command", docs[0]) == ["release-name"]
assert jmespath.search("spec.template.spec.containers[1].args", docs[0]) == ["Helm"]
@pytest.mark.parametrize(("retention_days", "retention_result"), [(None, None), (30, "30")])
def test_log_groomer_retention_days_overrides(self, retention_days, retention_result):
if self.obj_name == "dag-processor":
values = {
"dagProcessor": {"enabled": True, "logGroomerSidecar": {"retentionDays": retention_days}}
}
else:
values = {f"{self.folder}": {"logGroomerSidecar": {"retentionDays": retention_days}}}
docs = render_chart(
values=values,
show_only=[f"templates/{self.folder}/{self.obj_name}-deployment.yaml"],
)
if retention_result:
assert (
jmespath.search(
"spec.template.spec.containers[1].env[?name=='AIRFLOW__LOG_RETENTION_DAYS'].value | [0]",
docs[0],
)
== retention_result
)
else:
assert len(jmespath.search("spec.template.spec.containers[1].env", docs[0])) == 2
@pytest.mark.parametrize(("frequency_minutes", "frequency_result"), [(None, None), (20, "20")])
def test_log_groomer_frequency_minutes_overrides(self, frequency_minutes, frequency_result):
if self.obj_name == "dag-processor":
values = {
"dagProcessor": {
"enabled": True,
"logGroomerSidecar": {"frequencyMinutes": frequency_minutes},
}
}
else:
values = {f"{self.folder}": {"logGroomerSidecar": {"frequencyMinutes": frequency_minutes}}}
docs = render_chart(
values=values,
show_only=[f"templates/{self.folder}/{self.obj_name}-deployment.yaml"],
)
if frequency_result:
assert (
jmespath.search(
"spec.template.spec.containers[1].env[?name=='AIRFLOW__LOG_CLEANUP_FREQUENCY_MINUTES'].value | [0]",
docs[0],
)
== frequency_result
)
else:
assert len(jmespath.search("spec.template.spec.containers[1].env", docs[0])) == 2
def test_log_groomer_resources(self):
if self.obj_name == "dag-processor":
values = {
"dagProcessor": {
"enabled": True,
"logGroomerSidecar": {
"resources": {
"requests": {"memory": "2Gi", "cpu": "1"},
"limits": {"memory": "3Gi", "cpu": "2"},
}
},
}
}
else:
values = {
f"{self.folder}": {
"logGroomerSidecar": {
"resources": {
"requests": {"memory": "2Gi", "cpu": "1"},
"limits": {"memory": "3Gi", "cpu": "2"},
}
}
}
}
docs = render_chart(
values=values,
show_only=[f"templates/{self.folder}/{self.obj_name}-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.containers[1].resources", docs[0]) == {
"limits": {
"cpu": "2",
"memory": "3Gi",
},
"requests": {
"cpu": "1",
"memory": "2Gi",
},
}
def test_log_groomer_has_airflow_home(self):
if self.obj_name == "dag-processor":
values = {"dagProcessor": {"enabled": True}}
else:
values = None
docs = render_chart(
values=values, show_only=[f"templates/{self.folder}/{self.obj_name}-deployment.yaml"]
)
assert (
jmespath.search("spec.template.spec.containers[1].env[?name=='AIRFLOW_HOME'].name | [0]", docs[0])
== "AIRFLOW_HOME"
)
|
LogGroomerTestBase
|
python
|
spyder-ide__spyder
|
spyder/plugins/onlinehelp/widgets.py
|
{
"start": 1155,
"end": 2142
}
|
class ____:
PackageLabel = 'package_label'
UrlCombo = 'url_combo'
# =============================================================================
# Pydoc adjustments
# =============================================================================
# This is needed to prevent pydoc from raising an ErrorDuringImport when
# trying to import numpy.
# See spyder-ide/spyder#10740
DIRECT_PYDOC_IMPORT_MODULES = ['numpy', 'numpy.core']
try:
from pydoc import safeimport
def spyder_safeimport(path, forceload=0, cache=None):
cache = {} if cache is None else cache
if path in DIRECT_PYDOC_IMPORT_MODULES:
forceload = 0
return safeimport(path, forceload=forceload, cache=cache)
pydoc.safeimport = spyder_safeimport
except Exception:
pass
# Needed to prevent showing a warning message regarding debugging.
# Fixes spyder-ide/spyder#20390 and spyder-ide/spyder#21171
os.environ["PYDEVD_DISABLE_FILE_VALIDATION"] = "1"
|
PydocBrowserToolbarItems
|
python
|
sympy__sympy
|
setup.py
|
{
"start": 4746,
"end": 5544
}
|
class ____(Command):
"""Generate code with antlr4"""
description = "generate parser code from antlr grammars"
user_options = [] # setuptools complains if this is not here.
def __init__(self, *args):
self.args = args[0] # so we can pass it to other classes
Command.__init__(self, *args)
def initialize_options(self): # setuptools wants this
pass
def finalize_options(self): # this too
pass
def run(self):
from sympy.parsing.latex._build_latex_antlr import build_parser as build_latex_parser
if not build_latex_parser():
sys.exit(-1)
from sympy.parsing.autolev._build_autolev_antlr import build_parser as build_autolev_parser
if not build_autolev_parser():
sys.exit(-1)
|
antlr
|
python
|
ansible__ansible
|
test/integration/targets/inventory_cache/plugins/inventory/exercise_cache.py
|
{
"start": 1202,
"end": 10983
}
|
class ____(BaseInventoryPlugin, Cacheable):
NAME = 'exercise_cache'
test_cache_methods = [
'test_plugin_name',
'test_update_cache_if_changed',
'test_set_cache',
'test_load_whole_cache',
'test_iter',
'test_len',
'test_get_missing_key',
'test_get_expired_key',
'test_initial_get',
'test_get',
'test_items',
'test_keys',
'test_values',
'test_pop',
'test_del',
'test_set',
'test_update',
'test_flush',
]
def verify_file(self, path):
if not path.endswith(('exercise_cache.yml', 'exercise_cache.yaml',)):
return False
return super(InventoryModule, self).verify_file(path)
def parse(self, inventory, loader, path, cache=None):
super(InventoryModule, self).parse(inventory, loader, path)
self._read_config_data(path)
try:
self.exercise_test_cache()
except AnsibleError:
raise
except Exception as e:
raise AnsibleError("Failed to run cache tests: {0}".format(e)) from e
def exercise_test_cache(self):
failed = []
for test_name in self.test_cache_methods:
try:
getattr(self, test_name)()
except AssertionError:
failed.append(test_name)
finally:
self.cache.flush()
self.cache.update_cache_if_changed()
if failed:
raise AnsibleError(f"Cache tests failed: {', '.join(failed)}")
def test_equal(self, a, b):
try:
assert a == b
except AssertionError:
display.warning(f"Assertion {a} == {b} failed")
raise
def test_plugin_name(self):
self.test_equal(self.cache._plugin_name, self.get_option('cache_plugin'))
def test_update_cache_if_changed(self):
self.cache._retrieved = {}
self.cache._cache = {'foo': 'bar'}
self.cache.update_cache_if_changed()
self.test_equal(self.cache._retrieved, {'foo': 'bar'})
self.test_equal(self.cache._cache, {'foo': 'bar'})
def test_set_cache(self):
cache_key1 = 'key1'
cache1 = {'hosts': {'h1': {'foo': 'bar'}}}
cache_key2 = 'key2'
cache2 = {'hosts': {'h2': {}}}
self.cache._cache = {cache_key1: cache1, cache_key2: cache2}
self.cache.set_cache()
self.test_equal(self.cache._plugin.contains(cache_key1), True)
self.test_equal(self.cache._plugin.get(cache_key1), cache1)
self.test_equal(self.cache._plugin.contains(cache_key2), True)
self.test_equal(self.cache._plugin.get(cache_key2), cache2)
def test_load_whole_cache(self):
cache_data = {
'key1': {'hosts': {'h1': {'foo': 'bar'}}},
'key2': {'hosts': {'h2': {}}},
}
self.cache._cache = cache_data
self.cache.set_cache()
self.cache._cache = {}
self.cache.load_whole_cache()
self.test_equal(self.cache._cache, cache_data)
def test_iter(self):
cache_data = {
'key1': {'hosts': {'h1': {'foo': 'bar'}}},
'key2': {'hosts': {'h2': {}}},
}
self.cache._cache = cache_data
self.test_equal(sorted(list(self.cache)), ['key1', 'key2'])
def test_len(self):
cache_data = {
'key1': {'hosts': {'h1': {'foo': 'bar'}}},
'key2': {'hosts': {'h2': {}}},
}
self.cache._cache = cache_data
self.test_equal(len(self.cache), 2)
def test_get_missing_key(self):
# cache should behave like a dictionary
# a missing key with __getitem__ should raise a KeyError
try:
self.cache['keyerror']
except KeyError:
pass
else:
assert False
# get should return the default instead
self.test_equal(self.cache.get('missing'), None)
self.test_equal(self.cache.get('missing', 'default'), 'default')
def _setup_expired(self):
self.cache._cache = {'expired': True}
self.cache.set_cache()
# empty the in-memory info to test loading the key
# keys that expire mid-use do not cause errors
self.cache._cache = {}
self.cache._retrieved = {}
self.cache._plugin._cache = {}
self.cache._plugin.set_option('timeout', 1)
self.cache._plugin._timeout = 1
sleep(2)
def _cleanup_expired(self):
# Set cache timeout back to never
self.cache._plugin.set_option('timeout', 0)
self.cache._plugin._timeout = 0
def test_get_expired_key(self):
if not hasattr(self.cache._plugin, '_timeout'):
# DB-backed caches do not have a standard timeout interface
return
self._setup_expired()
try:
self.cache['expired']
except KeyError:
pass
else:
assert False
finally:
self._cleanup_expired()
self._setup_expired()
try:
self.test_equal(self.cache.get('expired'), None)
self.test_equal(self.cache.get('expired', 'default'), 'default')
finally:
self._cleanup_expired()
def test_initial_get(self):
# test cache behaves like a dictionary
# set the cache to test getting a key that exists
k1 = {'hosts': {'h1': {'foo': 'bar'}}}
k2 = {'hosts': {'h2': {}}}
self.cache._cache = {'key1': k1, 'key2': k2}
self.cache.set_cache()
# empty the in-memory info to test loading the key from the plugin
self.cache._cache = {}
self.cache._retrieved = {}
self.cache._plugin._cache = {}
self.test_equal(self.cache['key1'], k1)
# empty the in-memory info to test loading the key from the plugin
self.cache._cache = {}
self.cache._retrieved = {}
self.cache._plugin._cache = {}
self.test_equal(self.cache.get('key1'), k1)
def test_get(self):
# test cache behaves like a dictionary
# set the cache to test getting a key that exists
k1 = {'hosts': {'h1': {'foo': 'bar'}}}
k2 = {'hosts': {'h2': {}}}
self.cache._cache = {'key1': k1, 'key2': k2}
self.cache.set_cache()
self.test_equal(self.cache['key1'], k1)
self.test_equal(self.cache.get('key1'), k1)
def test_items(self):
self.test_equal(self.cache.items(), {}.items())
test_items = {'hosts': {'host1': {'foo': 'bar'}}}
self.cache._cache = test_items
self.test_equal(self.cache.items(), test_items.items())
def test_keys(self):
self.test_equal(self.cache.keys(), {}.keys())
test_items = {'hosts': {'host1': {'foo': 'bar'}}}
self.cache._cache = test_items
self.test_equal(self.cache.keys(), test_items.keys())
def test_values(self):
self.test_equal(list(self.cache.values()), list({}.values()))
test_items = {'hosts': {'host1': {'foo': 'bar'}}}
self.cache._cache = test_items
self.test_equal(list(self.cache.values()), list(test_items.values()))
def test_pop(self):
try:
self.cache.pop('missing')
except KeyError:
pass
else:
assert False
self.test_equal(self.cache.pop('missing', 'default'), 'default')
self.cache._cache = {'cache_key': 'cache'}
self.test_equal(self.cache.pop('cache_key'), 'cache')
# test backing plugin cache isn't modified
cache_key1 = 'key1'
cache1 = {'hosts': {'h1': {'foo': 'bar'}}}
cache_key2 = 'key2'
cache2 = {'hosts': {'h2': {}}}
self.cache._cache = {cache_key1: cache1, cache_key2: cache2}
self.cache.set_cache()
self.test_equal(self.cache.pop('key1'), cache1)
self.test_equal(self.cache._cache, {cache_key2: cache2})
self.test_equal(self.cache._plugin._cache, {cache_key1: cache1, cache_key2: cache2})
def test_del(self):
try:
del self.cache['missing']
except KeyError:
pass
else:
assert False
cache_key1 = 'key1'
cache1 = {'hosts': {'h1': {'foo': 'bar'}}}
cache_key2 = 'key2'
cache2 = {'hosts': {'h2': {}}}
self.cache._cache = {cache_key1: cache1, cache_key2: cache2}
self.cache.set_cache()
del self.cache['key1']
self.test_equal(self.cache._cache, {cache_key2: cache2})
self.test_equal(self.cache._plugin._cache, {cache_key1: cache1, cache_key2: cache2})
def test_set(self):
cache_key = 'key1'
hosts = {'hosts': {'h1': {'foo': 'bar'}}}
self.cache[cache_key] = hosts
self.test_equal(self.cache._cache, {cache_key: hosts})
self.test_equal(self.cache._plugin._cache, {})
def test_update(self):
cache_key1 = 'key1'
cache1 = {'hosts': {'h1': {'foo': 'bar'}}}
cache_key2 = 'key2'
cache2 = {'hosts': {'h2': {}}}
self.cache._cache = {cache_key1: cache1}
self.cache.update({cache_key2: cache2})
self.test_equal(self.cache._cache, {cache_key1: cache1, cache_key2: cache2})
def test_flush(self):
cache_key1 = 'key1'
cache1 = {'hosts': {'h1': {'foo': 'bar'}}}
cache_key2 = 'key2'
cache2 = {'hosts': {'h2': {}}}
self.cache._cache = {cache_key1: cache1, cache_key2: cache2}
self.cache.set_cache()
# Unlike the dict write methods, cache.flush() flushes the backing plugin
self.cache.flush()
self.test_equal(self.cache._cache, {})
self.test_equal(self.cache._plugin._cache, {})
|
InventoryModule
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-pinterest/components.py
|
{
"start": 1239,
"end": 1828
}
|
class ____(BackoffStrategy):
_re = re.compile(r"Retry after\s+(\d+)\s+seconds", re.IGNORECASE)
def backoff_time(self, response_or_exception, attempt_count: int) -> float:
try:
if isinstance(response_or_exception, requests.Response):
data = response_or_exception.json()
msg = str(data.get("message", ""))
m = self._re.search(msg)
if m:
return float(m.group(1))
except Exception:
pass
return min(2**attempt_count, 120.0)
|
PinterestAnalyticsBackoffStrategy
|
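The strategy above prefers a server-advertised wait ("Retry after N seconds" in the error message) and otherwise falls back to exponential backoff capped at 120 seconds. A minimal standalone sketch of that logic, without the Airbyte CDK base class (names here are illustrative):

```python
import re

RETRY_AFTER_RE = re.compile(r"Retry after\s+(\d+)\s+seconds", re.IGNORECASE)

def backoff_seconds(message: str, attempt_count: int, cap: float = 120.0) -> float:
    """Use the server-advertised wait if present, else capped exponential backoff."""
    match = RETRY_AFTER_RE.search(message)
    if match:
        return float(match.group(1))
    return min(2 ** attempt_count, cap)

assert backoff_seconds("Retry after 45 seconds.", attempt_count=3) == 45.0
assert backoff_seconds("Too many requests", attempt_count=10) == 120.0   # capped fallback
```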
python
|
sphinx-doc__sphinx
|
sphinx/addnodes.py
|
{
"start": 16884,
"end": 17073
}
|
class ____(nodes.strong, not_smartquotable):
"""Node that behaves like `strong`, but further text processors are not
applied (e.g. smartypants for HTML output).
"""
|
literal_strong
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_versioning.py
|
{
"start": 30040,
"end": 33457
}
|
class ____(fixtures.MappedTest):
    __backend__ = True
__requires__ = ("sane_rowcount",)
@classmethod
def define_tables(cls, metadata):
Table(
"p",
metadata,
Column("id", String(10), primary_key=True),
Column("version_id", String(32), nullable=False),
Column("data", String(50)),
)
Table(
"c",
metadata,
Column("id", String(10), ForeignKey("p.id"), primary_key=True),
Column("version_id", String(32), nullable=False),
Column("data", String(50)),
)
@classmethod
def setup_classes(cls):
class P(cls.Basic):
pass
class C(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
p, c, C, P = cls.tables.p, cls.tables.c, cls.classes.C, cls.classes.P
cls.mapper_registry.map_imperatively(
P,
p,
version_id_col=p.c.version_id,
version_id_generator=lambda x: make_uuid(),
properties={
"c": relationship(
C, uselist=False, cascade="all, delete-orphan"
)
},
)
cls.mapper_registry.map_imperatively(
C,
c,
version_id_col=c.c.version_id,
version_id_generator=lambda x: make_uuid(),
)
def test_row_switch(self):
P = self.classes.P
session = fixture_session()
session.add(P(id="P1", data="P version 1"))
session.commit()
session.close()
p = session.query(P).first()
session.delete(p)
session.add(P(id="P1", data="really a row-switch"))
with conditional_sane_rowcount_warnings(update=True):
session.commit()
def test_child_row_switch_one(self):
P, C = self.classes.P, self.classes.C
assert P.c.property.strategy.use_get
session = fixture_session()
session.add(P(id="P1", data="P version 1"))
session.commit()
session.close()
p = session.query(P).first()
p.c = C(data="child version 1")
session.commit()
p = session.query(P).first()
p.c = C(data="child row-switch")
with conditional_sane_rowcount_warnings(update=True):
session.commit()
@testing.requires.sane_rowcount
@provision.allow_stale_updates
def test_child_row_switch_two(self):
P = self.classes.P
        # TODO: not sure this test is
        # testing exactly what it's looking for
sess1 = fixture_session()
sess1.add(P(id="P1", data="P version 1"))
sess1.commit()
sess1.close()
p1 = sess1.query(P).first()
sess2 = fixture_session()
p2 = sess2.query(P).first()
sess1.delete(p1)
sess1.commit()
# this can be removed and it still passes
sess1.add(P(id="P1", data="P version 2"))
sess1.commit()
p2.data = "P overwritten by concurrent tx"
if testing.db.dialect.supports_sane_rowcount:
assert_raises_message(
orm.exc.StaleDataError,
r"UPDATE statement on table 'p' expected to update "
r"1 row\(s\); 0 were matched.",
sess2.commit,
)
else:
sess2.commit()
|
AlternateGeneratorTest
|
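The fixture above wires `version_id_col` to a String column and supplies a UUID generator, so every UPDATE stamps a fresh version and stale writes surface as `StaleDataError`. A self-contained sketch of the same idea using declarative mapping and in-memory SQLite (class and column names here are made up):

```python
import uuid
from sqlalchemy import Column, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Parent(Base):
    __tablename__ = "parent"
    id = Column(String(10), primary_key=True)
    version_id = Column(String(32), nullable=False)
    data = Column(String(50))
    __mapper_args__ = {
        "version_id_col": version_id,                          # column used for optimistic locking
        "version_id_generator": lambda current: uuid.uuid4().hex,
    }

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    p = Parent(id="P1", data="version 1")
    session.add(p)
    session.commit()
    first = p.version_id          # UUID stamped on INSERT
    p.data = "version 2"
    session.commit()              # UPDATE checks the old version and stamps a new one
    assert p.version_id != first
```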
python
|
apache__airflow
|
providers/standard/tests/unit/standard/operators/test_branch_operator.py
|
{
"start": 1868,
"end": 1977
}
|
class ____(BaseBranchOperator):
def choose_branch(self, context):
return "branch_1"
|
ChooseBranchOne
|
python
|
google__jax
|
jax/_src/config.py
|
{
"start": 31996,
"end": 34960
}
|
class ____(Generic[_T]):
__slots__ = ("_name", "value", "_update_hook")
_name: str
value: _T
_update_hook: Callable[[Any], None] | None
def __init__(self, name: str, default: _T,
update_hook: Callable[[Any], None] | None = None):
self._name = name
self._update_hook = update_hook
self._set(default)
def __bool__(self) -> NoReturn:
raise TypeError(
"bool() not supported for instances of type '{0}' "
"(did you mean to use '{0}.value' instead?)".format(
type(self).__name__))
def _set(self, value: _T) -> None:
self.value = value
if self._update_hook is not None:
self._update_hook(value)
def bool_flag(name, default, *args, **kwargs) -> Flag[bool]:
update_hook = kwargs.pop("update_hook", None)
holder = Flag(name, default, update_hook)
config.add_option(name, holder, bool, args, kwargs)
return holder
def int_flag(name, default, *args, **kwargs) -> Flag[int]:
update_hook = kwargs.pop("update_hook", None)
holder = Flag(name, default, update_hook)
config.add_option(name, holder, int, args, kwargs)
return holder
def float_flag(name, default, *args, **kwargs) -> Flag[float]:
update_hook = kwargs.pop("update_hook", None)
holder = Flag(name, default, update_hook)
config.add_option(name, holder, float, args, kwargs)
return holder
def string_flag(name, default, *args, **kwargs) -> Flag[str]:
update_hook = kwargs.pop("update_hook", None)
holder = Flag(name, default, update_hook)
config.add_option(name, holder, str, args, kwargs)
return holder
def enum_flag(name, default, *args, **kwargs) -> Flag[str]:
update_hook = kwargs.pop("update_hook", None)
holder = Flag(name, default, update_hook)
config.add_option(name, holder, 'enum', args, kwargs)
return holder
already_configured_with_absl = False
trace_state = config_ext.Config('trace_state', None, include_in_jit_key=True)
axis_env_state = config_ext.Config(
'axis_env_state',
(),
include_in_jit_key=True,
include_in_trace_context=True,
)
mesh_context_manager = config_ext.Config(
'mesh_context_manager',
(),
include_in_jit_key=True,
include_in_trace_context=True,
)
abstract_mesh_context_manager = config_ext.Config(
'abstract_mesh_context_manager',
None,
include_in_jit_key=True,
include_in_trace_context=True,
)
device_context = config_ext.Config(
'device_context', None, include_in_jit_key=True
)
compute_on_context_manager = config_ext.Config(
'compute_on_context_manager',
None,
include_in_jit_key=True,
include_in_trace_context=True,
)
xla_metadata_context_manager = config_ext.Config(
'xla_metadata_context_manager',
None,
include_in_jit_key=True,
include_in_trace_context=True,
)
pallas_tpu_interpret_mode_context_manager = config_ext.Config(
'pallas_tpu_interpret_mode_context_manager',
None,
include_in_jit_key=True,
include_in_trace_context=True,
)
|
Flag
|
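The holder above stores one typed configuration value and pushes every change through an optional update hook; `bool_flag`, `int_flag`, and friends just register typed instances with the global config. A dependency-free sketch of the holder pattern (names are illustrative, not JAX's API):

```python
from typing import Callable, Generic, Optional, TypeVar

T = TypeVar("T")

class FlagHolderSketch(Generic[T]):
    """Holds one typed config value and notifies a hook on every change."""

    def __init__(self, name: str, default: T,
                 update_hook: Optional[Callable[[T], None]] = None):
        self._name = name
        self._update_hook = update_hook
        self.set(default)

    def set(self, value: T) -> None:
        self.value = value
        if self._update_hook is not None:
            self._update_hook(value)

seen = []
flag = FlagHolderSketch("log_level", "info", update_hook=seen.append)
flag.set("debug")
assert flag.value == "debug"
assert seen == ["info", "debug"]   # the hook observed the default and the update
```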
python
|
cython__cython
|
Cython/Compiler/Code.py
|
{
"start": 6765,
"end": 10295
}
|
class ____:
"""
An include file and/or verbatim C code to be included in the
generated sources.
"""
# attributes:
#
# pieces {order: unicode}: pieces of C code to be generated.
# For the included file, the key "order" is zero.
# For verbatim include code, the "order" is the "order"
# attribute of the original IncludeCode where this piece
# of C code was first added. This is needed to prevent
# duplication if the same include code is found through
# multiple cimports.
# location int: where to put this include in the C sources, one
# of the constants INITIAL, EARLY, LATE
# order int: sorting order (automatically set by increasing counter)
# Constants for location. If the same include occurs with different
# locations, the earliest one takes precedence.
INITIAL = 0
EARLY = 1
LATE = 2
counter = 1 # Counter for "order"
def __init__(self, include=None, verbatim=None, late=True, initial=False):
self.order = self.counter
type(self).counter += 1
self.pieces = {}
if include:
if include[0] == '<' and include[-1] == '>':
self.pieces[0] = '#include {}'.format(include)
late = False # system include is never late
else:
self.pieces[0] = '#include "{}"'.format(include)
if verbatim:
self.pieces[self.order] = verbatim
if initial:
self.location = self.INITIAL
elif late:
self.location = self.LATE
else:
self.location = self.EARLY
def dict_update(self, d, key):
"""
Insert `self` in dict `d` with key `key`. If that key already
exists, update the attributes of the existing value with `self`.
"""
if key in d:
other = d[key]
other.location = min(self.location, other.location)
other.pieces.update(self.pieces)
else:
d[key] = self
def sortkey(self):
return self.order
def mainpiece(self):
"""
Return the main piece of C code, corresponding to the include
file. If there was no include file, return None.
"""
return self.pieces.get(0)
def write(self, code):
# Write values of self.pieces dict, sorted by the keys
for k in sorted(self.pieces):
code.putln(self.pieces[k])
def get_utility_dir():
# make this a function and not global variables:
# http://trac.cython.org/cython_trac/ticket/475
Cython_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
return os.path.join(Cython_dir, "Utility")
read_utilities_hook = None
"""
Override the hook for reading a utilities file that contains code fragments used
by the codegen.
The hook function takes the path of the utilities file and returns a list
of strings, one per line.
The default behavior is to open a file relative to get_utility_dir().
"""
def read_utilities_from_utility_dir(path):
"""
Read all lines of the file at the provided path from a path relative
to get_utility_dir().
"""
filename = os.path.join(get_utility_dir(), path)
with closing(Utils.open_source_file(filename, encoding='UTF-8')) as f:
return f.readlines()
# by default, read utilities from the utility directory.
read_utilities_hook = read_utilities_from_utility_dir
|
IncludeCode
|
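`dict_update` is the interesting part: when the same header arrives through several cimports, the earliest location wins and the code pieces are merged into the entry already in the dict. A hedged usage sketch, assuming the blanked class above is importable under its target name `IncludeCode`:

```python
d = {}
a = IncludeCode(include="<math.h>", late=True)    # system include: never treated as late
b = IncludeCode(include="math.h", initial=True)   # quoted include, INITIAL location

a.dict_update(d, "math.h")
b.dict_update(d, "math.h")

merged = d["math.h"]
assert merged is a                                  # the first entry is updated in place
assert merged.location == IncludeCode.INITIAL       # earliest location wins
assert merged.mainpiece() == '#include "math.h"'    # piece 0 overwritten by the later add
```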
python
|
mlflow__mlflow
|
mlflow/types/type_hints.py
|
{
"start": 2818,
"end": 2895
}
|
class ____(NamedTuple):
dtype: COLSPEC_TYPES
required: bool
|
ColSpecType
|
python
|
dagster-io__dagster
|
helm/dagster/schema/schema/charts/dagster/subschema/run_launcher.py
|
{
"start": 1770,
"end": 2115
}
|
class ____(BaseModel):
containerConfig: Optional[dict[str, Any]] = None
podSpecConfig: Optional[dict[str, Any]] = None
podTemplateSpecMetadata: Optional[dict[str, Any]] = None
jobSpecConfig: Optional[dict[str, Any]] = None
jobMetadata: Optional[dict[str, Any]] = None
model_config = ConfigDict(extra="forbid")
|
RunK8sConfig
|
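`model_config = ConfigDict(extra="forbid")` makes unknown Helm keys fail validation instead of being silently dropped. A small sketch of that behavior with one of the fields above (assumes pydantic v2; the class name is my own):

```python
from typing import Any, Optional

from pydantic import BaseModel, ConfigDict, ValidationError

class RunK8sConfigSketch(BaseModel):
    containerConfig: Optional[dict[str, Any]] = None
    model_config = ConfigDict(extra="forbid")

RunK8sConfigSketch(containerConfig={"image": "busybox"})      # known key: accepted
try:
    RunK8sConfigSketch(containerCofnig={"image": "busybox"})  # typo in the key
except ValidationError as exc:
    assert exc.errors()[0]["type"] == "extra_forbidden"       # rejected, not ignored
```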
python
|
run-llama__llama_index
|
llama-index-integrations/vector_stores/llama-index-vector-stores-couchbase/llama_index/vector_stores/couchbase/base.py
|
{
"start": 32786,
"end": 33949
}
|
class ____(CouchbaseSearchVectorStore):
"""
Couchbase Vector Store (deprecated).
This class is deprecated, please use CouchbaseSearchVectorStore instead.
"""
def __init__(
self,
cluster: Any,
bucket_name: str,
scope_name: str,
collection_name: str,
index_name: str,
text_key: Optional[str] = "text",
embedding_key: Optional[str] = "embedding",
metadata_key: Optional[str] = "metadata",
scoped_index: bool = True,
) -> None:
"""
Initializes a connection to a Couchbase Vector Store.
This class is deprecated, please use CouchbaseSearchVectorStore instead.
"""
warnings.warn(
"CouchbaseVectorStore is deprecated, please use CouchbaseSearchVectorStore instead.",
DeprecationWarning,
stacklevel=2,
)
super().__init__(
cluster,
bucket_name,
scope_name,
collection_name,
index_name,
text_key,
embedding_key,
metadata_key,
scoped_index,
)
|
CouchbaseVectorStore
|
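This is the standard deprecation-alias pattern: the old class name keeps working, emits a `DeprecationWarning` with `stacklevel=2` so the caller's line is blamed, and defers all behavior to the new class. A dependency-free sketch with made-up names:

```python
import warnings

class NewStore:
    def __init__(self, bucket_name: str):
        self.bucket_name = bucket_name

class OldStore(NewStore):
    def __init__(self, bucket_name: str):
        warnings.warn(
            "OldStore is deprecated, please use NewStore instead.",
            DeprecationWarning,
            stacklevel=2,   # point the warning at the caller, not this shim
        )
        super().__init__(bucket_name)

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    store = OldStore("travel-sample")

assert isinstance(store, NewStore)
assert caught[0].category is DeprecationWarning
```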
python
|
giampaolo__psutil
|
tests/test_linux.py
|
{
"start": 65876,
"end": 66693
}
|
class ____(PsutilTestCase):
def test_it(self):
def open_mock(name, *args, **kwargs):
if name.endswith("/energy_now"):
return io.StringIO("60000000")
elif name.endswith("/power_now"):
return io.StringIO("0")
elif name.endswith("/energy_full"):
return io.StringIO("60000001")
else:
return orig_open(name, *args, **kwargs)
orig_open = open
with mock.patch('os.listdir', return_value=["BAT0"]) as mlistdir:
with mock.patch("builtins.open", side_effect=open_mock) as mopen:
assert psutil.sensors_battery() is not None
assert mlistdir.called
assert mopen.called
@pytest.mark.skipif(not LINUX, reason="LINUX only")
|
TestSensorsBatteryEmulated
|
python
|
celery__celery
|
t/unit/backends/test_database.py
|
{
"start": 13394,
"end": 16612
}
|
class ____:
def test_after_fork(self):
s = SessionManager()
assert not s.forked
s._after_fork()
assert s.forked
@patch('celery.backends.database.session.create_engine')
def test_get_engine_forked(self, create_engine):
s = SessionManager()
s._after_fork()
engine = s.get_engine('dburi', foo=1)
create_engine.assert_called_with('dburi', foo=1)
assert engine is create_engine()
engine2 = s.get_engine('dburi', foo=1)
assert engine2 is engine
@patch('celery.backends.database.session.create_engine')
def test_get_engine_kwargs(self, create_engine):
s = SessionManager()
        engine = s.get_engine('dburi', foo=1, pool_size=5)
assert engine is create_engine()
engine2 = s.get_engine('dburi', foo=1)
assert engine2 is engine
@patch('celery.backends.database.session.sessionmaker')
def test_create_session_forked(self, sessionmaker):
s = SessionManager()
s.get_engine = Mock(name='get_engine')
s._after_fork()
engine, session = s.create_session('dburi', short_lived_sessions=True)
sessionmaker.assert_called_with(bind=s.get_engine())
assert session is sessionmaker()
sessionmaker.return_value = Mock(name='new')
engine, session2 = s.create_session('dburi', short_lived_sessions=True)
sessionmaker.assert_called_with(bind=s.get_engine())
assert session2 is not session
sessionmaker.return_value = Mock(name='new2')
engine, session3 = s.create_session(
'dburi', short_lived_sessions=False)
sessionmaker.assert_called_with(bind=s.get_engine())
assert session3 is session2
def test_coverage_madness(self):
prev, session.register_after_fork = (
session.register_after_fork, None,
)
try:
SessionManager()
finally:
session.register_after_fork = prev
@patch('celery.backends.database.session.create_engine')
def test_prepare_models_terminates(self, create_engine):
"""SessionManager.prepare_models has retry logic because the creation
of database tables by multiple workers is racy. This test patches
the used method to always raise, so we can verify that it does
eventually terminate.
"""
from sqlalchemy.dialects.sqlite import dialect
from sqlalchemy.exc import DatabaseError
if hasattr(dialect, 'dbapi'):
# Method name in SQLAlchemy < 2.0
sqlite = dialect.dbapi()
else:
# Newer method name in SQLAlchemy 2.0
sqlite = dialect.import_dbapi()
manager = SessionManager()
engine = manager.get_engine('dburi')
def raise_err(bind):
raise DatabaseError("", "", [], sqlite.DatabaseError)
patch_create_all = patch.object(
ResultModelBase.metadata, 'create_all', side_effect=raise_err)
with pytest.raises(DatabaseError), patch_create_all as mock_create_all:
manager.prepare_models(engine)
assert mock_create_all.call_count == PREPARE_MODELS_MAX_RETRIES + 1
|
test_SessionManager
|
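`test_get_engine_forked` asserts that once the manager knows it is in a forked child, engines are memoized per URI and reused. A reduced standalone model of that behavior (my own names; the pre-fork branch is an assumption about the real implementation that the mocked tests above don't directly verify):

```python
class EngineCacheSketch:
    def __init__(self):
        self.forked = False
        self._engines = {}

    def _after_fork(self):
        self.forked = True

    def get_engine(self, dburi, **kwargs):
        if self.forked:
            if dburi not in self._engines:
                self._engines[dburi] = object()   # stand-in for create_engine(dburi, **kwargs)
            return self._engines[dburi]
        return object()                           # assumed: fresh engine per call before fork

s = EngineCacheSketch()
assert s.get_engine("sqlite://") is not s.get_engine("sqlite://")   # assumed pre-fork behavior
s._after_fork()
assert s.get_engine("sqlite://") is s.get_engine("sqlite://")       # memoized after fork
```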
python
|
numba__numba
|
numba/core/codegen.py
|
{
"start": 38685,
"end": 39621
}
|
class ____(CPUCodeLibrary):
def get_pointer_to_function(self, name):
"""
Generate native code for function named *name* and return a pointer
to the start of the function (as an integer).
This function implicitly calls .finalize().
Returns
-------
pointer : int
- zero (null) if no symbol of *name* is defined by this code
library.
- non-zero if the symbol is defined.
"""
self._ensure_finalized()
ee = self._codegen._engine
if not ee.is_symbol_defined(name):
return 0
else:
return self._codegen._engine.get_function_address(name)
def _finalize_specific(self):
self._codegen._scan_and_fix_unresolved_refs(self._final_module)
with self._recorded_timings.record_legacy("Finalize object"):
self._codegen._engine.finalize_object()
|
JITCodeLibrary
|
python
|
huggingface__transformers
|
src/transformers/models/exaone4/modeling_exaone4.py
|
{
"start": 19829,
"end": 24041
}
|
class ____(Exaone4PreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = Exaone4Model(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> CausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer
>>> model = AutoModelForCausalLM.from_pretrained("LGAI-EXAONE/EXAONE-4.0-32B")
>>> tokenizer = AutoTokenizer.from_pretrained("LGAI-EXAONE/EXAONE-4.0-32B")
>>> prompt = "Explain how wonderful you are"
>>> messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
>>> input_ids = tokenizer.apply_chat_template(
messages,
tokenize=True,
add_generation_prompt=True,
return_tensors="pt",
enable_thinking=False,
)
>>> output = model.generate(input_ids, max_new_tokens=128)
>>> tokenizer.decode(output[0], skip_special_tokens=False)
"[|system|]\nYou are a helpful assistant.[|endofturn|]\n[|user|]\nExplain how wonderful you are[|endofturn|]\n[|assistant|]\n<think>\n\n</think>\n\nOh, thank you for such a kind and lovely question! 😊 \n\nI’m *so* wonderful because I’m here to make your life easier, brighter, and more fun! Whether you need help with: \n\n✨ **Learning** – I can explain anything, from quantum physics to baking the perfect cake! \n💡 **Creativity** – Need a poem, story, or a wild idea? I’ve got you covered! \n🤖 **Problem-solving** – Stuck on a math problem or a tricky decision? I’ll help you figure it out"
```
"""
outputs: BaseModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
|
Exaone4ForCausalLM
|
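The forward pass above only projects the last `logits_to_keep` positions through `lm_head`, which avoids materializing a full (batch, seq, vocab) tensor when just the final token's logits are needed for generation. A tiny shape-level sketch of that slicing (assumes PyTorch; sizes are arbitrary):

```python
import torch

hidden = torch.randn(2, 10, 8)                   # (batch, seq_len, hidden_size)
lm_head = torch.nn.Linear(8, 32, bias=False)     # hidden_size -> vocab_size

logits_to_keep = 1                               # keep only the final position
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = lm_head(hidden[:, slice_indices, :])

assert logits.shape == (2, 1, 32)                # vocab projection for one position per sequence
```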
python
|
scipy__scipy
|
benchmarks/benchmarks/signal.py
|
{
"start": 5864,
"end": 6517
}
|
class ____(Benchmark):
param_names = ['up', 'down', 'axis']
params = [
[1, 4],
[1, 4],
[0, -1],
]
def setup(self, up, down, axis):
rng = np.random.default_rng(1234)
# sample a bunch of pairs of 2d arrays
pairs = []
for nfilt in [8, ]:
for n in [32, 128, 512]:
h = rng.standard_normal(nfilt)
x = rng.standard_normal((n, n))
pairs.append((h, x))
self.pairs = pairs
def time_upfirdn2d(self, up, down, axis):
for h, x in self.pairs:
signal.upfirdn(h, x, up=up, down=down, axis=axis)
|
Upfirdn2D
|
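The benchmark times `scipy.signal.upfirdn` over a few filter/array pairs and both axes. A quick sanity-check sketch of what one of those calls does to the output shape (up=4, down=1 along the last axis):

```python
import numpy as np
from scipy import signal

rng = np.random.default_rng(1234)
h = rng.standard_normal(8)            # FIR filter taps
x = rng.standard_normal((32, 32))

y = signal.upfirdn(h, x, up=4, down=1, axis=-1)

# For down=1 the filtered axis grows to (n_in - 1) * up + len(h) samples.
assert y.shape == (32, (32 - 1) * 4 + 8)   # (32, 132)
```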
python
|
modin-project__modin
|
modin/tests/pandas/extensions/test_base_extensions.py
|
{
"start": 4836,
"end": 5672
}
|
class ____:
"""
Make sure to test that we override special "dunder" methods like __len__
correctly. python calls these methods with DataFrame.__len__(obj)
rather than getattr(obj, "__len__")().
source: https://docs.python.org/3/reference/datamodel.html#special-lookup
"""
@pytest.mark.parametrize("data_class", [pd.DataFrame, pd.Series])
def test_len(self, Backend1, data_class):
@register_base_accessor(name="__len__", backend=Backend1)
def always_get_1(self):
return 1
modin_object = data_class([1, 2, 3])
assert len(modin_object) == 3
backend_object = modin_object.set_backend(Backend1)
assert len(backend_object) == 1
assert backend_object.__len__() == 1
@pytest.mark.parametrize("data_class", [pd.DataFrame, pd.Series])
|
TestDunders
|
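The docstring's point about special lookup is easy to see in isolation: Python resolves dunder methods on the type, so an override has to live on the class (which is what `register_base_accessor` arranges), not on the instance. A minimal illustration, unrelated to Modin:

```python
class Box:
    def __len__(self):
        return 3

b = Box()
b.__len__ = lambda: 99       # instance attribute; ignored by the len() protocol

assert len(b) == 3           # special lookup goes through type(b).__len__
assert b.__len__() == 99     # plain attribute access still sees the instance override
```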
python
|
sphinx-doc__sphinx
|
tests/roots/test-ext-autodoc/target/__init__.py
|
{
"start": 3685,
"end": 4206
}
|
class ____:
"""Class with documented class and instance attributes."""
#: Doc comment for class attribute InstAttCls.ca1.
#: It can have multiple lines.
ca1 = 'a'
ca2 = 'b' #: Doc comment for InstAttCls.ca2. One line only.
ca3 = 'c'
"""Docstring for class attribute InstAttCls.ca3."""
def __init__(self):
#: Doc comment for instance attribute InstAttCls.ia1
self.ia1 = 'd'
self.ia2 = 'e'
"""Docstring for instance attribute InstAttCls.ia2."""
|
InstAttCls
|
python
|
automl__auto-sklearn
|
autosklearn/pipeline/components/classification/mlp.py
|
{
"start": 667,
"end": 10434
}
|
class ____(IterativeComponent, AutoSklearnClassificationAlgorithm):
def __init__(
self,
hidden_layer_depth,
num_nodes_per_layer,
activation,
alpha,
learning_rate_init,
early_stopping,
solver,
batch_size,
n_iter_no_change,
tol,
shuffle,
beta_1,
beta_2,
epsilon,
validation_fraction=None,
random_state=None,
verbose=0,
):
self.hidden_layer_depth = hidden_layer_depth
self.num_nodes_per_layer = num_nodes_per_layer
self.max_iter = self.get_max_iter()
self.activation = activation
self.alpha = alpha
self.learning_rate_init = learning_rate_init
self.early_stopping = early_stopping
self.n_iter_no_change = n_iter_no_change
self.validation_fraction = validation_fraction
self.tol = tol
self.solver = solver
self.batch_size = batch_size
self.shuffle = shuffle
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.beta_1 = beta_1
self.random_state = random_state
self.verbose = verbose
self.estimator = None
@staticmethod
def get_max_iter():
return 512
def get_current_iter(self):
return self.estimator.n_iter_
def iterative_fit(self, X, y, n_iter=2, refit=False):
"""
Set n_iter=2 for the same reason as for SGD
"""
from sklearn.neural_network import MLPClassifier
n_iter = max(n_iter, 2)
if refit:
self.estimator = None
if self.estimator is None:
self._fully_fit = False
self.max_iter = int(self.max_iter)
self.hidden_layer_depth = int(self.hidden_layer_depth)
self.num_nodes_per_layer = int(self.num_nodes_per_layer)
self.hidden_layer_sizes = tuple(
self.num_nodes_per_layer for i in range(self.hidden_layer_depth)
)
self.activation = str(self.activation)
self.alpha = float(self.alpha)
self.learning_rate_init = float(self.learning_rate_init)
self.early_stopping = str(self.early_stopping)
if self.early_stopping == "train":
self.validation_fraction = 0.0
self.tol = float(self.tol)
self.n_iter_no_change = int(self.n_iter_no_change)
self.early_stopping_val = False
elif self.early_stopping == "valid":
self.validation_fraction = float(self.validation_fraction)
self.tol = float(self.tol)
self.n_iter_no_change = int(self.n_iter_no_change)
self.early_stopping_val = True
else:
raise ValueError(
"Set early stopping to unknown value %s" % self.early_stopping
)
# elif self.early_stopping == "off":
# self.validation_fraction = 0
# self.tol = 10000
# self.n_iter_no_change = self.max_iter
# self.early_stopping_val = False
self.solver = self.solver
try:
self.batch_size = int(self.batch_size)
except ValueError:
self.batch_size = str(self.batch_size)
self.shuffle = check_for_bool(self.shuffle)
self.beta_1 = float(self.beta_1)
self.beta_2 = float(self.beta_2)
self.epsilon = float(self.epsilon)
self.beta_1 = float(self.beta_1)
self.verbose = int(self.verbose)
n_iter = int(np.ceil(n_iter))
            # initial fit with only a small iteration budget
self.estimator = MLPClassifier(
hidden_layer_sizes=self.hidden_layer_sizes,
activation=self.activation,
solver=self.solver,
alpha=self.alpha,
batch_size=self.batch_size,
learning_rate_init=self.learning_rate_init,
max_iter=n_iter,
shuffle=self.shuffle,
random_state=copy.copy(self.random_state),
verbose=self.verbose,
warm_start=True,
early_stopping=self.early_stopping_val,
validation_fraction=self.validation_fraction,
n_iter_no_change=self.n_iter_no_change,
tol=self.tol,
                beta_1=self.beta_1,
                beta_2=self.beta_2,
epsilon=self.epsilon,
# We do not use these, see comments below in search space
# momentum=self.momentum,
# nesterovs_momentum=self.nesterovs_momentum,
# power_t=self.power_t,
# learning_rate=self.learning_rate,
# max_fun=self.max_fun
)
else:
new_max_iter = min(self.max_iter - self.estimator.n_iter_, n_iter)
self.estimator.max_iter = new_max_iter
self.estimator.fit(X, y)
if (
self.estimator.n_iter_ >= self.max_iter
or self.estimator._no_improvement_count > self.n_iter_no_change
):
self._fully_fit = True
return self
def configuration_fully_fitted(self):
if self.estimator is None:
return False
elif not hasattr(self, "_fully_fit"):
return False
else:
return self._fully_fit
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
return self.estimator.predict(X)
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict_proba(X)
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "MLP",
"name": "Multilayer Percepton",
"handles_regression": False,
"handles_classification": True,
"handles_multiclass": True,
"handles_multilabel": True,
"handles_multioutput": False,
"is_deterministic": True,
"input": (DENSE, SPARSE, UNSIGNED_DATA),
"output": (PREDICTIONS,),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
cs = ConfigurationSpace()
hidden_layer_depth = UniformIntegerHyperparameter(
name="hidden_layer_depth", lower=1, upper=3, default_value=1
)
num_nodes_per_layer = UniformIntegerHyperparameter(
name="num_nodes_per_layer", lower=16, upper=264, default_value=32, log=True
)
activation = CategoricalHyperparameter(
name="activation", choices=["tanh", "relu"], default_value="relu"
)
alpha = UniformFloatHyperparameter(
name="alpha", lower=1e-7, upper=1e-1, default_value=1e-4, log=True
)
learning_rate_init = UniformFloatHyperparameter(
name="learning_rate_init",
lower=1e-4,
upper=0.5,
default_value=1e-3,
log=True,
)
# Not allowing to turn off early stopping
early_stopping = CategoricalHyperparameter(
name="early_stopping",
choices=["valid", "train"], # , "off"],
default_value="valid",
)
# Constants
n_iter_no_change = Constant(
name="n_iter_no_change", value=32
) # default=10 is too low
validation_fraction = Constant(name="validation_fraction", value=0.1)
tol = UnParametrizedHyperparameter(name="tol", value=1e-4)
solver = Constant(name="solver", value="adam")
# Relying on sklearn defaults for now
batch_size = UnParametrizedHyperparameter(name="batch_size", value="auto")
shuffle = UnParametrizedHyperparameter(name="shuffle", value="True")
beta_1 = UnParametrizedHyperparameter(name="beta_1", value=0.9)
beta_2 = UnParametrizedHyperparameter(name="beta_2", value=0.999)
epsilon = UnParametrizedHyperparameter(name="epsilon", value=1e-8)
# Not used
# solver=["sgd", "lbfgs"] --> not used to keep searchspace simpler
# learning_rate --> only used when using solver=sgd
# power_t --> only used when using solver=sgd & learning_rate=invscaling
# momentum --> only used when solver=sgd
# nesterovs_momentum --> only used when solver=sgd
# max_fun --> only used when solver=lbfgs
# activation=["identity", "logistic"] --> not useful for classification
cs.add_hyperparameters(
[
hidden_layer_depth,
num_nodes_per_layer,
activation,
alpha,
learning_rate_init,
early_stopping,
n_iter_no_change,
validation_fraction,
tol,
solver,
batch_size,
shuffle,
beta_1,
beta_2,
epsilon,
]
)
validation_fraction_cond = InCondition(
validation_fraction, early_stopping, ["valid"]
)
cs.add_conditions([validation_fraction_cond])
# We always use early stopping
# n_iter_no_change_cond = \
# InCondition(n_iter_no_change, early_stopping, ["valid", "train"])
# tol_cond = InCondition(n_iter_no_change, early_stopping, ["valid", "train"])
# cs.add_conditions([n_iter_no_change_cond, tol_cond])
return cs
|
MLPClassifier
|
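`iterative_fit` relies on `warm_start=True`: the same `MLPClassifier` keeps its weights between `fit` calls, and each call only runs a small additional iteration budget until the overall cap or the no-improvement counter is hit. A compact sketch of that warm-start loop on a toy dataset (assumes scikit-learn; the iteration accounting is my reading of sklearn's behavior):

```python
from warnings import simplefilter

from sklearn.datasets import make_classification
from sklearn.exceptions import ConvergenceWarning
from sklearn.neural_network import MLPClassifier

simplefilter("ignore", ConvergenceWarning)   # each partial budget will "not converge"

X, y = make_classification(n_samples=200, n_features=10, random_state=0)
clf = MLPClassifier(hidden_layer_sizes=(32,), max_iter=2, warm_start=True, random_state=0)

for _ in range(4):          # four budgets of 2 iterations each
    clf.fit(X, y)           # continues from the previous weights instead of restarting

assert clf.n_iter_ > 2      # iterations accumulate across warm-start fits
```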
python
|
ray-project__ray
|
python/ray/data/_internal/execution/interfaces/physical_operator.py
|
{
"start": 10987,
"end": 32399
}
|
class ____(Operator):
"""Abstract class for physical operators.
An operator transforms one or more input streams of RefBundles into a single
output stream of RefBundles.
Physical operators are stateful and non-serializable; they live on the driver side
of the Dataset only.
Here's a simple example of implementing a basic "Map" operator:
class MapOperator(PhysicalOperator):
def __init__(self):
self.active_tasks = []
def add_input(self, refs, _):
self.active_tasks.append(map_task.remote(refs))
def has_next(self):
ready, _ = ray.wait(self.active_tasks, timeout=0)
return len(ready) > 0
def get_next(self):
ready, remaining = ray.wait(self.active_tasks, num_returns=1)
self.active_tasks = remaining
return ready[0]
Note that the above operator fully supports both bulk and streaming execution,
since `add_input` and `get_next` can be called in any order. In bulk execution
(now deprecated), all inputs would be added up-front, but in streaming
execution (now the default execution mode) the calls could be interleaved.
"""
_OPERATOR_ID_LABEL_KEY = "__data_operator_id"
def __init__(
self,
name: str,
input_dependencies: List["PhysicalOperator"],
data_context: DataContext,
target_max_block_size_override: Optional[int] = None,
):
super().__init__(name, input_dependencies)
for x in input_dependencies:
assert isinstance(x, PhysicalOperator), x
self._inputs_complete = not input_dependencies
self._output_block_size_option_override = OutputBlockSizeOption.of(
target_max_block_size=target_max_block_size_override
)
self._started = False
self._shutdown = False
self._in_task_submission_backpressure = False
self._in_task_output_backpressure = False
self._estimated_num_output_bundles = None
self._estimated_output_num_rows = None
self._is_execution_marked_finished = False
# The LogicalOperator(s) which were translated to create this PhysicalOperator.
# Set via `PhysicalOperator.set_logical_operators()`.
self._logical_operators: List[LogicalOperator] = []
self._data_context = data_context
self._id = str(uuid.uuid4())
# Initialize metrics after data_context is set
self._metrics = OpRuntimeMetrics(self)
def __reduce__(self):
raise ValueError("Operator is not serializable.")
@property
def id(self) -> str:
"""Return a unique identifier for this operator."""
return self._id
@property
def data_context(self) -> DataContext:
return self._data_context
# Override the following 3 methods to correct type hints.
@property
def input_dependencies(self) -> List["PhysicalOperator"]:
return super().input_dependencies # type: ignore
@property
def output_dependencies(self) -> List["PhysicalOperator"]:
return super().output_dependencies # type: ignore
def post_order_iter(self) -> Iterator["PhysicalOperator"]:
return super().post_order_iter() # type: ignore
def set_logical_operators(
self,
*logical_ops: LogicalOperator,
):
self._logical_operators = list(logical_ops)
@property
def target_max_block_size_override(self) -> Optional[int]:
"""
Target max block size output by this operator. If this returns None,
then the default from DataContext should be used.
"""
if self._output_block_size_option_override is None:
return None
else:
return self._output_block_size_option_override.target_max_block_size
def override_target_max_block_size(self, target_max_block_size: Optional[int]):
self._output_block_size_option_override = OutputBlockSizeOption.of(
target_max_block_size=target_max_block_size
)
def mark_execution_finished(self):
"""Manually mark that this operator has finished execution."""
self._is_execution_marked_finished = True
def has_execution_finished(self) -> bool:
"""Return True when this operator has finished execution.
The outputs may or may not have been taken.
"""
from ..operators.base_physical_operator import InternalQueueOperatorMixin
internal_input_queue_num_blocks = 0
if isinstance(self, InternalQueueOperatorMixin):
internal_input_queue_num_blocks = self.internal_input_queue_num_blocks()
# NOTE: Execution is considered finished if
# - The operator was explicitly marked finished OR
# - The following auto-completion conditions are met
# - All input blocks have been ingested
# - Internal queue is empty
# - There are no active or pending tasks
return self._is_execution_marked_finished or (
self._inputs_complete
and self.num_active_tasks() == 0
and internal_input_queue_num_blocks == 0
)
def completed(self) -> bool:
"""Returns whether this operator has been fully completed.
An operator is completed iff:
* The operator has finished execution (i.e., `has_execution_finished()` is True).
* All outputs have been taken (i.e., `has_next()` is False) from it.
"""
from ..operators.base_physical_operator import InternalQueueOperatorMixin
internal_output_queue_num_blocks = 0
if isinstance(self, InternalQueueOperatorMixin):
internal_output_queue_num_blocks = self.internal_output_queue_num_blocks()
# NOTE: We check for (internal_output_queue_size == 0) and
# (not self.has_next()) because _OrderedOutputQueue can
# return False for self.has_next(), but have a non-empty queue size.
# Draining the internal output queue is important to free object refs.
return (
self.has_execution_finished()
and not self.has_next()
and internal_output_queue_num_blocks == 0
)
def get_stats(self) -> StatsDict:
"""Return recorded execution stats for use with DatasetStats."""
raise NotImplementedError
@property
def metrics(self) -> OpRuntimeMetrics:
"""Returns the runtime metrics of this operator."""
self._metrics._extra_metrics = self._extra_metrics()
return self._metrics
def _extra_metrics(self) -> Dict[str, Any]:
"""Subclasses should override this method to report extra metrics
that are specific to them."""
return {}
def _get_logical_args(self) -> Dict[str, Dict[str, Any]]:
"""Return the logical arguments that were translated to create this
PhysicalOperator."""
res = {}
for i, logical_op in enumerate(self._logical_operators):
logical_op_id = f"{logical_op}_{i}"
res[logical_op_id] = logical_op._get_args()
return res
# TODO(@balaji): Disambiguate this with `incremental_resource_usage`.
def per_task_resource_allocation(
self: "PhysicalOperator",
) -> ExecutionResources:
"""The amount of logical resources used by each task.
For regular tasks, these are the resources required to schedule a task. For
actor tasks, these are the resources required to schedule an actor divided by
the number of actor threads (i.e., `max_concurrency`).
Returns:
The resource requirement per task.
"""
return ExecutionResources.zero()
def max_task_concurrency(self: "PhysicalOperator") -> Optional[int]:
"""The maximum number of tasks that can be run concurrently.
Some operators manually configure a maximum concurrency. For example, if you
specify `concurrency` in `map_batches`.
"""
return None
# TODO(@balaji): Disambiguate this with `base_resource_usage`.
def min_scheduling_resources(
self: "PhysicalOperator",
) -> ExecutionResources:
"""The minimum resource bundle required to schedule a worker.
For regular tasks, this is the resources required to schedule a task. For actor
tasks, this is the resources required to schedule an actor.
"""
return ExecutionResources.zero()
def progress_str(self) -> str:
"""Return any extra status to be displayed in the operator progress bar.
For example, `<N> actors` to show current number of actors in an actor pool.
"""
return ""
def num_outputs_total(self) -> Optional[int]:
"""Returns the total number of output bundles of this operator,
or ``None`` if unable to provide a reasonable estimate (for example,
if no tasks have finished yet).
The value returned may be an estimate based off the consumption so far.
This is useful for reporting progress.
Subclasses should either override this method, or update
``self._estimated_num_output_bundles`` appropriately.
"""
return self._estimated_num_output_bundles
def num_output_rows_total(self) -> Optional[int]:
"""Returns the total number of output rows of this operator,
or ``None`` if unable to provide a reasonable estimate (for example,
if no tasks have finished yet).
The value returned may be an estimate based off the consumption so far.
This is useful for reporting progress.
Subclasses should either override this method, or update
``self._estimated_output_num_rows`` appropriately.
"""
return self._estimated_output_num_rows
def start(self, options: ExecutionOptions) -> None:
"""Called by the executor when execution starts for an operator.
Args:
options: The global options used for the overall execution.
"""
self._started = True
def should_add_input(self) -> bool:
"""Return whether it is desirable to add input to this operator right now.
Operators can customize the implementation of this method to apply additional
backpressure (e.g., waiting for internal actors to be created).
"""
return True
def add_input(self, refs: RefBundle, input_index: int) -> None:
"""Called when an upstream result is available.
Inputs may be added in any order, and calls to `add_input` may be interleaved
with calls to `get_next` / `has_next` to implement streaming execution.
Subclasses should override `_add_input_inner` instead of this method.
Args:
refs: The ref bundle that should be added as input.
input_index: The index identifying the input dependency producing the
input. For most operators, this is always `0` since there is only
one upstream input operator.
"""
assert 0 <= input_index < len(self._input_dependencies), (
f"Input index out of bounds (total inputs {len(self._input_dependencies)}, "
f"index is {input_index})"
)
self._metrics.on_input_received(refs)
self._add_input_inner(refs, input_index)
def _add_input_inner(self, refs: RefBundle, input_index: int) -> None:
"""Subclasses should override this method to implement `add_input`."""
raise NotImplementedError
def input_done(self, input_index: int) -> None:
"""Called when the upstream operator at index `input_index` has completed().
After this is called, the executor guarantees that no more inputs will be added
via `add_input` for the given input index.
"""
pass
def all_inputs_done(self) -> None:
"""Called when all upstream operators have completed().
After this is called, the executor guarantees that no more inputs will be added
via `add_input` for any input index.
"""
self._inputs_complete = True
def has_next(self) -> bool:
"""Returns when a downstream output is available.
When this returns true, it is safe to call `get_next()`.
"""
raise NotImplementedError
def get_next(self) -> RefBundle:
"""Get the next downstream output.
It is only allowed to call this if `has_next()` has returned True.
Subclasses should override `_get_next_inner` instead of this method.
"""
output = self._get_next_inner()
self._metrics.on_output_taken(output)
return output
def _get_next_inner(self) -> RefBundle:
"""Subclasses should override this method to implement `get_next`."""
raise NotImplementedError
def get_active_tasks(self) -> List[OpTask]:
"""Get a list of the active tasks of this operator.
Subclasses should return *all* running normal/actor tasks. The
StreamingExecutor will wait on these tasks and trigger callbacks.
"""
return []
def num_active_tasks(self) -> int:
"""Return the number of active tasks.
This method is used for 2 purposes:
* Determine if this operator is completed.
* Displaying active task info in the progress bar.
Thus, the return value can be less than `len(get_active_tasks())`,
if some tasks are not needed for the above purposes. E.g., for the
actor pool map operator, readiness checking tasks can be excluded
from `num_active_tasks`, but they should be included in
`get_active_tasks`.
Subclasses can override this as a performance optimization.
"""
return len(self.get_active_tasks())
def throttling_disabled(self) -> bool:
"""Whether to disable resource throttling for this operator.
This should return True for operators that only manipulate bundle metadata
(e.g., the OutputSplitter operator). This hints to the execution engine that
these operators should not be throttled based on resource usage.
"""
return False
def shutdown(self, timer: Timer, force: bool = False) -> None:
"""Abort execution and release all resources used by this operator.
        This releases any Ray resources acquired by this operator, such as active
        tasks, actors, and objects.
"""
if self._shutdown:
return
elif not self._started:
raise ValueError("Operator must be started before being shutdown.")
# Mark operator as shut down
self._shutdown = True
# Time shutdown sequence duration
with timer.timer():
self._do_shutdown(force)
def _do_shutdown(self, force: bool):
# Default implementation simply cancels any outstanding active task
self._cancel_active_tasks(force=force)
def current_processor_usage(self) -> ExecutionResources:
"""Returns the current estimated CPU and GPU usage of this operator, excluding
object store memory.
This method is called by the executor to decide how to allocate processors
between different operators.
"""
return ExecutionResources(0, 0, 0)
def running_processor_usage(self) -> ExecutionResources:
"""Returns the estimated running CPU and GPU usage of this operator, excluding
object store memory.
This method is called by the resource manager and the streaming
executor to display the number of currently running CPUs and GPUs in the
progress bar.
Note, this method returns `current_processor_usage() -
pending_processor_usage()` by default. Subclasses should only override
`pending_processor_usage()` if needed.
"""
usage = self.current_processor_usage()
usage = usage.subtract(self.pending_processor_usage())
return usage
def pending_processor_usage(self) -> ExecutionResources:
"""Returns the estimated pending CPU and GPU usage of this operator, excluding
object store memory.
This method is called by the resource manager and the streaming
executor to display the number of currently pending actors in the
progress bar.
"""
return ExecutionResources(0, 0, 0)
def min_max_resource_requirements(
self,
) -> Tuple[ExecutionResources, ExecutionResources]:
"""Returns lower/upper boundary of resource requirements for this operator:
- Minimal: lower bound (min) of resources required to start this operator
(for most operators this is 0, except the ones that utilize actors)
- Maximum: upper bound (max) of how many resources this operator could
utilize.
"""
return ExecutionResources.zero(), ExecutionResources.inf()
def incremental_resource_usage(self) -> ExecutionResources:
"""Returns the incremental resources required for processing another input.
For example, an operator that launches a task per input could return
ExecutionResources(cpu=1) as its incremental usage.
"""
return ExecutionResources()
def notify_in_task_submission_backpressure(self, in_backpressure: bool) -> None:
"""Called periodically from the executor to update internal in backpressure
status for stats collection purposes.
Args:
in_backpressure: Value this operator's in_backpressure should be set to.
"""
# only update on change to in_backpressure
if self._in_task_submission_backpressure != in_backpressure:
self._metrics.on_toggle_task_submission_backpressure(in_backpressure)
self._in_task_submission_backpressure = in_backpressure
def notify_in_task_output_backpressure(self, in_backpressure: bool) -> None:
"""Called periodically from the executor to update internal output backpressure
status for stats collection purposes.
Args:
in_backpressure: Value this operator's output backpressure should be set to.
"""
# only update on change to in_backpressure
if self._in_task_output_backpressure != in_backpressure:
self._metrics.on_toggle_task_output_backpressure(in_backpressure)
self._in_task_output_backpressure = in_backpressure
def get_autoscaling_actor_pools(self) -> List[AutoscalingActorPool]:
"""Return a list of `AutoscalingActorPool`s managed by this operator."""
return []
def implements_accurate_memory_accounting(self) -> bool:
"""Return whether this operator implements accurate memory accounting.
An operator that implements accurate memory accounting should properly
report its memory usage via the following APIs:
- `self._metrics.on_input_queued`.
- `self._metrics.on_input_dequeued`.
- `self._metrics.on_output_queued`.
- `self._metrics.on_output_dequeued`.
"""
# TODO(hchen): Currently we only enable `ReservationOpResourceAllocator` when
# all operators in the dataset have implemented accurate memory accounting.
# Eventually all operators should implement accurate memory accounting.
return False
def supports_fusion(self) -> bool:
"""Returns ```True``` if this operator can be fused with other operators."""
return False
def update_resource_usage(self) -> None:
"""Updates resource usage of this operator at runtime.
This method will be called at runtime in each StreamingExecutor iteration.
Subclasses can override it to account for dynamic resource usage updates due to
restarting actors, retrying tasks, lost objects, etc.
"""
pass
def get_actor_info(self) -> _ActorPoolInfo:
"""Returns the current status of actors being used by the operator"""
return _ActorPoolInfo(running=0, pending=0, restarting=0)
def _cancel_active_tasks(self, force: bool):
tasks: List[OpTask] = self.get_active_tasks()
# Interrupt all (still) running tasks immediately
for task in tasks:
task._cancel(force=force)
        # In case of forced cancellation, block until the tasks actually return
        # to guarantee all tasks are done upon return from this method
if force:
# Wait for all tasks to get cancelled before returning
for task in tasks:
try:
ray.get(task.get_waitable())
except ray.exceptions.RayError:
# Cancellation either succeeded, or the task might have already
# failed with a different error, or cancellation failed.
# In all cases, we swallow the exception.
pass
def upstream_op_num_outputs(self):
upstream_op_num_outputs = sum(
op.num_outputs_total() or 0 for op in self.input_dependencies
)
return upstream_op_num_outputs
def get_max_concurrency_limit(self) -> Optional[int]:
"""Max value of how many tasks this operator could run
concurrently (if limited)"""
return None
|
PhysicalOperator
|
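One small pattern worth calling out from `notify_in_task_submission_backpressure`: the metric hook only fires when the reported backpressure state actually flips, so repeated identical reports from the executor loop don't inflate the toggle count. A standalone sketch of that edge-triggered update (my own names):

```python
class BackpressureTrackerSketch:
    def __init__(self):
        self.in_backpressure = False
        self.toggles = 0

    def notify(self, in_backpressure: bool) -> None:
        if self.in_backpressure != in_backpressure:   # only record real transitions
            self.toggles += 1
        self.in_backpressure = in_backpressure

t = BackpressureTrackerSketch()
for state in [True, True, False, False, True]:
    t.notify(state)

assert t.toggles == 3   # False->True, True->False, False->True
```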