language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | readthedocs__readthedocs.org | readthedocs/organizations/views/public.py | {
"start": 3131,
"end": 3901
} | class ____(FilterContextMixin, OrganizationTeamView, ListView):
template_name = "organizations/team_list.html"
context_object_name = "teams"
admin_only = False
filterset_class = OrganizationTeamListFilterSet
strict = True
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
org = self.get_organization()
# TODO the team queryset, used through ``get_queryset()`` defines
# sorting. Sorting should only happen in the filterset, so it can be
# controlled in the UI.
context["filter"] = self.get_filterset(
organization=org,
)
context[self.get_context_object_name()] = self.get_filtered_queryset()
return context
| ListOrganizationTeams |
python | palantir__python-language-server | test/plugins/test_jedi_rename.py | {
"start": 347,
"end": 2507
} | class ____(Test1):
pass
'''
DOC_NAME_EXTRA = 'test2.py'
DOC_EXTRA = '''from test1 import Test1
x = Test1()
'''
@pytest.fixture
def tmp_workspace(temp_workspace_factory):
return temp_workspace_factory({
DOC_NAME: DOC,
DOC_NAME_EXTRA: DOC_EXTRA
})
@pytest.mark.skipif(LT_PY36, reason='Jedi refactoring isnt supported on Python 2.x/3.5')
def test_jedi_rename(tmp_workspace, config): # pylint: disable=redefined-outer-name
# rename the `Test1` class
position = {'line': 0, 'character': 6}
DOC_URI = uris.from_fs_path(os.path.join(tmp_workspace.root_path, DOC_NAME))
doc = Document(DOC_URI, tmp_workspace)
result = pyls_rename(config, tmp_workspace, doc, position, 'ShouldBeRenamed')
assert len(result.keys()) == 1
changes = result.get('documentChanges')
assert len(changes) == 2
assert changes[0]['textDocument']['uri'] == doc.uri
assert changes[0]['textDocument']['version'] == doc.version
assert changes[0].get('edits') == [
{
'range': {
'start': {'line': 0, 'character': 0},
'end': {'line': 5, 'character': 0},
},
'newText': 'class ShouldBeRenamed():\n pass\n\nclass Test2(ShouldBeRenamed):\n pass\n',
}
]
path = os.path.join(tmp_workspace.root_path, DOC_NAME_EXTRA)
uri_extra = uris.from_fs_path(path)
assert changes[1]['textDocument']['uri'] == uri_extra
# This also checks whether documents not yet added via textDocument/didOpen
# but that do need to be renamed in the project have a `null` version
# number.
assert changes[1]['textDocument']['version'] is None
expected = 'from test1 import ShouldBeRenamed\nx = ShouldBeRenamed()\n'
if os.name == 'nt':
# The .write method in the temp_workspace_factory functions writes
# Windows-style line-endings.
expected = expected.replace('\n', '\r\n')
assert changes[1].get('edits') == [
{
'range': {
'start': {'line': 0, 'character': 0},
'end': {'line': 2, 'character': 0}},
'newText': expected
}
]
| Test2 |
python | django__django | tests/model_inheritance/models.py | {
"start": 811,
"end": 881
} | class ____(CommonInfo):
job = models.CharField(max_length=50)
| Worker |
python | pyca__cryptography | src/cryptography/hazmat/primitives/hashes.py | {
"start": 3199,
"end": 3311
} | class ____(HashAlgorithm): # noqa: N801
name = "sha3-512"
digest_size = 64
block_size = None
| SHA3_512 |
python | spyder-ide__spyder | spyder/plugins/variableexplorer/widgets/basedialog.py | {
"start": 275,
"end": 1626
} | class ____(QDialog):
def __init__(self, parent=None):
QDialog.__init__(self, parent)
# Set style of all QPushButton's inside the dialog.
css = qstylizer.style.StyleSheet()
css.QPushButton.setValues(
padding='3px 15px 3px 15px',
)
self.setStyleSheet(css.toString())
def set_dynamic_width_and_height(self, screen_geometry, width_ratio=0.5,
height_ratio=0.5):
"""
Update width and height using an updated screen geometry.
Use a ratio for the width and height of the dialog.
"""
screen_width = int(screen_geometry.width() * width_ratio)
screen_height = int(screen_geometry.height() * height_ratio)
self.resize(screen_width, screen_height)
# Make the dialog window appear in the center of the screen
x = int(screen_geometry.center().x() - self.width() / 2)
y = int(screen_geometry.center().y() - self.height() / 2)
self.move(x, y)
def show(self):
super().show()
window = self.window()
windowHandle = window.windowHandle()
screen = windowHandle.screen()
geometry = screen.geometry()
self.set_dynamic_width_and_height(geometry)
screen.geometryChanged.connect(self.set_dynamic_width_and_height)
| BaseDialog |
python | pypa__warehouse | warehouse/packaging/services.py | {
"start": 2289,
"end": 3858
} | class ____:
def __init__(self, base):
# This class should not be used in production, it's trivial for it to
# be used to read arbitrary files from the disk. It is intended ONLY
# for local development with trusted users. To make this clear, we'll
# raise a warning.
warnings.warn(
"LocalFileStorage is intended only for use in development, you "
"should not use it in production due to the lack of safe guards "
"for safely locating files on disk.",
InsecureStorageWarning,
)
self.base = base
@classmethod
def create_service(cls, context, request):
raise NotImplementedError
def get(self, path):
return open(os.path.join(self.base, path), "rb")
def get_metadata(self, path):
return json.loads(open(os.path.join(self.base, path + ".meta")).read())
def get_checksum(self, path):
return hashlib.md5(
open(os.path.join(self.base, path), "rb").read(), usedforsecurity=False
).hexdigest()
def store(self, path, file_path, *, meta=None):
destination = os.path.join(self.base, path)
os.makedirs(os.path.dirname(destination), exist_ok=True)
with open(destination, "wb") as dest_fp:
with open(file_path, "rb") as src_fp:
dest_fp.write(src_fp.read())
if meta is not None:
with open(destination + ".meta", "w") as dest_fp:
dest_fp.write(json.dumps(meta))
@implementer(IFileStorage)
| GenericLocalBlobStorage |
python | pytorch__pytorch | benchmarks/transformer/sdp.py | {
"start": 2467,
"end": 10906
} | class ____(torch.nn.Module):
def __init__(self, num_heads, in_proj_weight, in_proj_bias, out_proj):
super().__init__()
self.in_proj_weight = in_proj_weight
self.in_proj_bias = in_proj_bias
self.out_proj = out_proj
self.num_heads = num_heads
def forward(self, query, key, value, mask):
if not (query is key and key is value):
raise NotImplementedError(
"query, key and value must be the same Tensor for now."
)
if mask is not None:
raise NotImplementedError("mask is currently not supported.")
query_projected = torch.nn.functional.linear(
query, self.in_proj_weight, self.in_proj_bias
)
batch_size = query_projected.size(0)
embed_dim = query_projected.size(2)
head_dim = embed_dim // (self.num_heads * 3)
query, key, value = query_projected.chunk(3, -1)
query = query.view(batch_size, -1, self.num_heads, head_dim).transpose(1, 2)
key = key.view(batch_size, -1, self.num_heads, head_dim).transpose(1, 2)
value = value.view(batch_size, -1, self.num_heads, head_dim).transpose(1, 2)
# the output of sdp = (batch, num_heads, seq_len, head_dim)
attn = torch.nn.functional.scaled_dot_product_attention(
query,
key,
value,
attn_mask=None,
dropout_p=0.0,
is_causal=False,
)
attn = attn.transpose(1, 2).reshape(batch_size, -1, self.num_heads * head_dim)
# Match return signature of nn.MHA
return self.out_proj(attn), None
def build_composite_mha_from_nn_mha(pt):
assert pt._qkv_same_embed_dim
in_proj_weight = pt.in_proj_weight
assert in_proj_weight is not None
assert pt.batch_first
return CompositeMHA(pt.num_heads, pt.in_proj_weight, pt.in_proj_bias, pt.out_proj)
def generate_rand_batch(
batch_size,
max_sequence_len,
embed_dimension,
pad_percentage=None,
dtype=torch.float16,
device="cuda",
):
if not pad_percentage:
return (
torch.randn(
batch_size,
max_sequence_len,
embed_dimension,
dtype=dtype,
device=device,
),
None,
)
# Really slow but should work
seq_len_list = [
int(max_sequence_len * (1 - random.gauss(pad_percentage, 0.01)))
for _ in range(batch_size)
]
# Make random ele max length
seq_len_list[random.randint(0, batch_size - 1)] = max_sequence_len
# print(f"Theoretical padding: {pad_percentage} actual: {1 - (sum(seq_len_list) / (batch_size * max_sequence_len))}")
return (
torch.nested.nested_tensor(
[
torch.randn(seq_len, embed_dimension, dtype=dtype, device=device)
for seq_len in seq_len_list
]
),
seq_len_list,
)
def benchmark_torch_function_in_microseconds(f, *args, **kwargs):
t0 = benchmark.Timer(
stmt="f(*args, **kwargs)", globals={"args": args, "kwargs": kwargs, "f": f}
)
return t0.blocked_autorange().mean * 1e6
def assert_close_tensors(tensor_a, tensor_b):
# First order sanity check. Not a replacement for rigorous tests.
if tensor_a.is_nested and tensor_b.is_nested:
for a, b in zip(tensor_a.unbind(), tensor_b.unbind()):
assert torch.allclose(a, b, atol=1e-2, rtol=1e-2)
else:
assert torch.allclose(tensor_a, tensor_b, atol=1e-3, rtol=1e-3)
def run_single_experiment(config: ExperimentConfig) -> ExperimentResults:
with sdp_kernel(
enable_math=config.enable_math,
enable_flash=config.enable_flash,
enable_mem_efficient=config.enable_mem_efficient,
enable_cudnn=config.enable_cudnn,
):
dropout_p = 0.0
mask = None
nn_mha = torch.nn.MultiheadAttention(
embed_dim=config.embed_dimension,
num_heads=config.num_heads,
batch_first=True,
dropout=dropout_p,
)
nn_mha = nn_mha.eval().to("cuda", config.dtype)
composite_mha = build_composite_mha_from_nn_mha(nn_mha)
qkv, lengths = generate_rand_batch(
config.batch_size,
config.max_sequence_len,
config.embed_dimension,
config.pad_percentage,
config.dtype,
)
nn_mha_output, _ = nn_mha(qkv, qkv, qkv, mask)
composite_mha_output, _ = composite_mha(qkv, qkv, qkv, mask)
# First order sanity check
assert_close_tensors(nn_mha_output, composite_mha_output)
nn_mha_time = benchmark_torch_function_in_microseconds(
nn_mha, qkv, qkv, qkv, mask
)
composite_mha_time = benchmark_torch_function_in_microseconds(
composite_mha, qkv, qkv, qkv, mask
)
# TorchDynamo will error on NestedTensors
if config.pad_percentage is None:
compiled_nn_mha = torch.compile(nn_mha)
compiled_composite_mha = torch.compile(composite_mha)
compiled_nn_mha_time = benchmark_torch_function_in_microseconds(
compiled_nn_mha, qkv, qkv, qkv, mask
)
compiled_composite_mha_time = benchmark_torch_function_in_microseconds(
compiled_composite_mha,
qkv,
qkv,
qkv,
mask,
)
else:
compiled_nn_mha_time = None
compiled_composite_mha_time = None
results = ExperimentResults(
nn_mha_time,
compiled_nn_mha_time,
composite_mha_time,
compiled_composite_mha_time,
)
return Experiment(config, results)
# Could return generator
def generate_experiments(
batch_sizes, num_heads, max_seq_lens, embed_dims, dtypes, pad_percentages
) -> list[ExperimentConfig]:
configs = []
for bsz, n_heads, seq_len, embed_dim, dtype, padding in itertools.product(
batch_sizes, num_heads, max_seq_lens, embed_dims, dtypes, pad_percentages
):
configs.append(
ExperimentConfig(
batch_size=bsz,
num_heads=n_heads,
max_sequence_len=seq_len,
embed_dimension=embed_dim,
dtype=dtype,
pad_percentage=padding,
enable_math=False,
enable_flash=True,
enable_mem_efficient=True,
enable_cudnn=True,
)
)
return configs
def main(save_path: Optional[Path]):
seed = 123
np.random.seed(seed)
torch.manual_seed(seed)
# Run one timing experiment comparing nn_mha vs composite_mha
config = ExperimentConfig(
batch_size=128,
num_heads=8,
max_sequence_len=512,
embed_dimension=512,
dtype=torch.float16,
pad_percentage=None,
enable_math=False,
enable_flash=True,
enable_mem_efficient=True,
enable_cudnn=True,
)
experiment = run_single_experiment(config)
pprint(experiment)
table = PrettyTable()
table.float_format = ".3"
table.field_names = (
ExperimentConfig.get_entry_names() + ExperimentResults.get_entry_names()
)
# Run a bunch of experiments
batch_sizes = [256]
num_heads = [32]
max_seq_lens = [256]
embed_dims = [512]
dtypes = [torch.bfloat16, torch.float16, torch.float32]
pad_percentages = [None, 0.9]
experiment_configs = generate_experiments(
batch_sizes, num_heads, max_seq_lens, embed_dims, dtypes, pad_percentages
)
experiments: list[Experiment] = []
for experiment_config in tqdm(experiment_configs):
experiment = run_single_experiment(experiment_config)
experiments.append(experiment)
table.add_row(experiment.get_entries())
print(table)
csv_string = table.get_csv_string()
if save_path is not None:
with open(save_path, "w") as csvfile:
csvfile.write(csv_string)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--save-path", "--save_path", type=str, help="Path to save the results"
)
args = parser.parse_args()
save_path = Path(args.save_path) if args.save_path else None
main(save_path)
| CompositeMHA |
python | scikit-learn__scikit-learn | sklearn/externals/array_api_compat/common/_typing.py | {
"start": 883,
"end": 1109
} | class ____(Protocol):
@property
def __class__(self, /) -> type[int]: ...
@__class__.setter
def __class__(self, value: type[int], /) -> None: ... # pyright: ignore[reportIncompatibleMethodOverride]
@final
| JustInt |
python | coleifer__peewee | tests/models.py | {
"start": 154326,
"end": 155327
} | class ____(ModelTestCase):
requires = [Transaction, TUser]
def test_max_alias(self):
with self.database.atomic():
charlie = TUser.create(username='charlie')
huey = TUser.create(username='huey')
data = (
(charlie, 10.),
(charlie, 20.),
(charlie, 30.),
(huey, 1.5),
(huey, 2.5))
for user, amount in data:
Transaction.create(user=user, amount=amount)
with self.assertQueryCount(1):
amount = fn.MAX(Transaction.amount).alias('amount')
query = (Transaction
.select(amount, TUser.username)
.join(TUser)
.group_by(TUser.username)
.order_by(TUser.username))
data = [(txn.amount, txn.user.username) for txn in query]
self.assertEqual(data, [
(30., 'charlie'),
(2.5, 'huey')])
| TestMaxAlias |
python | getsentry__sentry-python | tests/integrations/beam/test_beam.py | {
"start": 794,
"end": 992
} | class ____:
def __init__(self, fn):
self.r = "We are in A"
self.fn = fn
self._inspect_fn = _wrap_inspect_call(self, "fn")
def process(self):
return self.fn()
| A |
python | kamyu104__LeetCode-Solutions | Python/russian-doll-envelopes.py | {
"start": 84,
"end": 889
} | class ____(object):
def maxEnvelopes(self, envelopes):
"""
:type envelopes: List[List[int]]
:rtype: int
"""
def insert(target):
left, right = 0, len(result) - 1
while left <= right:
mid = left + (right - left) / 2
if result[mid] >= target:
right = mid - 1
else:
left = mid + 1
if left == len(result):
result.append(target)
else:
result[left] = target
result = []
envelopes.sort(lambda x, y: y[1] - x[1] if x[0] == y[0] else \
x[0] - y[0])
for envelope in envelopes:
insert(envelope[1])
return len(result)
| Solution |
python | huggingface__transformers | src/transformers/models/distilbert/modeling_distilbert.py | {
"start": 11999,
"end": 16819
} | class ____(DistilBertPreTrainedModel):
def __init__(self, config: PreTrainedConfig):
super().__init__(config)
self.embeddings = Embeddings(config) # Embeddings
self.transformer = Transformer(config) # Encoder
# Initialize weights and apply final processing
self.post_init()
def get_position_embeddings(self) -> nn.Embedding:
"""
Returns the position embeddings
"""
return self.embeddings.position_embeddings
def resize_position_embeddings(self, new_num_position_embeddings: int):
"""
Resizes position embeddings of the model if `new_num_position_embeddings != config.max_position_embeddings`.
Arguments:
new_num_position_embeddings (`int`):
The number of new position embedding matrix. If position embeddings are learned, increasing the size
will add newly initialized vectors at the end, whereas reducing the size will remove vectors from the
end. If position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the
size will add correct vectors at the end following the position encoding algorithm, whereas reducing
the size will remove vectors from the end.
"""
num_position_embeds_diff = new_num_position_embeddings - self.config.max_position_embeddings
# no resizing needs to be done if the length stays the same
if num_position_embeds_diff == 0:
return
logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...")
self.config.max_position_embeddings = new_num_position_embeddings
old_position_embeddings_weight = self.embeddings.position_embeddings.weight.clone()
self.embeddings.position_embeddings = nn.Embedding(self.config.max_position_embeddings, self.config.dim)
if self.config.sinusoidal_pos_embds:
create_sinusoidal_embeddings(
n_pos=self.config.max_position_embeddings, dim=self.config.dim, out=self.position_embeddings.weight
)
else:
with torch.no_grad():
if num_position_embeds_diff > 0:
self.embeddings.position_embeddings.weight[:-num_position_embeds_diff] = nn.Parameter(
old_position_embeddings_weight
)
else:
self.embeddings.position_embeddings.weight = nn.Parameter(
old_position_embeddings_weight[:num_position_embeds_diff]
)
# move position_embeddings to correct device
self.embeddings.position_embeddings.to(self.device)
def get_input_embeddings(self) -> nn.Embedding:
return self.embeddings.word_embeddings
def set_input_embeddings(self, new_embeddings: nn.Embedding):
self.embeddings.word_embeddings = new_embeddings
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[BaseModelOutput, tuple[torch.Tensor, ...]]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
"""
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
embeddings = self.embeddings(input_ids, inputs_embeds, position_ids)
attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=embeddings,
attention_mask=attention_mask,
)
return self.transformer(
hidden_states=embeddings,
attention_mask=attention_mask,
**kwargs,
)
@auto_docstring(
custom_intro="""
DistilBert Model with a `masked language modeling` head on top.
"""
)
| DistilBertModel |
python | ray-project__ray | python/ray/data/_internal/arrow_block.py | {
"start": 6007,
"end": 16513
} | class ____(TableBlockAccessor):
ROW_TYPE = ArrowRow
def __init__(self, table: "pyarrow.Table"):
if pyarrow is None:
raise ImportError("Run `pip install pyarrow` for Arrow support")
super().__init__(table)
self._max_chunk_size: Optional[int] = None
def _get_row(self, index: int) -> ArrowRow:
base_row = self.slice(index, index + 1, copy=False)
return ArrowRow(base_row)
def column_names(self) -> List[str]:
return self._table.column_names
def fill_column(self, name: str, value: Any) -> Block:
import pyarrow.compute as pc
# Check if value is array-like - if so, use upsert_column logic
if isinstance(value, (pyarrow.Array, pyarrow.ChunkedArray)):
return self.upsert_column(name, value)
else:
# Scalar value - use original fill_column logic
if isinstance(value, pyarrow.Scalar):
type = value.type
else:
type = pyarrow.infer_type([value])
array = pyarrow.nulls(len(self._table), type=type)
array = pc.fill_null(array, value)
return self.upsert_column(name, array)
@classmethod
def from_bytes(cls, data: bytes) -> "ArrowBlockAccessor":
reader = pyarrow.ipc.open_stream(data)
return cls(reader.read_all())
@staticmethod
def _build_tensor_row(
row: ArrowRow, row_idx: int, col_name: str = TENSOR_COLUMN_NAME
) -> np.ndarray:
element = row[col_name][row_idx]
arr = element.as_py()
assert isinstance(arr, np.ndarray), type(arr)
return arr
def slice(self, start: int, end: int, copy: bool = False) -> "pyarrow.Table":
view = self._table.slice(start, end - start)
if copy:
view = transform_pyarrow.combine_chunks(view, copy)
return view
def random_shuffle(self, random_seed: Optional[int]) -> "pyarrow.Table":
return shuffle(self._table, random_seed)
def schema(self) -> "pyarrow.lib.Schema":
return self._table.schema
def to_pandas(self) -> "pandas.DataFrame":
from ray.air.util.data_batch_conversion import _cast_tensor_columns_to_ndarrays
# We specify ignore_metadata=True because pyarrow will use the metadata
# to build the Table. This is handled incorrectly for older pyarrow versions
ctx = DataContext.get_current()
df = self._table.to_pandas(ignore_metadata=ctx.pandas_block_ignore_metadata)
if ctx.enable_tensor_extension_casting:
df = _cast_tensor_columns_to_ndarrays(df)
return df
def to_numpy(
self, columns: Optional[Union[str, List[str]]] = None
) -> Union[np.ndarray, Dict[str, np.ndarray]]:
if columns is None:
columns = self._table.column_names
should_be_single_ndarray = False
elif isinstance(columns, list):
should_be_single_ndarray = False
else:
columns = [columns]
should_be_single_ndarray = True
column_names_set = set(self._table.column_names)
for column in columns:
if column not in column_names_set:
raise ValueError(
f"Cannot find column {column}, available columns: "
f"{column_names_set}"
)
column_values_ndarrays = []
for col_name in columns:
col = self._table[col_name]
# Combine columnar values arrays to make these contiguous
# (making them compatible with numpy format)
combined_array = transform_pyarrow.combine_chunked_array(col)
column_values_ndarrays.append(
transform_pyarrow.to_numpy(combined_array, zero_copy_only=False)
)
if should_be_single_ndarray:
assert len(columns) == 1
return column_values_ndarrays[0]
else:
return dict(zip(columns, column_values_ndarrays))
def to_arrow(self) -> "pyarrow.Table":
return self._table
def num_rows(self) -> int:
# Arrow may represent an empty table via an N > 0 row, 0-column table, e.g. when
# slicing an empty table, so we return 0 if num_columns == 0.
return self._table.num_rows if self._table.num_columns > 0 else 0
def size_bytes(self) -> int:
return self._table.nbytes
def _zip(self, acc: BlockAccessor) -> "Block":
r = self.to_arrow()
s = acc.to_arrow()
for col_name in s.column_names:
col = s.column(col_name)
# Ensure the column names are unique after zip.
if col_name in r.column_names:
i = 1
new_name = col_name
while new_name in r.column_names:
new_name = "{}_{}".format(col_name, i)
i += 1
col_name = new_name
r = r.append_column(col_name, col)
return r
def upsert_column(
self, column_name: str, column_data: BlockColumn
) -> "pyarrow.Table":
assert isinstance(
column_data, (pyarrow.Array, pyarrow.ChunkedArray)
), f"Expected either a pyarrow.Array or pyarrow.ChunkedArray, got: {type(column_data)}"
column_idx = self._table.schema.get_field_index(column_name)
if column_idx == -1:
return self._table.append_column(column_name, column_data)
else:
return self._table.set_column(column_idx, column_name, column_data)
@staticmethod
def builder() -> ArrowBlockBuilder:
return ArrowBlockBuilder()
@staticmethod
def _empty_table() -> "pyarrow.Table":
return ArrowBlockBuilder._empty_table()
def take(
self,
indices: Union[List[int], "pyarrow.Array", "pyarrow.ChunkedArray"],
) -> "pyarrow.Table":
"""Select rows from the underlying table.
This method is an alternative to pyarrow.Table.take(), which breaks for
extension arrays.
"""
return transform_pyarrow.take_table(self._table, indices)
def drop(self, columns: List[str]) -> Block:
return self._table.drop(columns)
def select(self, columns: List[str]) -> "pyarrow.Table":
if not all(isinstance(col, str) for col in columns):
raise ValueError(
"Columns must be a list of column name strings when aggregating on "
f"Arrow blocks, but got: {columns}."
)
if len(columns) == 0:
# Applicable for count which does an empty projection.
# Pyarrow returns a table with 0 columns and num_rows rows.
return self.fill_column(_BATCH_SIZE_PRESERVING_STUB_COL_NAME, None)
return self._table.select(columns)
def rename_columns(self, columns_rename: Dict[str, str]) -> "pyarrow.Table":
return self._table.rename_columns(columns_rename)
def hstack(self, other_block: "pyarrow.Table") -> "pyarrow.Table":
result_table = self._table
for name, column in zip(other_block.column_names, other_block.columns):
result_table = result_table.append_column(name, column)
return result_table
def _sample(self, n_samples: int, sort_key: "SortKey") -> "pyarrow.Table":
indices = random.sample(range(self._table.num_rows), n_samples)
table = self._table.select(sort_key.get_columns())
return transform_pyarrow.take_table(table, indices)
def sort(self, sort_key: "SortKey") -> Block:
assert (
sort_key.get_columns()
), f"Sorting columns couldn't be empty (got {sort_key.get_columns()})"
if self._table.num_rows == 0:
# If the pyarrow table is empty we may not have schema
# so calling sort_indices() will raise an error.
return self._empty_table()
context = DataContext.get_current()
sort = get_sort_transform(context)
return sort(self._table, sort_key)
def sort_and_partition(
self, boundaries: List[T], sort_key: "SortKey"
) -> List["Block"]:
table = self.sort(sort_key)
if table.num_rows == 0:
return [self._empty_table() for _ in range(len(boundaries) + 1)]
elif len(boundaries) == 0:
return [table]
return BlockAccessor.for_block(table)._find_partitions_sorted(
boundaries, sort_key
)
@staticmethod
def merge_sorted_blocks(
blocks: List[Block], sort_key: "SortKey"
) -> Tuple[Block, BlockMetadataWithSchema]:
stats = BlockExecStats.builder()
blocks = [b for b in blocks if b.num_rows > 0]
if len(blocks) == 0:
ret = ArrowBlockAccessor._empty_table()
else:
# Handle blocks of different types.
blocks = TableBlockAccessor.normalize_block_types(blocks, BlockType.ARROW)
concat_and_sort = get_concat_and_sort_transform(DataContext.get_current())
ret = concat_and_sort(blocks, sort_key, promote_types=True)
return ret, BlockMetadataWithSchema.from_block(ret, stats=stats.build())
def block_type(self) -> BlockType:
return BlockType.ARROW
def iter_rows(
self, public_row_format: bool
) -> Iterator[Union[Mapping, np.ndarray]]:
table = self._table
if public_row_format:
if self._max_chunk_size is None:
# Calling _get_max_chunk_size in constructor makes it slow, so we
# are calling it here only when needed.
self._max_chunk_size = _get_max_chunk_size(
table, ARROW_MAX_CHUNK_SIZE_BYTES
)
for batch in table.to_batches(max_chunksize=self._max_chunk_size):
yield from batch.to_pylist()
else:
num_rows = self.num_rows()
for i in range(num_rows):
yield self._get_row(i)
def filter(self, predicate_expr: "Expr") -> "pyarrow.Table":
"""Filter rows based on a predicate expression."""
if self._table.num_rows == 0:
return self._table
from ray.data._internal.planner.plan_expression.expression_evaluator import (
eval_expr,
)
# Evaluate the expression to get a boolean mask
mask = eval_expr(predicate_expr, self._table)
# Use PyArrow's built-in filter method
return self._table.filter(mask)
| ArrowBlockAccessor |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI034.py | {
"start": 10213,
"end": 10300
} | class ____(tuple):
def __new__(cls: Type[NonGeneric2]) -> NonGeneric2: ...
| NonGeneric2 |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/selectable.py | {
"start": 14083,
"end": 15657
} | class ____:
_suffixes: Tuple[Tuple[DQLDMLClauseElement, str], ...] = ()
_has_suffixes_traverse_internals: _TraverseInternalsType = [
("_suffixes", InternalTraversal.dp_prefix_sequence)
]
@_generative
@_document_text_coercion(
"suffixes",
":meth:`_expression.HasSuffixes.suffix_with`",
":paramref:`.HasSuffixes.suffix_with.*suffixes`",
)
def suffix_with(
self,
*suffixes: _TextCoercedExpressionArgument[Any],
dialect: str = "*",
) -> Self:
r"""Add one or more expressions following the statement as a whole.
This is used to support backend-specific suffix keywords on
certain constructs.
E.g.::
stmt = (
select(col1, col2)
.cte()
.suffix_with(
"cycle empno set y_cycle to 1 default 0", dialect="oracle"
)
)
Multiple suffixes can be specified by multiple calls
to :meth:`_expression.HasSuffixes.suffix_with`.
:param \*suffixes: textual or :class:`_expression.ClauseElement`
construct which
will be rendered following the target clause.
:param dialect: Optional string dialect name which will
limit rendering of this suffix to only that dialect.
"""
self._suffixes = self._suffixes + tuple(
[
(coercions.expect(roles.StatementOptionRole, p), dialect)
for p in suffixes
]
)
return self
| HasSuffixes |
python | gevent__gevent | src/gevent/tests/test__destroy_default_loop.py | {
"start": 70,
"end": 2199
} | class ____(unittest.TestCase):
def tearDown(self):
self._reset_hub()
super(TestDestroyDefaultLoop, self).tearDown()
def _reset_hub(self):
from gevent._hub_local import set_hub
from gevent._hub_local import set_loop
from gevent._hub_local import get_hub_if_exists
hub = get_hub_if_exists()
if hub is not None:
hub.destroy(destroy_loop=True)
set_hub(None)
set_loop(None)
def test_destroy_gc(self):
# Issue 1098: destroying the default loop
# while using the C extension could crash
# the interpreter when it exits
# Create the hub greenlet. This creates one loop
# object pointing to the default loop.
gevent.get_hub()
# Get a new loop object, but using the default
# C loop
loop = gevent.config.loop(default=True)
self.assertTrue(loop.default)
# Destroy it
loop.destroy()
# It no longer claims to be the default
self.assertFalse(loop.default)
# Delete it
del loop
# Delete the hub. This prompts garbage
# collection of it and its loop object.
# (making this test more repeatable; the exit
# crash only happened when that greenlet object
# was collected at exit time, which was most common
# in CPython 3.5)
self._reset_hub()
def test_destroy_two(self):
# Get two new loop object, but using the default
# C loop
loop1 = gevent.config.loop(default=True)
loop2 = gevent.config.loop(default=True)
self.assertTrue(loop1.default)
self.assertTrue(loop2.default)
# Destroy the first
loop1.destroy()
# It no longer claims to be the default
self.assertFalse(loop1.default)
# Destroy the second. This doesn't crash.
loop2.destroy()
self.assertFalse(loop2.default)
self.assertFalse(loop2.ptr)
self._reset_hub()
self.assertTrue(gevent.get_hub().loop.ptr)
if __name__ == '__main__':
unittest.main()
| TestDestroyDefaultLoop |
python | pydantic__pydantic | pydantic-core/tests/validators/test_union.py | {
"start": 1694,
"end": 4215
} | class ____:
class ModelA:
pass
class ModelB:
pass
@pytest.fixture(scope='class')
def schema_validator(self) -> SchemaValidator:
return SchemaValidator(
schema=core_schema.union_schema(
choices=[
core_schema.model_schema(
cls=self.ModelA,
schema=core_schema.model_fields_schema(
fields={
'a': core_schema.model_field(schema=core_schema.int_schema()),
'b': core_schema.model_field(schema=core_schema.str_schema()),
}
),
),
core_schema.model_schema(
cls=self.ModelB,
schema=core_schema.model_fields_schema(
fields={
'c': core_schema.model_field(schema=core_schema.int_schema()),
'd': core_schema.model_field(schema=core_schema.str_schema()),
}
),
),
]
)
)
def test_model_a(self, schema_validator: SchemaValidator):
m_a = schema_validator.validate_python({'a': 1, 'b': 'hello'})
assert isinstance(m_a, self.ModelA)
assert m_a.a == 1
assert m_a.b == 'hello'
def test_model_b(self, schema_validator: SchemaValidator):
m_b = schema_validator.validate_python({'c': 2, 'd': 'again'})
assert isinstance(m_b, self.ModelB)
assert m_b.c == 2
assert m_b.d == 'again'
def test_exact_check(self, schema_validator: SchemaValidator):
m_b = schema_validator.validate_python({'c': 2, 'd': 'again'})
assert isinstance(m_b, self.ModelB)
m_b2 = schema_validator.validate_python(m_b)
assert m_b2 is m_b
def test_error(self, schema_validator: SchemaValidator):
with pytest.raises(ValidationError) as exc_info:
schema_validator.validate_python({'a': 2})
assert exc_info.value.errors(include_url=False) == [
{'type': 'missing', 'loc': ('ModelA', 'b'), 'msg': 'Field required', 'input': {'a': 2}},
{'type': 'missing', 'loc': ('ModelB', 'c'), 'msg': 'Field required', 'input': {'a': 2}},
{'type': 'missing', 'loc': ('ModelB', 'd'), 'msg': 'Field required', 'input': {'a': 2}},
]
| TestModelClass |
python | walkccc__LeetCode | solutions/2008. Maximum Earnings From Taxi/2008.py | {
"start": 0,
"end": 501
} | class ____:
def maxTaxiEarnings(self, n: int, rides: list[list[int]]) -> int:
startToEndAndEarns = [[] for _ in range(n)]
# dp[i] := the maximum dollars you can earn starting at i
dp = [0] * (n + 1)
for start, end, tip in rides:
earn = end - start + tip
startToEndAndEarns[start].append((end, earn))
for i in range(n - 1, 0, -1):
dp[i] = dp[i + 1]
for end, earn in startToEndAndEarns[i]:
dp[i] = max(dp[i], dp[end] + earn)
return dp[1]
| Solution |
python | hyperopt__hyperopt | hyperopt/tests/unit/test_rand.py | {
"start": 312,
"end": 959
} | class ____(unittest.TestCase):
def test_seeding(self):
# -- assert that the seeding works a particular way
domain = coin_flip()
docs = rand.suggest(
list(range(10)), domain, Trials(), seed=np.random.PCG64(123)
)
trials = trials_from_docs(docs)
idxs, vals = miscs_to_idxs_vals(trials.miscs)
# Passes Nov 8 / 2013
self.assertEqual(list(idxs["flip"]), list(range(10)))
self.assertEqual(list(vals["flip"]), [0, 1, 1, 0, 1, 0, 0, 0, 0, 0])
# -- TODO: put in a test that guarantees that
# stochastic nodes are sampled in a particular order.
| TestRand |
python | pennersr__django-allauth | allauth/headless/contrib/ninja/security.py | {
"start": 1246,
"end": 1760
} | class ____(AuthBase):
openapi_type: str = "apiKey"
def __call__(self, request: HttpRequest):
token = get_authorization_credential(
request, app_settings.JWT_AUTHORIZATION_HEADER_SCHEME
)
if token is None:
return None
user_payload = validate_access_token(token)
if user_payload is None:
return None
user, payload = user_payload
request.user = user
return payload
jwt_token_auth = JWTTokenAuth()
| JWTTokenAuth |
python | allegroai__clearml | clearml/backend_api/services/v2_20/tasks.py | {
"start": 405950,
"end": 407810
} | class ____(Response):
"""
Response of tasks.stopped endpoint.
:param updated: Number of tasks updated (0 or 1)
:type updated: int
:param fields: Updated fields names and values
:type fields: dict
"""
_service = "tasks"
_action = "stopped"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"fields": {
"additionalProperties": True,
"description": "Updated fields names and values",
"type": ["object", "null"],
},
"updated": {
"description": "Number of tasks updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
},
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, fields: Optional[dict] = None, **kwargs: Any) -> None:
super(StoppedResponse, self).__init__(**kwargs)
self.updated = updated
self.fields = fields
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
@schema_property("fields")
def fields(self) -> Optional[dict]:
return self._property_fields
@fields.setter
def fields(self, value: Optional[dict]) -> None:
if value is None:
self._property_fields = None
return
self.assert_isinstance(value, "fields", (dict,))
self._property_fields = value
| StoppedResponse |
python | pypa__warehouse | tests/unit/search/test_tasks.py | {
"start": 4374,
"end": 5461
} | class ____:
def __init__(self):
self.indices = {}
self.aliases = {}
self.put_settings = pretend.call_recorder(lambda *a, **kw: None)
self.delete = pretend.call_recorder(lambda *a, **kw: None)
self.create = pretend.call_recorder(lambda *a, **kw: None)
def exists_alias(self, name):
return name in self.aliases
def get_alias(self, name):
return self.aliases[name]
def put_alias(self, name, index):
self.aliases.setdefault(name, []).append(index)
def remove_alias(self, name, alias):
self.aliases[name] = [n for n in self.aliases[name] if n != alias]
def update_aliases(self, *, body):
for items in body["actions"]:
for action, values in items.items():
if action == "add":
self.put_alias(values["alias"], values["index"])
elif action == "remove":
self.remove_alias(values["alias"], values["index"])
else:
pytest.fail(f"Unknown action: {action!r}.")
| FakeESIndices |
python | django-debug-toolbar__django-debug-toolbar | example/test_views.py | {
"start": 237,
"end": 388
} | class ____(TestCase):
def test_index(self):
response = self.client.get(reverse("home"))
assert response.status_code == 200
| ViewTestCase |
python | facebook__pyre-check | client/json_rpc.py | {
"start": 539,
"end": 668
} | class ____(Enum):
"""Message type for an LSP warning message."""
WARNING = 2
INFORMATION = 3
| LanguageServerMessageType |
python | numba__numba | numba/core/caching.py | {
"start": 5416,
"end": 5891
} | class ____(_SourceFileBackedLocatorMixin, _CacheLocator):
"""
A locator for functions backed by a regular Python module with a
writable __pycache__ directory.
"""
def __init__(self, py_func, py_file):
self._py_file = py_file
self._lineno = py_func.__code__.co_firstlineno
self._cache_path = os.path.join(os.path.dirname(self._py_file), '__pycache__')
def get_cache_path(self):
return self._cache_path
| InTreeCacheLocator |
python | Lightning-AI__lightning | tests/tests_fabric/strategies/test_fsdp_integration.py | {
"start": 2599,
"end": 3899
} | class ____(BasicTrainer):
def get_model(self):
model = torch.nn.Sequential(torch.nn.Linear(32, 32), torch.nn.ReLU(), torch.nn.Linear(32, 2))
self.num_wrapped = 4
return model
def step(self, model, batch):
wrapped_layers = [m for m in model.modules() if isinstance(m, FullyShardedDataParallel)]
assert len(wrapped_layers) == self.num_wrapped
assert (self.num_wrapped == 4) == isinstance(model._forward_module, FullyShardedDataParallel)
precision = self.fabric._precision
assert isinstance(precision, FSDPPrecision)
if precision.precision in ("16-true", "16-mixed"):
param_dtype = reduce_dtype = buffer_dtype = torch.float16
elif precision.precision in ("bf16-true", "bf16-mixed"):
param_dtype = reduce_dtype = buffer_dtype = torch.bfloat16
else:
raise ValueError(f"Unknown precision {precision.precision}")
for layer in wrapped_layers:
assert layer.mixed_precision.param_dtype == param_dtype
assert layer.mixed_precision.reduce_dtype == reduce_dtype
assert layer.mixed_precision.buffer_dtype == buffer_dtype
output = model(batch)
return torch.nn.functional.mse_loss(output, torch.ones_like(output))
| _Trainer |
python | mlflow__mlflow | mlflow/types/chat.py | {
"start": 5682,
"end": 5808
} | class ____(BaseModel):
index: int
id: str | None = None
type: str | None = None
function: Function
| ToolCallDelta |
python | walkccc__LeetCode | solutions/130. Surrounded Regions/130-2.py | {
"start": 0,
"end": 683
} | class ____:
def solve(self, board: list[list[str]]) -> None:
if not board:
return
m = len(board)
n = len(board[0])
def dfs(i: int, j: int) -> None:
"""Marks the grids with 'O' that stretch from the four sides to '*'."""
if i < 0 or i == m or j < 0 or j == n:
return
if board[i][j] != 'O':
return
board[i][j] = '*'
dfs(i + 1, j)
dfs(i - 1, j)
dfs(i, j + 1)
dfs(i, j - 1)
for i in range(m):
for j in range(n):
if i * j == 0 or i == m - 1 or j == n - 1:
dfs(i, j)
for row in board:
for i, c in enumerate(row):
row[i] = 'O' if c == '*' else 'X'
| Solution |
python | huggingface__transformers | src/transformers/integrations/flex_attention.py | {
"start": 1431,
"end": 13337
} | class ____:
"""
We are doing a singleton class so that flex attention is compiled once when it's first called.
"""
_instance = None
_is_flex_compiled = False
_compiled_flex_attention = None
def __new__(cls, *args, **kwargs):
if cls._instance is None:
# Create a new instance if one doesn't already exist
cls._instance = super().__new__(cls)
return cls._instance
@torch.compiler.disable(recursive=False)
def __init__(self, training):
"""
Initialize or update the singleton instance.
"""
if not self._is_flex_compiled or training != self.training:
self.training = training
if is_torch_less_or_equal("2.5.1"):
self._compiled_flex_attention = torch.compile(flex_attention, dynamic=False)
# In PyTorch 2.6.0, there's a known issue with flex attention compilation which may
# cause errors. The suggested fix is to compile with "max-autotune-no-cudagraphs"
# see https://github.com/pytorch/pytorch/issues/146260 for training
elif version.parse(get_torch_version()).base_version == "2.6.0" and training:
self._compiled_flex_attention = torch.compile(
flex_attention, dynamic=False, mode="max-autotune-no-cudagraphs"
)
# Fallback, usually the most recent torch 2.7.x+ versions
else:
self._compiled_flex_attention = torch.compile(flex_attention)
self._is_flex_compiled = True
def __call__(self):
return self._compiled_flex_attention
def compile_friendly_flex_attention(
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
training=False,
**kwargs,
) -> torch.Tensor | tuple[torch.Tensor, torch.Tensor]:
# First call initialise singleton wrapper object, second call invokes the object method to return compiled flex attention
# Do not use compiled version if already compiling forward (it raises issues)
flex_attention_compiled = WrappedFlexAttention(training)() if not is_torchdynamo_compiling() else flex_attention
return flex_attention_compiled(
query,
key,
value,
**kwargs,
)
Offset = Union[torch.Tensor, int]
# TODO: deprecate / rename to make_flex_block_mask for clarity as it's not only causal anymore
def make_flex_block_causal_mask(
attention_mask_2d: torch.Tensor,
attention_chunk_size: int | None = None,
query_length=None,
key_length=None,
offsets: tuple[Offset, Offset] | None = None,
is_causal: bool | None = True,
) -> "BlockMask":
"""
IMPORTANT NOTICE: This function is deprecated in favor of using the mask primitives in `masking_utils.py`,
and will be removed in a future version without warnings. New code should not use it. It is only kept here
for BC for now, while models using it are being patched accordingly.
Create a block (causal) document mask for a batch of sequences, both packed and unpacked.
Create Block (causal) logic and passing it into :func:`torch.nn.attention.flex_attention.create_block_mask`.
The resultant BlockMask is a compressed representation of the full (causal) block
mask. BlockMask is essential for performant computation of flex attention.
See: https://pytorch.org/blog/flexattention/
Args:
attention_mask_2d (torch.Tensor): Attention mask for packed and padded sequences
of shape (batch_size, total_seq_len). e.g.
For unpacked sequence:
[[1, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0]]
For packed sequence:
[[1, 1, 1, 2, 2, 2, 0],
[1, 1, 2, 2, 2, 3, 3]]
Returns:
BlockMask
"""
batch_size, total_seq_len = attention_mask_2d.shape
if not key_length:
key_length = total_seq_len
if not query_length:
query_length = total_seq_len
# older torch (2.5.x) cannot handle sequences not in multiples of 128 (default block size)
pad_len = ((key_length // flex_default_block_size) + 1) * flex_default_block_size
attention_mask_2d = torch.nn.functional.pad(attention_mask_2d, value=0, pad=(0, pad_len - key_length))
device = attention_mask_2d.device
document_ids = attention_mask_2d.clone()
if attention_chunk_size is not None:
# we create an arange, then we just // by chunk size to get [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]
chunk_idxs = (document_ids.clone().fill_(1).cumsum(-1) - 1) // (attention_chunk_size)
# Instead of passing a tensor mask, flex attention requires a mask_mod function
# that determines which elements of QK^T should be included in the attention
# computation prior to the softmax. For sample packing, we need both the
# logic for both causal mask and document mask. See PyTorch's official
# blog post for more details: https://pytorch.org/blog/flexattention/#mask-mods
def causal_mask_mod(batch_idx, head_idx, q_idx, kv_idx):
"""
Defines the logic of a block causal mask by combining both a standard causal mask
and a block diagonal document mask.
See :func:`~torchtune.modules.attention_utils.create_block_causal_mask`
for an illustration.
"""
causal_mask = q_idx >= kv_idx # not valid when decoding
document_mask = document_ids[batch_idx, q_idx] == document_ids[batch_idx, kv_idx]
padding_mask = attention_mask_2d[batch_idx, q_idx] > 0
final_mask = causal_mask & padding_mask & document_mask
return final_mask
def chunk_causal_mask_mod(batch_idx, head_idx, q_idx, kv_idx):
"""
Combines the chunk mask with the causal mask for chunked attention.
"""
chunk_mask = chunk_idxs[batch_idx, q_idx] == chunk_idxs[batch_idx, kv_idx]
causal_doc_mask = causal_mask_mod(batch_idx, head_idx, q_idx, kv_idx)
return chunk_mask & causal_doc_mask
def default_mask_mod(batch_idx, head_idx, q_idx, kv_idx):
"""
Utilizes default attention mask to enable encoder and encoder-decoder
attention masks.
"""
document_mask = document_ids[batch_idx, q_idx] == document_ids[batch_idx, kv_idx]
# kv indexing is crucial in order to work correctly
padding_mask = attention_mask_2d[batch_idx, kv_idx] > 0
final_mask = padding_mask & document_mask
return final_mask
if not is_causal:
mask_mod_maybe_combined = default_mask_mod
else:
mask_mod_maybe_combined = causal_mask_mod if attention_chunk_size is None else chunk_causal_mask_mod
if offsets is not None:
q_offset = offsets[0].to(device)
kv_offset = offsets[1].to(device)
def mask_mod(batch_idx, head_idx, q_idx, kv_idx):
offset_q = q_idx + q_offset
offset_kv = kv_idx + kv_offset
return mask_mod_maybe_combined(batch_idx, head_idx, offset_q, offset_kv)
else:
mask_mod = mask_mod_maybe_combined
return create_block_mask(
mask_mod=mask_mod,
B=batch_size,
H=None, # attention head
Q_LEN=query_length,
KV_LEN=key_length,
device=device,
# compiling the mask is not BC with older torch
_compile=not is_torch_less_or_equal("2.5.1"),
)
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def flex_attention_forward(
module: torch.nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Union[torch.Tensor, "BlockMask"],
scaling: float | None = None,
softcap: float | None = None,
s_aux: torch.Tensor | None = None,
**kwargs,
) -> tuple[torch.Tensor, torch.Tensor | None]:
if kwargs.get("dropout", 0.0) > 0:
raise ValueError(
"`flex_attention` does not support `dropout`. Please use it with inference"
" only (`model.eval()`) or turn off the attention dropout in the respective config."
)
block_mask = None
score_mask = None
if isinstance(attention_mask, BlockMask):
block_mask = attention_mask
else:
score_mask = attention_mask
if score_mask is not None:
score_mask = score_mask[:, :, :, : key.shape[-2]]
def score_mod(score, batch_idx, head_idx, q_idx, kv_idx):
if softcap is not None:
score = softcap * torch.tanh(score / softcap)
if score_mask is not None:
score = score + score_mask[batch_idx][0][q_idx][kv_idx]
# Note: attention sinks cannot be correctly implemented in score_mod
# because it requires operating on the full attention matrix before softmax.
# ==> this is done after flex attention
return score
enable_gqa = True
num_local_query_heads = query.shape[1]
# When running TP this helps:
if (num_local_query_heads & (num_local_query_heads - 1)) != 0:
key = repeat_kv(key, query.shape[1] // key.shape[1])
value = repeat_kv(value, query.shape[1] // value.shape[1])
enable_gqa = False
kernel_options = kwargs.get("kernel_options")
# On CPU we must skip returning LSE due to a runtime issue; elsewhere, follow PyTorch API and return it
return_lse = query.device.type != "cpu"
if not return_lse and s_aux is not None:
raise ValueError(
"Attention sinks cannot be run on CPU with flex attention. Please switch to a different device, e.g. CUDA"
)
flex_attention_output = compile_friendly_flex_attention(
query,
key,
value,
score_mod=score_mod,
block_mask=block_mask,
enable_gqa=enable_gqa,
scale=scaling,
kernel_options=kernel_options,
# Last time checked on PyTorch == 2.5.1: Flex Attention always computes the lse regardless.
# For simplification, we thus always return it as no additional computations are introduced.
return_lse=return_lse,
training=module.training,
)
# lse is returned in float32
if return_lse:
attention_output, lse = flex_attention_output # type: ignore[misc]
lse = lse.to(value.dtype)
if s_aux is not None:
# Apply attention sinks by renormalizing using LSE
batch_size, num_heads, seq_len_q, _ = attention_output.shape # batch, num_heads, seq_len, head_dim
sinks = s_aux.view(1, -1, 1, 1).expand(batch_size, num_heads, seq_len_q, 1)
# We need to compute the normalization that includes the sinks
# since log(sum(exp(scores))) = lse, exp(log(sum(exp(scores)))) = exp(lse)
# NB: log(sum(exp(scores)) + exp(sink)) = log(exp(lse) + exp(sink))
lse_expanded = lse.unsqueeze(-1) # [batch, num_heads, seq_len, 1]
combined_lse = torch.logsumexp(torch.cat([lse_expanded, sinks], dim=-1), dim=-1, keepdim=True)
# Use new_norm / old_norm = exp(combined_lse - lse) to compute renorm and apply
renorm_factor = torch.exp(lse_expanded - combined_lse)
attention_output = attention_output * renorm_factor
else:
attention_output = flex_attention_output # type: ignore[assignment]
lse = None
attention_output = attention_output.transpose(1, 2).contiguous()
return attention_output, lse
| WrappedFlexAttention |
python | django__django | tests/foreign_object/tests.py | {
"start": 31278,
"end": 32616
} | class ____(TestCase):
@skipUnlessDBFeature("supports_table_check_constraints")
def test_validate_constraints_with_foreign_object(self):
customer_tab = CustomerTab(customer_id=1500)
with self.assertRaisesMessage(ValidationError, "customer_id_limit"):
customer_tab.validate_constraints()
@skipUnlessDBFeature("supports_table_check_constraints")
def test_validate_constraints_success_case_single_query(self):
customer_tab = CustomerTab(customer_id=500)
with CaptureQueriesContext(connection) as ctx:
customer_tab.validate_constraints()
select_queries = [
query["sql"]
for query in ctx.captured_queries
if "select" in query["sql"].lower()
]
self.assertEqual(len(select_queries), 1)
@skipUnlessDBFeature("supports_table_check_constraints")
def test_validate_constraints_excluding_foreign_object(self):
customer_tab = CustomerTab(customer_id=150)
customer_tab.validate_constraints(exclude={"customer"})
@skipUnlessDBFeature("supports_table_check_constraints")
def test_validate_constraints_excluding_foreign_object_member(self):
customer_tab = CustomerTab(customer_id=150)
customer_tab.validate_constraints(exclude={"customer_id"})
| ForeignObjectModelValidationTests |
python | kamyu104__LeetCode-Solutions | Python/minimum-number-of-increments-on-subarrays-to-form-a-target-array.py | {
"start": 29,
"end": 302
} | class ____(object):
def minNumberOperations(self, target):
"""
:type target: List[int]
:rtype: int
"""
return sum(max((target[i] if i < len(target) else 0)-(target[i-1] if i-1 >= 0 else 0), 0) for i in xrange(len(target)+1))
| Solution |
python | psf__black | src/black/linegen.py | {
"start": 33281,
"end": 78665
} | class ____(Enum):
head = auto()
body = auto()
tail = auto()
def left_hand_split(
line: Line, _features: Collection[Feature], mode: Mode
) -> Iterator[Line]:
"""Split line into many lines, starting with the first matching bracket pair.
Note: this usually looks weird, only use this for function definitions.
Prefer RHS otherwise. This is why this function is not symmetrical with
:func:`right_hand_split` which also handles optional parentheses.
"""
for leaf_type in [token.LPAR, token.LSQB]:
tail_leaves: list[Leaf] = []
body_leaves: list[Leaf] = []
head_leaves: list[Leaf] = []
current_leaves = head_leaves
matching_bracket: Leaf | None = None
depth = 0
for index, leaf in enumerate(line.leaves):
if index == 2 and leaf.type == token.LSQB:
# A [ at index 2 means this is a type param, so start
# tracking the depth
depth += 1
elif depth > 0:
if leaf.type == token.LSQB:
depth += 1
elif leaf.type == token.RSQB:
depth -= 1
if (
current_leaves is body_leaves
and leaf.type in CLOSING_BRACKETS
and leaf.opening_bracket is matching_bracket
and isinstance(matching_bracket, Leaf)
# If the code is still on LPAR and we are inside a type
# param, ignore the match since this is searching
# for the function arguments
and not (leaf_type == token.LPAR and depth > 0)
):
ensure_visible(leaf)
ensure_visible(matching_bracket)
current_leaves = tail_leaves if body_leaves else head_leaves
current_leaves.append(leaf)
if current_leaves is head_leaves:
if leaf.type == leaf_type and (
Preview.fix_type_expansion_split not in mode
or not (leaf_type == token.LPAR and depth > 0)
):
matching_bracket = leaf
current_leaves = body_leaves
if matching_bracket and tail_leaves:
break
if not matching_bracket or not tail_leaves:
raise CannotSplit("No brackets found")
head = bracket_split_build_line(
head_leaves, line, matching_bracket, component=_BracketSplitComponent.head
)
body = bracket_split_build_line(
body_leaves, line, matching_bracket, component=_BracketSplitComponent.body
)
tail = bracket_split_build_line(
tail_leaves, line, matching_bracket, component=_BracketSplitComponent.tail
)
bracket_split_succeeded_or_raise(head, body, tail)
for result in (head, body, tail):
if result:
yield result
def right_hand_split(
line: Line,
mode: Mode,
features: Collection[Feature] = (),
omit: Collection[LeafID] = (),
) -> Iterator[Line]:
"""Split line into many lines, starting with the last matching bracket pair.
If the split was by optional parentheses, attempt splitting without them, too.
`omit` is a collection of closing bracket IDs that shouldn't be considered for
this split.
Note: running this function modifies `bracket_depth` on the leaves of `line`.
"""
rhs_result = _first_right_hand_split(line, omit=omit)
yield from _maybe_split_omitting_optional_parens(
rhs_result, line, mode, features=features, omit=omit
)
def _first_right_hand_split(
line: Line,
omit: Collection[LeafID] = (),
) -> RHSResult:
"""Split the line into head, body, tail starting with the last bracket pair.
Note: this function should not have side effects. It's relied upon by
_maybe_split_omitting_optional_parens to get an opinion whether to prefer
splitting on the right side of an assignment statement.
"""
tail_leaves: list[Leaf] = []
body_leaves: list[Leaf] = []
head_leaves: list[Leaf] = []
current_leaves = tail_leaves
opening_bracket: Leaf | None = None
closing_bracket: Leaf | None = None
for leaf in reversed(line.leaves):
if current_leaves is body_leaves:
if leaf is opening_bracket:
current_leaves = head_leaves if body_leaves else tail_leaves
current_leaves.append(leaf)
if current_leaves is tail_leaves:
if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit:
opening_bracket = leaf.opening_bracket
closing_bracket = leaf
current_leaves = body_leaves
if not (opening_bracket and closing_bracket and head_leaves):
# If there is no opening or closing_bracket that means the split failed and
# all content is in the tail. Otherwise, if `head_leaves` are empty, it means
# the matching `opening_bracket` wasn't available on `line` anymore.
raise CannotSplit("No brackets found")
tail_leaves.reverse()
body_leaves.reverse()
head_leaves.reverse()
body: Line | None = None
if (
Preview.hug_parens_with_braces_and_square_brackets in line.mode
and tail_leaves[0].value
and tail_leaves[0].opening_bracket is head_leaves[-1]
):
inner_body_leaves = list(body_leaves)
hugged_opening_leaves: list[Leaf] = []
hugged_closing_leaves: list[Leaf] = []
is_unpacking = body_leaves[0].type in [token.STAR, token.DOUBLESTAR]
unpacking_offset: int = 1 if is_unpacking else 0
while (
len(inner_body_leaves) >= 2 + unpacking_offset
and inner_body_leaves[-1].type in CLOSING_BRACKETS
and inner_body_leaves[-1].opening_bracket
is inner_body_leaves[unpacking_offset]
):
if unpacking_offset:
hugged_opening_leaves.append(inner_body_leaves.pop(0))
unpacking_offset = 0
hugged_opening_leaves.append(inner_body_leaves.pop(0))
hugged_closing_leaves.insert(0, inner_body_leaves.pop())
if hugged_opening_leaves and inner_body_leaves:
inner_body = bracket_split_build_line(
inner_body_leaves,
line,
hugged_opening_leaves[-1],
component=_BracketSplitComponent.body,
)
if (
line.mode.magic_trailing_comma
and inner_body_leaves[-1].type == token.COMMA
):
should_hug = True
else:
line_length = line.mode.line_length - sum(
len(str(leaf))
for leaf in hugged_opening_leaves + hugged_closing_leaves
)
if is_line_short_enough(
inner_body, mode=replace(line.mode, line_length=line_length)
):
# Do not hug if it fits on a single line.
should_hug = False
else:
should_hug = True
if should_hug:
body_leaves = inner_body_leaves
head_leaves.extend(hugged_opening_leaves)
tail_leaves = hugged_closing_leaves + tail_leaves
body = inner_body # No need to re-calculate the body again later.
head = bracket_split_build_line(
head_leaves, line, opening_bracket, component=_BracketSplitComponent.head
)
if body is None:
body = bracket_split_build_line(
body_leaves, line, opening_bracket, component=_BracketSplitComponent.body
)
tail = bracket_split_build_line(
tail_leaves, line, opening_bracket, component=_BracketSplitComponent.tail
)
bracket_split_succeeded_or_raise(head, body, tail)
return RHSResult(head, body, tail, opening_bracket, closing_bracket)
def _maybe_split_omitting_optional_parens(
rhs: RHSResult,
line: Line,
mode: Mode,
features: Collection[Feature] = (),
omit: Collection[LeafID] = (),
) -> Iterator[Line]:
if (
Feature.FORCE_OPTIONAL_PARENTHESES not in features
# the opening bracket is an optional paren
and rhs.opening_bracket.type == token.LPAR
and not rhs.opening_bracket.value
# the closing bracket is an optional paren
and rhs.closing_bracket.type == token.RPAR
and not rhs.closing_bracket.value
# it's not an import (optional parens are the only thing we can split on
# in this case; attempting a split without them is a waste of time)
and not line.is_import
# and we can actually remove the parens
and can_omit_invisible_parens(rhs, mode.line_length)
):
omit = {id(rhs.closing_bracket), *omit}
try:
# The RHSResult Omitting Optional Parens.
rhs_oop = _first_right_hand_split(line, omit=omit)
if _prefer_split_rhs_oop_over_rhs(rhs_oop, rhs, mode):
yield from _maybe_split_omitting_optional_parens(
rhs_oop, line, mode, features=features, omit=omit
)
return
except CannotSplit as e:
# For chained assignments we want to use the previous successful split
if line.is_chained_assignment:
pass
elif (
not can_be_split(rhs.body)
and not is_line_short_enough(rhs.body, mode=mode)
and not (
Preview.wrap_long_dict_values_in_parens
and rhs.opening_bracket.parent
and rhs.opening_bracket.parent.parent
and rhs.opening_bracket.parent.parent.type == syms.dictsetmaker
)
):
raise CannotSplit(
"Splitting failed, body is still too long and can't be split."
) from e
elif (
rhs.head.contains_multiline_strings()
or rhs.tail.contains_multiline_strings()
):
raise CannotSplit(
"The current optional pair of parentheses is bound to fail to"
" satisfy the splitting algorithm because the head or the tail"
" contains multiline strings which by definition never fit one"
" line."
) from e
ensure_visible(rhs.opening_bracket)
ensure_visible(rhs.closing_bracket)
for result in (rhs.head, rhs.body, rhs.tail):
if result:
yield result
def _prefer_split_rhs_oop_over_rhs(
rhs_oop: RHSResult, rhs: RHSResult, mode: Mode
) -> bool:
"""
Returns whether we should prefer the result from a split omitting optional parens
(rhs_oop) over the original (rhs).
"""
# contains unsplittable type ignore
if (
rhs_oop.head.contains_unsplittable_type_ignore()
or rhs_oop.body.contains_unsplittable_type_ignore()
or rhs_oop.tail.contains_unsplittable_type_ignore()
):
return True
# Retain optional parens around dictionary values
if (
Preview.wrap_long_dict_values_in_parens
and rhs.opening_bracket.parent
and rhs.opening_bracket.parent.parent
and rhs.opening_bracket.parent.parent.type == syms.dictsetmaker
and rhs.body.bracket_tracker.delimiters
):
# Unless the split is inside the key
return any(leaf.type == token.COLON for leaf in rhs_oop.tail.leaves)
# the split is right after `=`
if not (len(rhs.head.leaves) >= 2 and rhs.head.leaves[-2].type == token.EQUAL):
return True
# the left side of assignment contains brackets
if not any(leaf.type in BRACKETS for leaf in rhs.head.leaves[:-1]):
return True
# the left side of assignment is short enough (the -1 is for the ending optional
# paren)
if not is_line_short_enough(
rhs.head, mode=replace(mode, line_length=mode.line_length - 1)
):
return True
# the left side of assignment won't explode further because of magic trailing comma
if rhs.head.magic_trailing_comma is not None:
return True
# If we have multiple targets, we prefer more `=`s on the head vs pushing them to
# the body
rhs_head_equal_count = [leaf.type for leaf in rhs.head.leaves].count(token.EQUAL)
rhs_oop_head_equal_count = [leaf.type for leaf in rhs_oop.head.leaves].count(
token.EQUAL
)
if rhs_head_equal_count > 1 and rhs_head_equal_count > rhs_oop_head_equal_count:
return False
has_closing_bracket_after_assign = False
for leaf in reversed(rhs_oop.head.leaves):
if leaf.type == token.EQUAL:
break
if leaf.type in CLOSING_BRACKETS:
has_closing_bracket_after_assign = True
break
return (
# contains matching brackets after the `=` (done by checking there is a
# closing bracket)
has_closing_bracket_after_assign
or (
# the split is actually from inside the optional parens (done by checking
# the first line still contains the `=`)
any(leaf.type == token.EQUAL for leaf in rhs_oop.head.leaves)
# the first line is short enough
and is_line_short_enough(rhs_oop.head, mode=mode)
)
)
def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None:
"""Raise :exc:`CannotSplit` if the last left- or right-hand split failed.
Do nothing otherwise.
A left- or right-hand split is based on a pair of brackets. Content before
(and including) the opening bracket is left on one line, content inside the
brackets is put on a separate line, and finally content starting with and
following the closing bracket is put on a separate line.
Those are called `head`, `body`, and `tail`, respectively. If the split
produced the same line (all content in `head`) or ended up with an empty `body`
and the `tail` is just the closing bracket, then it's considered failed.
"""
tail_len = len(str(tail).strip())
if not body:
if tail_len == 0:
raise CannotSplit("Splitting brackets produced the same line")
elif tail_len < 3:
raise CannotSplit(
f"Splitting brackets on an empty body to save {tail_len} characters is"
" not worth it"
)
def _ensure_trailing_comma(
leaves: list[Leaf], original: Line, opening_bracket: Leaf
) -> bool:
if not leaves:
return False
# Ensure a trailing comma for imports
if original.is_import:
return True
# ...and standalone function arguments
if not original.is_def:
return False
if opening_bracket.value != "(":
return False
# Don't add commas if we already have any commas
if any(
leaf.type == token.COMMA and not is_part_of_annotation(leaf) for leaf in leaves
):
return False
# Find a leaf with a parent (comments don't have parents)
leaf_with_parent = next((leaf for leaf in leaves if leaf.parent), None)
if leaf_with_parent is None:
return True
# Don't add commas inside parenthesized return annotations
if get_annotation_type(leaf_with_parent) == "return":
return False
# Don't add commas inside PEP 604 unions
if (
leaf_with_parent.parent
and leaf_with_parent.parent.next_sibling
and leaf_with_parent.parent.next_sibling.type == token.VBAR
):
return False
return True
def bracket_split_build_line(
    leaves: list[Leaf],
    original: Line,
    opening_bracket: Leaf,
    *,
    component: _BracketSplitComponent,
) -> Line:
    """Return a new line with given `leaves` and respective comments from `original`.

    If it's the head component, brackets will be tracked so trailing commas are
    respected.

    If it's the body component, the result line is one-indented inside brackets and as
    such has its first leaf's prefix normalized and a trailing comma added when
    expected.
    """
    result = Line(mode=original.mode, depth=original.depth)
    if component is _BracketSplitComponent.body:
        result.inside_brackets = True
        result.depth += 1
        if _ensure_trailing_comma(leaves, original, opening_bracket):
            # Insert the comma after the last leaf that is not a standalone
            # comment, scanning from the end; skip if a comma is already there.
            for i in range(len(leaves) - 1, -1, -1):
                if leaves[i].type == STANDALONE_COMMENT:
                    continue
                if leaves[i].type != token.COMMA:
                    new_comma = Leaf(token.COMMA, ",")
                    leaves.insert(i + 1, new_comma)
                break
    leaves_to_track: set[LeafID] = set()
    if component is _BracketSplitComponent.head:
        leaves_to_track = get_leaves_inside_matching_brackets(leaves)
    # Populate the line
    for leaf in leaves:
        result.append(
            leaf,
            preformatted=True,
            track_bracket=id(leaf) in leaves_to_track,
        )
        for comment_after in original.comments_after(leaf):
            result.append(comment_after, preformatted=True)
    if component is _BracketSplitComponent.body and should_split_line(
        result, opening_bracket
    ):
        # The freshly-built body still warrants an immediate further split
        # (e.g. magic trailing comma or an import) — mark it for the caller.
        result.should_split_rhs = True
    return result
def dont_increase_indentation(split_func: Transformer) -> Transformer:
    """Normalize prefix of the first leaf in every line returned by `split_func`.

    This is a decorator over relevant split functions: each produced line gets
    its leading leaf's prefix cleared so the split does not inherit stray
    whitespace from the original line.
    """

    @wraps(split_func)
    def split_wrapper(
        line: Line, features: Collection[Feature], mode: Mode
    ) -> Iterator[Line]:
        produced = split_func(line, features, mode)
        for out_line in produced:
            out_line.leaves[0].prefix = ""
            yield out_line

    return split_wrapper
def _get_last_non_comment_leaf(line: Line) -> int | None:
for leaf_idx in range(len(line.leaves) - 1, 0, -1):
if line.leaves[leaf_idx].type != STANDALONE_COMMENT:
return leaf_idx
return None
def _can_add_trailing_comma(leaf: Leaf, features: Collection[Feature]) -> bool:
    """Return True if a trailing comma after `leaf`'s sequence is safe to emit.

    Variadic markers (`*args` / `**kwargs`) only tolerate a trailing comma when
    the target Python versions support it, which the caller signals through
    `features` (separately for `def` signatures and call sites).
    """
    if is_vararg(leaf, within={syms.typedargslist}):
        return Feature.TRAILING_COMMA_IN_DEF in features
    if is_vararg(leaf, within={syms.arglist, syms.argument}):
        return Feature.TRAILING_COMMA_IN_CALL in features
    return True
def _safe_add_trailing_comma(safe: bool, delimiter_priority: int, line: Line) -> Line:
if (
safe
and delimiter_priority == COMMA_PRIORITY
and line.leaves[-1].type != token.COMMA
and line.leaves[-1].type != STANDALONE_COMMENT
):
new_comma = Leaf(token.COMMA, ",")
line.append(new_comma)
return line
# Delimiter priorities for which `delimiter_split` below changes how a leaf's
# trailing comments are carried over (see its `append_comments` call sites).
MIGRATE_COMMENT_DELIMITERS = {STRING_PRIORITY, COMMA_PRIORITY}
@dont_increase_indentation
def delimiter_split(
    line: Line, features: Collection[Feature], mode: Mode
) -> Iterator[Line]:
    """Split according to delimiters of the highest priority.

    If the appropriate Features are given, the split will add trailing commas
    also in function signatures and calls that contain `*` and `**`.

    Raises CannotSplit when the line is empty, has no delimiters, or when the
    only delimiter is a single attribute-access dot.
    """
    if len(line.leaves) == 0:
        raise CannotSplit("Line empty") from None
    last_leaf = line.leaves[-1]
    bt = line.bracket_tracker
    try:
        # Exclude the trailing leaf so e.g. a trailing comma does not count
        # as the delimiter to split on.
        delimiter_priority = bt.max_delimiter_priority(exclude={id(last_leaf)})
    except ValueError:
        raise CannotSplit("No delimiters found") from None
    if (
        delimiter_priority == DOT_PRIORITY
        and bt.delimiter_count_with_priority(delimiter_priority) == 1
    ):
        raise CannotSplit("Splitting a single attribute from its owner looks wrong")
    current_line = Line(
        mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
    )
    lowest_depth = sys.maxsize
    trailing_comma_safe = True
    def append_to_line(leaf: Leaf) -> Iterator[Line]:
        """Append `leaf` to current line or to new line if appending impossible."""
        nonlocal current_line
        try:
            current_line.append_safe(leaf, preformatted=True)
        except ValueError:
            # The current line cannot take this leaf; emit it and start fresh.
            yield current_line
            current_line = Line(
                mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
            )
            current_line.append(leaf)
    def append_comments(leaf: Leaf) -> Iterator[Line]:
        # Carry comments attached after `leaf` onto the line being built.
        for comment_after in line.comments_after(leaf):
            yield from append_to_line(comment_after)
    last_non_comment_leaf = _get_last_non_comment_leaf(line)
    for leaf_idx, leaf in enumerate(line.leaves):
        yield from append_to_line(leaf)
        previous_priority = leaf_idx > 0 and bt.delimiters.get(
            id(line.leaves[leaf_idx - 1])
        )
        if (
            previous_priority != delimiter_priority
            or delimiter_priority in MIGRATE_COMMENT_DELIMITERS
        ):
            yield from append_comments(leaf)
        lowest_depth = min(lowest_depth, leaf.bracket_depth)
        if trailing_comma_safe and leaf.bracket_depth == lowest_depth:
            trailing_comma_safe = _can_add_trailing_comma(leaf, features)
        if last_leaf.type == STANDALONE_COMMENT and leaf_idx == last_non_comment_leaf:
            # The line ends in a standalone comment: add the trailing comma
            # before the comment rather than after it.
            current_line = _safe_add_trailing_comma(
                trailing_comma_safe, delimiter_priority, current_line
            )
        leaf_priority = bt.delimiters.get(id(leaf))
        if leaf_priority == delimiter_priority:
            if (
                leaf_idx + 1 < len(line.leaves)
                and delimiter_priority not in MIGRATE_COMMENT_DELIMITERS
            ):
                yield from append_comments(line.leaves[leaf_idx + 1])
            yield current_line
            current_line = Line(
                mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
            )
    if current_line:
        current_line = _safe_add_trailing_comma(
            trailing_comma_safe, delimiter_priority, current_line
        )
        yield current_line
@dont_increase_indentation
def standalone_comment_split(
    line: Line, features: Collection[Feature], mode: Mode
) -> Iterator[Line]:
    """Split standalone comments from the rest of the line.

    Each leaf (with any comments attached after it) is appended to the current
    output line when legal; a leaf that cannot share the line forces the
    current line to be yielded and a new one started.

    Raises CannotSplit when `line` contains no standalone comments at all.
    """
    if not line.contains_standalone_comments():
        raise CannotSplit("Line does not have any standalone comments")
    current_line = Line(
        mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
    )

    def append_to_line(leaf: Leaf) -> Iterator[Line]:
        """Append `leaf` to current line or to new line if appending impossible."""
        nonlocal current_line
        try:
            current_line.append_safe(leaf, preformatted=True)
        except ValueError:
            yield current_line
            # Consistency fix: pass `mode` by keyword like every other Line
            # construction in this module (was positional; same behavior).
            current_line = Line(
                mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
            )
            current_line.append(leaf)

    for leaf in line.leaves:
        yield from append_to_line(leaf)
        for comment_after in line.comments_after(leaf):
            yield from append_to_line(comment_after)
    if current_line:
        yield current_line
def normalize_invisible_parens(
    node: Node, parens_after: set[str], *, mode: Mode, features: Collection[Feature]
) -> None:
    """Make existing optional parentheses invisible or create new ones.

    `parens_after` is a set of string leaf values immediately after which parens
    should be put.

    Standardizes on visible parentheses for single-element tuples, and keeps
    existing visible parentheses for other tuples and generator expressions.

    Mutates `node` in place and returns nothing.
    """
    for pc in list_comments(node.prefix, is_endmarker=False, mode=mode):
        if pc.value in FMT_OFF:
            # This `node` has a prefix with `# fmt: off`, don't mess with parens.
            return
    # The multiple context managers grammar has a different pattern, thus this is
    # separate from the for-loop below. This possibly wraps them in invisible parens,
    # and later will be removed in remove_with_parens when needed.
    if node.type == syms.with_stmt:
        _maybe_wrap_cms_in_parens(node, mode, features)
    # Tracks whether the *previous* child makes the *current* one eligible for
    # invisible-paren handling; updated at the bottom of the loop body.
    check_lpar = False
    for index, child in enumerate(list(node.children)):
        # Fixes a bug where invisible parens are not properly stripped from
        # assignment statements that contain type annotations.
        if isinstance(child, Node) and child.type == syms.annassign:
            normalize_invisible_parens(
                child, parens_after=parens_after, mode=mode, features=features
            )
        # Fixes a bug where invisible parens are not properly wrapped around
        # case blocks.
        if isinstance(child, Node) and child.type == syms.case_block:
            normalize_invisible_parens(
                child, parens_after={"case"}, mode=mode, features=features
            )
        # Add parentheses around if guards in case blocks
        if isinstance(child, Node) and child.type == syms.guard:
            normalize_invisible_parens(
                child, parens_after={"if"}, mode=mode, features=features
            )
        # Add parentheses around long tuple unpacking in assignments.
        if (
            index == 0
            and isinstance(child, Node)
            and child.type == syms.testlist_star_expr
        ):
            check_lpar = True
        # Check for assignment LHS with preview feature enabled
        if (
            Preview.remove_parens_from_assignment_lhs in mode
            and index == 0
            and isinstance(child, Node)
            and child.type == syms.atom
            and node.type == syms.expr_stmt
            and not _atom_has_magic_trailing_comma(child, mode)
            and not _is_atom_multiline(child)
        ):
            if maybe_make_parens_invisible_in_atom(
                child,
                parent=node,
                mode=mode,
                features=features,
                remove_brackets_around_comma=True,
                allow_star_expr=True,
            ):
                wrap_in_parentheses(node, child, visible=False)
        if check_lpar:
            # Parenthesized loop target: `for (x, y) in ...`.
            if (
                child.type == syms.atom
                and node.type == syms.for_stmt
                and isinstance(child.prev_sibling, Leaf)
                and child.prev_sibling.type == token.NAME
                and child.prev_sibling.value == "for"
            ):
                if maybe_make_parens_invisible_in_atom(
                    child,
                    parent=node,
                    mode=mode,
                    features=features,
                    remove_brackets_around_comma=True,
                ):
                    wrap_in_parentheses(node, child, visible=False)
            elif isinstance(child, Node) and node.type == syms.with_stmt:
                remove_with_parens(child, node, mode=mode, features=features)
            elif child.type == syms.atom and not (
                # Keep visible parens around a conditional expression that
                # follows `in` (e.g. `x in (a if c else b)`).
                "in" in parens_after
                and len(child.children) == 3
                and is_lpar_token(child.children[0])
                and is_rpar_token(child.children[-1])
                and child.children[1].type == syms.test
            ):
                if maybe_make_parens_invisible_in_atom(
                    child, parent=node, mode=mode, features=features
                ):
                    wrap_in_parentheses(node, child, visible=False)
            elif is_one_tuple(child):
                # Single-element tuples always get visible parentheses.
                wrap_in_parentheses(node, child, visible=True)
            elif node.type == syms.import_from:
                _normalize_import_from(node, child, index)
                break
            elif (
                index == 1
                and child.type == token.STAR
                and node.type == syms.except_clause
            ):
                # In except* (PEP 654), the star is actually part of
                # the keyword. So we need to skip the insertion of
                # invisible parentheses to work more precisely.
                continue
            elif (
                isinstance(child, Leaf)
                and child.next_sibling is not None
                and child.next_sibling.type == token.COLON
                and child.value == "case"
            ):
                # A special patch for "case case:" scenario, the second occurrence
                # of case will be not parsed as a Python keyword.
                break
            elif not is_multiline_string(child):
                wrap_in_parentheses(node, child, visible=False)
        comma_check = child.type == token.COMMA
        check_lpar = isinstance(child, Leaf) and (
            child.value in parens_after or comma_check
        )
def _normalize_import_from(parent: Node, child: LN, index: int) -> None:
    """Hide or synthesize parentheses on a `from ... import ...` statement.

    "import from" nodes store parentheses directly as part of the statement,
    so we blank out an existing pair in place, or append a fresh invisible
    pair when none is present.  `from x import *` is left untouched.
    """
    if is_lpar_token(child):
        closing = parent.children[-1]
        assert is_rpar_token(closing)
        # Existing pair: blank out both tokens so they become invisible.
        child.value = ""
        closing.value = ""
        return
    if child.type == token.STAR:
        # Star imports never take parentheses.
        return
    # No parens present: surround the import list with an invisible pair.
    parent.insert_child(index, Leaf(token.LPAR, ""))
    parent.append_child(Leaf(token.RPAR, ""))
def remove_await_parens(node: Node, mode: Mode, features: Collection[Feature]) -> None:
    """Make redundant parentheses directly after `await` invisible, when safe.

    Mutates `node` in place.  Only acts when the node starts with an AWAIT
    token followed by a parenthesized atom; brackets are kept visible where
    removing them would change operator precedence (see the notes below).
    """
    if node.children[0].type == token.AWAIT and len(node.children) > 1:
        if (
            node.children[1].type == syms.atom
            and node.children[1].children[0].type == token.LPAR
        ):
            if maybe_make_parens_invisible_in_atom(
                node.children[1],
                parent=node,
                mode=mode,
                features=features,
                remove_brackets_around_comma=True,
            ):
                wrap_in_parentheses(node, node.children[1], visible=False)
            # Since await is an expression we shouldn't remove
            # brackets in cases where this would change
            # the AST due to operator precedence.
            # Therefore we only aim to remove brackets around
            # power nodes that aren't also await expressions themselves.
            # https://peps.python.org/pep-0492/#updated-operator-precedence-table
            # N.B. We've still removed any redundant nested brackets though :)
            opening_bracket = cast(Leaf, node.children[1].children[0])
            closing_bracket = cast(Leaf, node.children[1].children[-1])
            bracket_contents = node.children[1].children[1]
            if isinstance(bracket_contents, Node) and (
                bracket_contents.type != syms.power
                or bracket_contents.children[0].type == token.AWAIT
                or any(
                    isinstance(child, Leaf) and child.type == token.DOUBLESTAR
                    for child in bracket_contents.children
                )
            ):
                # Precedence-sensitive content: re-show the bracket pair.
                ensure_visible(opening_bracket)
                ensure_visible(closing_bracket)
def _maybe_wrap_cms_in_parens(
    node: Node, mode: Mode, features: Collection[Feature]
) -> None:
    """When enabled and safe, wrap the multiple context managers in invisible parens.

    It is only safe when `features` contain Feature.PARENTHESIZED_CONTEXT_MANAGERS.
    Mutates `node` (a with_stmt) in place; does nothing when the managers are
    already parenthesized (i.e. child 1 is an atom) or no colon is found.
    """
    if Feature.PARENTHESIZED_CONTEXT_MANAGERS not in features:
        return
    if len(node.children) <= 2:
        return
    if node.children[1].type == syms.atom:
        # Already an atom means already wrapped in parens.
        return
    colon_index = next(
        (
            i
            for i in range(2, len(node.children))
            if node.children[i].type == token.COLON
        ),
        None,
    )
    if colon_index is None:
        return
    invisible_lpar = Leaf(token.LPAR, "")
    invisible_rpar = Leaf(token.RPAR, "")
    context_managers = node.children[1:colon_index]
    for manager in context_managers:
        manager.remove()
    # Resulting shape:
    #   with_stmt
    #       NAME 'with'
    #       atom
    #           LPAR ''
    #           testlist_gexp
    #               ... <-- context_managers
    #           /testlist_gexp
    #           RPAR ''
    #       /atom
    #       COLON ':'
    wrapped = Node(
        syms.atom,
        [invisible_lpar, Node(syms.testlist_gexp, context_managers), invisible_rpar],
    )
    node.insert_child(1, wrapped)
def remove_with_parens(
    node: Node, parent: Node, mode: Mode, features: Collection[Feature]
) -> None:
    """Recursively hide optional parens in `with` statements.

    Mutates `node` in place; `parent` is the node's enclosing parent, needed
    by `maybe_make_parens_invisible_in_atom` to judge safety.
    """
    # Removing all unnecessary parentheses in with statements in one pass is a tad
    # complex as different variations of bracketed statements result in pretty
    # different parse trees:
    #
    # with (open("file")) as f:                       # this is an asexpr_test
    #     ...
    #
    # with (open("file") as f):                       # this is an atom containing an
    #     ...                                         # asexpr_test
    #
    # with (open("file")) as f, (open("file")) as f:  # this is asexpr_test, COMMA,
    #     ...                                         # asexpr_test
    #
    # with (open("file") as f, open("file") as f):    # an atom containing a
    #     ...                                         # testlist_gexp which then
    #                                                 # contains multiple asexpr_test(s)
    if node.type == syms.atom:
        if maybe_make_parens_invisible_in_atom(
            node,
            parent=parent,
            mode=mode,
            features=features,
            remove_brackets_around_comma=True,
        ):
            wrap_in_parentheses(parent, node, visible=False)
        if isinstance(node.children[1], Node):
            # Recurse into the atom's contents (between the parens).
            remove_with_parens(node.children[1], node, mode=mode, features=features)
    elif node.type == syms.testlist_gexp:
        for child in node.children:
            if isinstance(child, Node):
                remove_with_parens(child, node, mode=mode, features=features)
    elif node.type == syms.asexpr_test and not any(
        leaf.type == token.COLONEQUAL for leaf in node.leaves()
    ):
        # Walrus assignments need their parentheses, so they are excluded.
        if maybe_make_parens_invisible_in_atom(
            node.children[0],
            parent=node,
            mode=mode,
            features=features,
            remove_brackets_around_comma=True,
        ):
            wrap_in_parentheses(node, node.children[0], visible=False)
def _atom_has_magic_trailing_comma(node: LN, mode: Mode) -> bool:
"""Check if an atom node has a magic trailing comma.
Returns True for single-element tuples with trailing commas like (a,),
which should be preserved to maintain their tuple type.
"""
if not mode.magic_trailing_comma:
return False
return is_one_tuple(node)
def _is_atom_multiline(node: LN) -> bool:
    """Check if an atom node is multiline (indicating intentional formatting).

    The middle child (between LPAR and RPAR) is scanned for newlines in leaf
    prefixes; the opening paren's own prefix is skipped because it holds blank
    lines/comments *before* the atom rather than inside it.
    """
    if not isinstance(node, Node) or len(node.children) < 3:
        return False
    inner = node.children[1]
    return any(
        isinstance(sub, Leaf) and "\n" in sub.prefix for sub in inner.pre_order()
    )
def maybe_make_parens_invisible_in_atom(
    node: LN,
    parent: LN,
    mode: Mode,
    features: Collection[Feature],
    remove_brackets_around_comma: bool = False,
    allow_star_expr: bool = False,
) -> bool:
    """If it's safe, make the parens in the atom `node` invisible, recursively.

    Additionally, remove repeated, adjacent invisible parens from the atom `node`
    as they are redundant.

    Returns whether the node should itself be wrapped in invisible parentheses.
    """
    if (
        node.type not in (syms.atom, syms.expr)
        or is_empty_tuple(node)
        or is_one_tuple(node)
        or (is_tuple(node) and parent.type == syms.asexpr_test)
        or (
            is_tuple(node)
            and parent.type == syms.with_stmt
            and has_sibling_with_type(node, token.COMMA)
        )
        or (is_yield(node) and parent.type != syms.expr_stmt)
        or (
            # This condition tries to prevent removing non-optional brackets
            # around a tuple, however, can be a bit overzealous so we provide
            # and option to skip this check for `for` and `with` statements.
            not remove_brackets_around_comma
            and max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY
            # Skip this check in Preview mode in order to
            # Remove parentheses around multiple exception types in except and
            # except* without as. See PEP 758 for details.
            and not (
                Preview.remove_parens_around_except_types in mode
                and Feature.UNPARENTHESIZED_EXCEPT_TYPES in features
                # is a tuple
                and is_tuple(node)
                # has a parent node
                and node.parent is not None
                # parent is an except clause
                and node.parent.type == syms.except_clause
                # is not immediately followed by as clause
                and not (
                    node.next_sibling is not None
                    and is_name_token(node.next_sibling)
                    and node.next_sibling.value == "as"
                )
            )
        )
        or is_tuple_containing_walrus(node)
        or (not allow_star_expr and is_tuple_containing_star(node))
        or is_generator(node)
    ):
        return False
    if is_walrus_assignment(node):
        # Fix: the membership list below previously contained `syms.for_stmt`
        # twice; the duplicate entry was removed (membership semantics are
        # unchanged).
        if parent.type in [
            syms.annassign,
            syms.expr_stmt,
            syms.assert_stmt,
            syms.return_stmt,
            syms.except_clause,
            syms.funcdef,
            syms.with_stmt,
            syms.testlist_gexp,
            syms.tname,
            # these ones aren't useful to end users, but they do please fuzzers
            syms.for_stmt,
            syms.del_stmt,
        ]:
            return False
    first = node.children[0]
    last = node.children[-1]
    if is_lpar_token(first) and is_rpar_token(last):
        middle = node.children[1]
        # make parentheses invisible
        if (
            # If the prefix of `middle` includes a type comment with
            # ignore annotation, then we do not remove the parentheses
            not is_type_ignore_comment_string(middle.prefix.strip(), mode=mode)
        ):
            first.value = ""
            last.value = ""
        maybe_make_parens_invisible_in_atom(
            middle,
            parent=parent,
            mode=mode,
            features=features,
            remove_brackets_around_comma=remove_brackets_around_comma,
        )
        if is_atom_with_invisible_parens(middle):
            # Strip the invisible parens from `middle` by replacing
            # it with the child in-between the invisible parens
            middle.replace(middle.children[1])
            if middle.children[0].prefix.strip():
                # Preserve comments before first paren
                middle.children[1].prefix = (
                    middle.children[0].prefix + middle.children[1].prefix
                )
            if middle.children[-1].prefix.strip():
                # Preserve comments before last paren
                last.prefix = middle.children[-1].prefix + last.prefix
        return False
    return True
def should_split_line(line: Line, opening_bracket: Leaf) -> bool:
    """Should `line` be immediately split with `delimiter_split()` after RHS?

    True when the bracketed body is comma-delimited and either carries a magic
    trailing comma (with the mode honoring them) or lives in an atom/import,
    which are always exploded.
    """
    if not (opening_bracket.parent and opening_bracket.value in "[{("):
        return False
    # We're essentially checking if the body is delimited by commas and there's more
    # than one of them (we're excluding the trailing comma and if the delimiter priority
    # is still commas, that means there's more).
    exclude = set()
    trailing_comma = False
    try:
        last_leaf = line.leaves[-1]
        if last_leaf.type == token.COMMA:
            trailing_comma = True
            exclude.add(id(last_leaf))
        max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude)
    except (IndexError, ValueError):
        # Empty line, or no delimiters to split on.
        return False
    return max_priority == COMMA_PRIORITY and (
        (line.mode.magic_trailing_comma and trailing_comma)
        # always explode imports
        or opening_bracket.parent.type in {syms.atom, syms.import_from}
    )
def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[set[LeafID]]:
    """Generate sets of closing bracket IDs that should be omitted in a RHS.

    Brackets can be omitted if the entire trailer up to and including
    a preceding closing bracket fits in one line.

    Yielded sets are cumulative (contain results of previous yields, too). First
    set is empty, unless the line should explode, in which case bracket pairs until
    the one that needs to explode are omitted.
    """
    omit: set[LeafID] = set()
    if not line.magic_trailing_comma:
        yield omit
    length = 4 * line.depth
    opening_bracket: Leaf | None = None
    closing_bracket: Leaf | None = None
    inner_brackets: set[LeafID] = set()
    # Walk the line right-to-left, accumulating rendered width as we go.
    for index, leaf, leaf_length in line.enumerate_with_length(is_reversed=True):
        length += leaf_length
        if length > line_length:
            break
        has_inline_comment = leaf_length > len(leaf.value) + len(leaf.prefix)
        if leaf.type == STANDALONE_COMMENT or has_inline_comment:
            # Comments pin the line in place; stop scanning.
            break
        if opening_bracket:
            # We're inside a bracket pair, scanning towards its opener.
            if leaf is opening_bracket:
                opening_bracket = None
            elif leaf.type in CLOSING_BRACKETS:
                prev = line.leaves[index - 1] if index > 0 else None
                if (
                    prev
                    and prev.type == token.COMMA
                    and leaf.opening_bracket is not None
                    and not is_one_sequence_between(
                        leaf.opening_bracket, leaf, line.leaves
                    )
                ):
                    # Never omit bracket pairs with trailing commas.
                    # We need to explode on those.
                    break
                inner_brackets.add(id(leaf))
        elif leaf.type in CLOSING_BRACKETS:
            prev = line.leaves[index - 1] if index > 0 else None
            if prev and prev.type in OPENING_BRACKETS:
                # Empty brackets would fail a split so treat them as "inner"
                # brackets (e.g. only add them to the `omit` set if another
                # pair of brackets was good enough.
                inner_brackets.add(id(leaf))
                continue
            if closing_bracket:
                # The previously seen bracket pair (and everything inside it)
                # fits; extend the cumulative omit set and yield it.
                omit.add(id(closing_bracket))
                omit.update(inner_brackets)
                inner_brackets.clear()
                yield omit
            if (
                prev
                and prev.type == token.COMMA
                and leaf.opening_bracket is not None
                and not is_one_sequence_between(leaf.opening_bracket, leaf, line.leaves)
            ):
                # Never omit bracket pairs with trailing commas.
                # We need to explode on those.
                break
            if leaf.value:
                opening_bracket = leaf.opening_bracket
                closing_bracket = leaf
def run_transformer(
    line: Line,
    transform: Transformer,
    mode: Mode,
    features: Collection[Feature],
    *,
    line_str: str = "",
) -> list[Line]:
    """Apply `transform` to `line`, recursively re-splitting each produced line.

    `line_str` is the rendered form of `line`; it is computed here when not
    supplied by the caller.

    Raises CannotTransform when the transformer returns output identical to
    its input.  For a right-hand-side split whose optional parens are all
    invisible, a second pass with Feature.FORCE_OPTIONAL_PARENTHESES is
    attempted and preferred when every one of its output lines fits the
    configured line length.
    """
    if not line_str:
        line_str = line_to_string(line)
    result: list[Line] = []
    for transformed_line in transform(line, features, mode):
        if str(transformed_line).strip("\n") == line_str:
            raise CannotTransform("Line transformer returned an unchanged result")
        result.extend(transform_line(transformed_line, mode=mode, features=features))
    features_set = set(features)
    # Bail out of the second-opinion pass unless this was an "rhs" split over
    # purely invisible brackets whose first result line is still too long.
    if (
        Feature.FORCE_OPTIONAL_PARENTHESES in features_set
        or transform.__class__.__name__ != "rhs"
        or not line.bracket_tracker.invisible
        or any(bracket.value for bracket in line.bracket_tracker.invisible)
        or line.contains_multiline_strings()
        or result[0].contains_uncollapsable_type_comments()
        or result[0].contains_unsplittable_type_ignore()
        or is_line_short_enough(result[0], mode=mode)
        # If any leaves have no parents (which _can_ occur since
        # `transform(line)` potentially destroys the line's underlying node
        # structure), then we can't proceed. Doing so would cause the below
        # call to `append_leaves()` to fail.
        or any(leaf.parent is None for leaf in line.leaves)
    ):
        return result
    line_copy = line.clone()
    append_leaves(line_copy, line, line.leaves)
    features_fop = features_set | {Feature.FORCE_OPTIONAL_PARENTHESES}
    second_opinion = run_transformer(
        line_copy, transform, mode, features_fop, line_str=line_str
    )
    if all(is_line_short_enough(ln, mode=mode) for ln in second_opinion):
        result = second_opinion
    return result
| _BracketSplitComponent |
python | celery__celery | t/smoke/workers/alt.py | {
"start": 251,
"end": 1671
} | class ____(SmokeWorkerContainer):
"""Alternative worker with different name, but same configurations."""
@classmethod
def worker_name(cls) -> str:
return "alt_smoke_tests_worker"
# Build the image like the dev worker
celery_alt_dev_worker_image = build(
path=".",
dockerfile="t/smoke/workers/docker/dev",
tag="t/smoke/worker:alt",
buildargs=AltSmokeWorkerContainer.buildargs(),
)
# Define container settings like the dev worker
alt_dev_worker_container = container(
image="{celery_alt_dev_worker_image.id}",
environment=fxtr("default_worker_env"),
network="{default_pytest_celery_network.name}",
volumes={
# Volume: Worker /app
"{default_worker_volume.name}": defaults.DEFAULT_WORKER_VOLUME,
# Mount: Celery source
os.path.abspath(os.getcwd()): {
"bind": "/celery",
"mode": "rw",
},
},
wrapper_class=AltSmokeWorkerContainer,
timeout=defaults.DEFAULT_WORKER_CONTAINER_TIMEOUT,
command=AltSmokeWorkerContainer.command(),
)
@pytest.fixture
def celery_alt_dev_worker(
alt_dev_worker_container: AltSmokeWorkerContainer,
celery_setup_app: Celery,
) -> CeleryTestWorker:
"""Creates a pytest-celery worker node from the worker container."""
worker = CeleryTestWorker(alt_dev_worker_container, app=celery_setup_app)
yield worker
worker.teardown()
| AltSmokeWorkerContainer |
python | gevent__gevent | src/greentest/3.13/test_selectors.py | {
"start": 15395,
"end": 17552
} | class ____:
# see issue #18963 for why it's skipped on older OS X versions
@support.requires_mac_ver(10, 5)
@unittest.skipUnless(resource, "Test needs resource module")
@support.requires_resource('cpu')
def test_above_fd_setsize(self):
# A scalable implementation should have no problem with more than
# FD_SETSIZE file descriptors. Since we don't know the value, we just
# try to set the soft RLIMIT_NOFILE to the hard RLIMIT_NOFILE ceiling.
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
self.addCleanup(resource.setrlimit, resource.RLIMIT_NOFILE,
(soft, hard))
NUM_FDS = min(hard, 2**16)
except (OSError, ValueError):
NUM_FDS = soft
# guard for already allocated FDs (stdin, stdout...)
NUM_FDS -= 32
s = self.SELECTOR()
self.addCleanup(s.close)
for i in range(NUM_FDS // 2):
try:
rd, wr = self.make_socketpair()
except OSError:
# too many FDs, skip - note that we should only catch EMFILE
# here, but apparently *BSD and Solaris can fail upon connect()
# or bind() with EADDRNOTAVAIL, so let's be safe
self.skipTest("FD limit reached")
try:
s.register(rd, selectors.EVENT_READ)
s.register(wr, selectors.EVENT_WRITE)
except OSError as e:
if e.errno == errno.ENOSPC:
# this can be raised by epoll if we go over
# fs.epoll.max_user_watches sysctl
self.skipTest("FD limit reached")
raise
try:
fds = s.select()
except OSError as e:
if e.errno == errno.EINVAL and is_apple:
# unexplainable errors on macOS don't need to fail the test
self.skipTest("Invalid argument error calling poll()")
raise
self.assertEqual(NUM_FDS // 2, len(fds))
| ScalableSelectorMixIn |
python | oauthlib__oauthlib | oauthlib/oauth2/rfc6749/grant_types/client_credentials.py | {
"start": 200,
"end": 5051
} | class ____(GrantTypeBase):
"""`Client Credentials Grant`_
The client can request an access token using only its client
credentials (or other supported means of authentication) when the
client is requesting access to the protected resources under its
control, or those of another resource owner that have been previously
arranged with the authorization server (the method of which is beyond
the scope of this specification).
The client credentials grant type MUST only be used by confidential
clients::
+---------+ +---------------+
: : : :
: :>-- A - Client Authentication --->: Authorization :
: Client : : Server :
: :<-- B ---- Access Token ---------<: :
: : : :
+---------+ +---------------+
Figure 6: Client Credentials Flow
The flow illustrated in Figure 6 includes the following steps:
(A) The client authenticates with the authorization server and
requests an access token from the token endpoint.
(B) The authorization server authenticates the client, and if valid,
issues an access token.
.. _`Client Credentials Grant`: https://tools.ietf.org/html/rfc6749#section-4.4
"""
def create_token_response(self, request, token_handler):
"""Return token or error in JSON format.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:param token_handler: A token handler instance, for example of type
oauthlib.oauth2.BearerToken.
If the access token request is valid and authorized, the
authorization server issues an access token as described in
`Section 5.1`_. A refresh token SHOULD NOT be included. If the request
failed client authentication or is invalid, the authorization server
returns an error response as described in `Section 5.2`_.
.. _`Section 5.1`: https://tools.ietf.org/html/rfc6749#section-5.1
.. _`Section 5.2`: https://tools.ietf.org/html/rfc6749#section-5.2
"""
headers = self._get_default_headers()
try:
log.debug('Validating access token request, %r.', request)
self.validate_token_request(request)
except errors.OAuth2Error as e:
log.debug('Client error in token request. %s.', e)
headers.update(e.headers)
return headers, e.json, e.status_code
token = token_handler.create_token(request, refresh_token=False)
for modifier in self._token_modifiers:
token = modifier(token)
self.request_validator.save_token(token, request)
log.debug('Issuing token to client id %r (%r), %r.',
request.client_id, request.client, token)
return headers, json.dumps(token), 200
def validate_token_request(self, request):
"""
:param request: OAuthlib request.
:type request: oauthlib.common.Request
"""
for validator in self.custom_validators.pre_token:
validator(request)
if not getattr(request, 'grant_type', None):
raise errors.InvalidRequestError('Request is missing grant type.',
request=request)
if not request.grant_type == 'client_credentials':
raise errors.UnsupportedGrantTypeError(request=request)
for param in ('grant_type', 'scope'):
if param in request.duplicate_params:
raise errors.InvalidRequestError(description='Duplicate %s parameter.' % param,
request=request)
log.debug('Authenticating client, %r.', request)
if not self.request_validator.authenticate_client(request):
log.debug('Client authentication failed, %r.', request)
raise errors.InvalidClientError(request=request)
elif not hasattr(request.client, 'client_id'):
raise NotImplementedError('Authenticate client must set the '
'request.client.client_id attribute '
'in authenticate_client.')
# Ensure client is authorized use of this grant type
self.validate_grant_type(request)
request.client_id = request.client_id or request.client.client_id
log.debug('Authorizing access to client %r.', request.client_id)
self.validate_scopes(request)
for validator in self.custom_validators.post_token:
validator(request)
| ClientCredentialsGrant |
python | jazzband__django-formtools | tests/wizard/namedwizardtests/tests.py | {
"start": 15886,
"end": 16102
} | class ____(NamedFormTests, TestCase):
formwizard_class = TestNamedUrlSessionWizardView
wizard_urlname = 'nwiz_session'
@override_settings(ROOT_URLCONF='tests.wizard.namedwizardtests.urls')
| NamedSessionFormTests |
python | ApeWorX__ape | tests/functional/test_test.py | {
"start": 11344,
"end": 14385
} | class ____:
def test_from_test_item(self, item):
actual = FixtureMap.from_test_item(item)
assert actual[Scope.SESSION] == ["foo"]
assert actual[Scope.MODULE] == ["bar"]
assert actual[Scope.CLASS] == ["baz"]
def test_names(self, fixture_map):
"""
Show that we have both the initialized fixtures as well
as the properly injected isolation fixtures. Order is
EXTREMELY important here! It determines the order in which
fixtures run; isolation should run before their sister fixtures.
Function isolation is expected even when not using other function-scoped
fixtures. Package isolation is missing because there are no
package-scoped fixtures being used.
"""
actual = fixture_map.names
expected = [
"_session_isolation",
"foo",
"_module_isolation",
"bar",
"_class_isolation",
"baz",
"_function_isolation",
]
assert actual == expected
def test_parameters(self, fixture_map):
actual = fixture_map.parameters
expected = ["param0", "param1"]
assert actual == expected
def test_isolation(self, fixture_map):
actual = fixture_map.isolation
expected = [
"session",
"module",
"class",
"function",
]
assert actual == expected
def test_parametrized(self, fixture_map):
actual = fixture_map.parametrized
assert "foo" in actual
assert len(actual) == 1
def test_get_info(self, fixture_map):
actual = fixture_map.get_info("foo")
assert len(actual) == 1
assert actual[0].argname == "foo"
assert actual[0].scope == Scope.SESSION
def test_is_known(self, fixture_map):
assert fixture_map.is_known("foo")
assert not fixture_map.is_known("param0")
def test_is_iterating(self, fixture_map):
assert fixture_map.is_iterating("foo")
assert not fixture_map.is_iterating("baz")
# Iterate.
fixture_map._item.session._fixturemanager._arg2fixturedefs["foo"][0].cached_result = (
None,
1,
None,
)
assert fixture_map.is_iterating("foo")
# Complete.
fixture_map._item.session._fixturemanager._arg2fixturedefs["foo"][0].cached_result = (
None,
3,
None,
)
assert not fixture_map.is_iterating("foo")
def test_apply_fixturenames(self, fixture_map):
assert fixture_map._item.fixturenames == ["foo", "bar", "baz", "param0", "param1"]
fixture_map.apply_fixturenames()
assert fixture_map._item.fixturenames == [
"_session_isolation",
"foo",
"_module_isolation",
"bar",
"_class_isolation",
"baz",
"_function_isolation",
"param0",
"param1",
]
| TestFixtureMap |
python | doocs__leetcode | solution/3100-3199/3117.Minimum Sum of Values by Dividing Array/Solution.py | {
"start": 0,
"end": 631
} | class ____:
def minimumValueSum(self, nums: List[int], andValues: List[int]) -> int:
@cache
def dfs(i: int, j: int, a: int) -> int:
if n - i < m - j:
return inf
if j == m:
return 0 if i == n else inf
a &= nums[i]
if a < andValues[j]:
return inf
ans = dfs(i + 1, j, a)
if a == andValues[j]:
ans = min(ans, dfs(i + 1, j + 1, -1) + nums[i])
return ans
n, m = len(nums), len(andValues)
ans = dfs(0, 0, -1)
return ans if ans < inf else -1
| Solution |
python | getsentry__sentry-python | sentry_sdk/client.py | {
"start": 8026,
"end": 42519
} | class ____(BaseClient):
"""
The client is internally responsible for capturing the events and
forwarding them to sentry through the configured transport. It takes
the client options as keyword arguments and optionally the DSN as first
argument.
Alias of :py:class:`sentry_sdk.Client`. (Was created for better intelisense support)
"""
def __init__(self, *args, **kwargs):
# type: (*Any, **Any) -> None
super(_Client, self).__init__(options=get_options(*args, **kwargs))
self._init_impl()
def __getstate__(self):
# type: () -> Any
return {"options": self.options}
def __setstate__(self, state):
# type: (Any) -> None
self.options = state["options"]
self._init_impl()
def _setup_instrumentation(self, functions_to_trace):
# type: (Sequence[Dict[str, str]]) -> None
"""
Instruments the functions given in the list `functions_to_trace` with the `@sentry_sdk.tracing.trace` decorator.
"""
for function in functions_to_trace:
class_name = None
function_qualname = function["qualified_name"]
module_name, function_name = function_qualname.rsplit(".", 1)
try:
# Try to import module and function
# ex: "mymodule.submodule.funcname"
module_obj = import_module(module_name)
function_obj = getattr(module_obj, function_name)
setattr(module_obj, function_name, trace(function_obj))
logger.debug("Enabled tracing for %s", function_qualname)
except module_not_found_error:
try:
# Try to import a class
# ex: "mymodule.submodule.MyClassName.member_function"
module_name, class_name = module_name.rsplit(".", 1)
module_obj = import_module(module_name)
class_obj = getattr(module_obj, class_name)
function_obj = getattr(class_obj, function_name)
function_type = type(class_obj.__dict__[function_name])
traced_function = trace(function_obj)
if function_type in (staticmethod, classmethod):
traced_function = staticmethod(traced_function)
setattr(class_obj, function_name, traced_function)
setattr(module_obj, class_name, class_obj)
logger.debug("Enabled tracing for %s", function_qualname)
except Exception as e:
logger.warning(
"Can not enable tracing for '%s'. (%s) Please check your `functions_to_trace` parameter.",
function_qualname,
e,
)
except Exception as e:
logger.warning(
"Can not enable tracing for '%s'. (%s) Please check your `functions_to_trace` parameter.",
function_qualname,
e,
)
def _init_impl(self):
# type: () -> None
old_debug = _client_init_debug.get(False)
def _capture_envelope(envelope):
# type: (Envelope) -> None
if self.transport is not None:
self.transport.capture_envelope(envelope)
def _record_lost_event(
reason, # type: str
data_category, # type: EventDataCategory
item=None, # type: Optional[Item]
quantity=1, # type: int
):
# type: (...) -> None
if self.transport is not None:
self.transport.record_lost_event(
reason=reason,
data_category=data_category,
item=item,
quantity=quantity,
)
try:
_client_init_debug.set(self.options["debug"])
self.transport = make_transport(self.options)
self.monitor = None
if self.transport:
if self.options["enable_backpressure_handling"]:
self.monitor = Monitor(self.transport)
self.session_flusher = SessionFlusher(capture_func=_capture_envelope)
self.log_batcher = None
if has_logs_enabled(self.options):
from sentry_sdk._log_batcher import LogBatcher
self.log_batcher = LogBatcher(
capture_func=_capture_envelope,
record_lost_func=_record_lost_event,
)
self.metrics_batcher = None
if has_metrics_enabled(self.options):
self.metrics_batcher = MetricsBatcher(
capture_func=_capture_envelope,
record_lost_func=_record_lost_event,
)
max_request_body_size = ("always", "never", "small", "medium")
if self.options["max_request_body_size"] not in max_request_body_size:
raise ValueError(
"Invalid value for max_request_body_size. Must be one of {}".format(
max_request_body_size
)
)
if self.options["_experiments"].get("otel_powered_performance", False):
logger.debug(
"[OTel] Enabling experimental OTel-powered performance monitoring."
)
self.options["instrumenter"] = INSTRUMENTER.OTEL
if (
"sentry_sdk.integrations.opentelemetry.integration.OpenTelemetryIntegration"
not in _DEFAULT_INTEGRATIONS
):
_DEFAULT_INTEGRATIONS.append(
"sentry_sdk.integrations.opentelemetry.integration.OpenTelemetryIntegration",
)
self.integrations = setup_integrations(
self.options["integrations"],
with_defaults=self.options["default_integrations"],
with_auto_enabling_integrations=self.options[
"auto_enabling_integrations"
],
disabled_integrations=self.options["disabled_integrations"],
options=self.options,
)
spotlight_config = self.options.get("spotlight")
if spotlight_config is None and "SENTRY_SPOTLIGHT" in os.environ:
spotlight_env_value = os.environ["SENTRY_SPOTLIGHT"]
spotlight_config = env_to_bool(spotlight_env_value, strict=True)
self.options["spotlight"] = (
spotlight_config
if spotlight_config is not None
else spotlight_env_value
)
if self.options.get("spotlight"):
# This is intentionally here to prevent setting up spotlight
# stuff we don't need unless spotlight is explicitly enabled
from sentry_sdk.spotlight import setup_spotlight
self.spotlight = setup_spotlight(self.options)
if not self.options["dsn"]:
sample_all = lambda *_args, **_kwargs: 1.0
self.options["send_default_pii"] = True
self.options["error_sampler"] = sample_all
self.options["traces_sampler"] = sample_all
self.options["profiles_sampler"] = sample_all
sdk_name = get_sdk_name(list(self.integrations.keys()))
SDK_INFO["name"] = sdk_name
logger.debug("Setting SDK name to '%s'", sdk_name)
if has_profiling_enabled(self.options):
try:
setup_profiler(self.options)
except Exception as e:
logger.debug("Can not set up profiler. (%s)", e)
else:
try:
setup_continuous_profiler(
self.options,
sdk_info=SDK_INFO,
capture_func=_capture_envelope,
)
except Exception as e:
logger.debug("Can not set up continuous profiler. (%s)", e)
finally:
_client_init_debug.set(old_debug)
self._setup_instrumentation(self.options.get("functions_to_trace", []))
if (
self.monitor
or self.log_batcher
or has_profiling_enabled(self.options)
or isinstance(self.transport, BaseHttpTransport)
):
# If we have anything on that could spawn a background thread, we
# need to check if it's safe to use them.
check_uwsgi_thread_support()
def is_active(self):
# type: () -> bool
"""
.. versionadded:: 2.0.0
Returns whether the client is active (able to send data to Sentry)
"""
return True
def should_send_default_pii(self):
# type: () -> bool
"""
.. versionadded:: 2.0.0
Returns whether the client should send default PII (Personally Identifiable Information) data to Sentry.
"""
return self.options.get("send_default_pii") or False
@property
def dsn(self):
# type: () -> Optional[str]
"""Returns the configured DSN as string."""
return self.options["dsn"]
@property
def parsed_dsn(self):
# type: () -> Optional[Dsn]
"""Returns the configured parsed DSN object."""
return self.transport.parsed_dsn if self.transport else None
def _prepare_event(
self,
event, # type: Event
hint, # type: Hint
scope, # type: Optional[Scope]
):
# type: (...) -> Optional[Event]
previous_total_spans = None # type: Optional[int]
previous_total_breadcrumbs = None # type: Optional[int]
if event.get("timestamp") is None:
event["timestamp"] = datetime.now(timezone.utc)
is_transaction = event.get("type") == "transaction"
if scope is not None:
spans_before = len(cast(List[Dict[str, object]], event.get("spans", [])))
event_ = scope.apply_to_event(event, hint, self.options)
# one of the event/error processors returned None
if event_ is None:
if self.transport:
self.transport.record_lost_event(
"event_processor",
data_category=("transaction" if is_transaction else "error"),
)
if is_transaction:
self.transport.record_lost_event(
"event_processor",
data_category="span",
quantity=spans_before + 1, # +1 for the transaction itself
)
return None
event = event_
spans_delta = spans_before - len(
cast(List[Dict[str, object]], event.get("spans", []))
)
if is_transaction and spans_delta > 0 and self.transport is not None:
self.transport.record_lost_event(
"event_processor", data_category="span", quantity=spans_delta
)
dropped_spans = event.pop("_dropped_spans", 0) + spans_delta # type: int
if dropped_spans > 0:
previous_total_spans = spans_before + dropped_spans
if scope._n_breadcrumbs_truncated > 0:
breadcrumbs = event.get("breadcrumbs", {})
values = (
breadcrumbs.get("values", [])
if not isinstance(breadcrumbs, AnnotatedValue)
else []
)
previous_total_breadcrumbs = (
len(values) + scope._n_breadcrumbs_truncated
)
if (
not is_transaction
and self.options["attach_stacktrace"]
and "exception" not in event
and "stacktrace" not in event
and "threads" not in event
):
with capture_internal_exceptions():
event["threads"] = {
"values": [
{
"stacktrace": current_stacktrace(
include_local_variables=self.options.get(
"include_local_variables", True
),
max_value_length=self.options.get(
"max_value_length", DEFAULT_MAX_VALUE_LENGTH
),
),
"crashed": False,
"current": True,
}
]
}
for key in "release", "environment", "server_name", "dist":
if event.get(key) is None and self.options[key] is not None:
event[key] = str(self.options[key]).strip()
if event.get("sdk") is None:
sdk_info = dict(SDK_INFO)
sdk_info["integrations"] = sorted(self.integrations.keys())
event["sdk"] = sdk_info
if event.get("platform") is None:
event["platform"] = "python"
event = handle_in_app(
event,
self.options["in_app_exclude"],
self.options["in_app_include"],
self.options["project_root"],
)
if event is not None:
event_scrubber = self.options["event_scrubber"]
if event_scrubber:
event_scrubber.scrub_event(event)
if scope is not None and scope._gen_ai_original_message_count:
spans = event.get("spans", []) # type: List[Dict[str, Any]] | AnnotatedValue
if isinstance(spans, list):
for span in spans:
span_id = span.get("span_id", None)
span_data = span.get("data", {})
if (
span_id
and span_id in scope._gen_ai_original_message_count
and SPANDATA.GEN_AI_REQUEST_MESSAGES in span_data
):
span_data[SPANDATA.GEN_AI_REQUEST_MESSAGES] = AnnotatedValue(
span_data[SPANDATA.GEN_AI_REQUEST_MESSAGES],
{"len": scope._gen_ai_original_message_count[span_id]},
)
if previous_total_spans is not None:
event["spans"] = AnnotatedValue(
event.get("spans", []), {"len": previous_total_spans}
)
if previous_total_breadcrumbs is not None:
event["breadcrumbs"] = AnnotatedValue(
event.get("breadcrumbs", {"values": []}),
{"len": previous_total_breadcrumbs},
)
# Postprocess the event here so that annotated types do
# generally not surface in before_send
if event is not None:
event = cast(
"Event",
serialize(
cast("Dict[str, Any]", event),
max_request_body_size=self.options.get("max_request_body_size"),
max_value_length=self.options.get("max_value_length"),
custom_repr=self.options.get("custom_repr"),
),
)
before_send = self.options["before_send"]
if (
before_send is not None
and event is not None
and event.get("type") != "transaction"
):
new_event = None
with capture_internal_exceptions():
new_event = before_send(event, hint or {})
if new_event is None:
logger.info("before send dropped event")
if self.transport:
self.transport.record_lost_event(
"before_send", data_category="error"
)
# If this is an exception, reset the DedupeIntegration. It still
# remembers the dropped exception as the last exception, meaning
# that if the same exception happens again and is not dropped
# in before_send, it'd get dropped by DedupeIntegration.
if event.get("exception"):
DedupeIntegration.reset_last_seen()
event = new_event
before_send_transaction = self.options["before_send_transaction"]
if (
before_send_transaction is not None
and event is not None
and event.get("type") == "transaction"
):
new_event = None
spans_before = len(cast(List[Dict[str, object]], event.get("spans", [])))
with capture_internal_exceptions():
new_event = before_send_transaction(event, hint or {})
if new_event is None:
logger.info("before send transaction dropped event")
if self.transport:
self.transport.record_lost_event(
reason="before_send", data_category="transaction"
)
self.transport.record_lost_event(
reason="before_send",
data_category="span",
quantity=spans_before + 1, # +1 for the transaction itself
)
else:
spans_delta = spans_before - len(new_event.get("spans", []))
if spans_delta > 0 and self.transport is not None:
self.transport.record_lost_event(
reason="before_send", data_category="span", quantity=spans_delta
)
event = new_event
return event
def _is_ignored_error(self, event, hint):
# type: (Event, Hint) -> bool
exc_info = hint.get("exc_info")
if exc_info is None:
return False
error = exc_info[0]
error_type_name = get_type_name(exc_info[0])
error_full_name = "%s.%s" % (exc_info[0].__module__, error_type_name)
for ignored_error in self.options["ignore_errors"]:
# String types are matched against the type name in the
# exception only
if isinstance(ignored_error, str):
if ignored_error == error_full_name or ignored_error == error_type_name:
return True
else:
if issubclass(error, ignored_error):
return True
return False
def _should_capture(
self,
event, # type: Event
hint, # type: Hint
scope=None, # type: Optional[Scope]
):
# type: (...) -> bool
# Transactions are sampled independent of error events.
is_transaction = event.get("type") == "transaction"
if is_transaction:
return True
ignoring_prevents_recursion = scope is not None and not scope._should_capture
if ignoring_prevents_recursion:
return False
ignored_by_config_option = self._is_ignored_error(event, hint)
if ignored_by_config_option:
return False
return True
def _should_sample_error(
self,
event, # type: Event
hint, # type: Hint
):
# type: (...) -> bool
error_sampler = self.options.get("error_sampler", None)
if callable(error_sampler):
with capture_internal_exceptions():
sample_rate = error_sampler(event, hint)
else:
sample_rate = self.options["sample_rate"]
try:
not_in_sample_rate = sample_rate < 1.0 and random.random() >= sample_rate
except NameError:
logger.warning(
"The provided error_sampler raised an error. Defaulting to sampling the event."
)
# If the error_sampler raised an error, we should sample the event, since the default behavior
# (when no sample_rate or error_sampler is provided) is to sample all events.
not_in_sample_rate = False
except TypeError:
parameter, verb = (
("error_sampler", "returned")
if callable(error_sampler)
else ("sample_rate", "contains")
)
logger.warning(
"The provided %s %s an invalid value of %s. The value should be a float or a bool. Defaulting to sampling the event."
% (parameter, verb, repr(sample_rate))
)
# If the sample_rate has an invalid value, we should sample the event, since the default behavior
# (when no sample_rate or error_sampler is provided) is to sample all events.
not_in_sample_rate = False
if not_in_sample_rate:
# because we will not sample this event, record a "lost event".
if self.transport:
self.transport.record_lost_event("sample_rate", data_category="error")
return False
return True
def _update_session_from_event(
self,
session, # type: Session
event, # type: Event
):
# type: (...) -> None
crashed = False
errored = False
user_agent = None
exceptions = (event.get("exception") or {}).get("values")
if exceptions:
errored = True
for error in exceptions:
if isinstance(error, AnnotatedValue):
error = error.value or {}
mechanism = error.get("mechanism")
if isinstance(mechanism, Mapping) and mechanism.get("handled") is False:
crashed = True
break
user = event.get("user")
if session.user_agent is None:
headers = (event.get("request") or {}).get("headers")
headers_dict = headers if isinstance(headers, dict) else {}
for k, v in headers_dict.items():
if k.lower() == "user-agent":
user_agent = v
break
session.update(
status="crashed" if crashed else None,
user=user,
user_agent=user_agent,
errors=session.errors + (errored or crashed),
)
def capture_event(
self,
event, # type: Event
hint=None, # type: Optional[Hint]
scope=None, # type: Optional[Scope]
):
# type: (...) -> Optional[str]
"""Captures an event.
:param event: A ready-made event that can be directly sent to Sentry.
:param hint: Contains metadata about the event that can be read from `before_send`, such as the original exception object or a HTTP request object.
:param scope: An optional :py:class:`sentry_sdk.Scope` to apply to events.
:returns: An event ID. May be `None` if there is no DSN set or of if the SDK decided to discard the event for other reasons. In such situations setting `debug=True` on `init()` may help.
"""
hint = dict(hint or ()) # type: Hint
if not self._should_capture(event, hint, scope):
return None
profile = event.pop("profile", None)
event_id = event.get("event_id")
if event_id is None:
event["event_id"] = event_id = uuid.uuid4().hex
event_opt = self._prepare_event(event, hint, scope)
if event_opt is None:
return None
# whenever we capture an event we also check if the session needs
# to be updated based on that information.
session = scope._session if scope else None
if session:
self._update_session_from_event(session, event)
is_transaction = event_opt.get("type") == "transaction"
is_checkin = event_opt.get("type") == "check_in"
if (
not is_transaction
and not is_checkin
and not self._should_sample_error(event, hint)
):
return None
attachments = hint.get("attachments")
trace_context = event_opt.get("contexts", {}).get("trace") or {}
dynamic_sampling_context = trace_context.pop("dynamic_sampling_context", {})
headers = {
"event_id": event_opt["event_id"],
"sent_at": format_timestamp(datetime.now(timezone.utc)),
} # type: dict[str, object]
if dynamic_sampling_context:
headers["trace"] = dynamic_sampling_context
envelope = Envelope(headers=headers)
if is_transaction:
if isinstance(profile, Profile):
envelope.add_profile(profile.to_json(event_opt, self.options))
envelope.add_transaction(event_opt)
elif is_checkin:
envelope.add_checkin(event_opt)
else:
envelope.add_event(event_opt)
for attachment in attachments or ():
envelope.add_item(attachment.to_envelope_item())
return_value = None
if self.spotlight:
self.spotlight.capture_envelope(envelope)
return_value = event_id
if self.transport is not None:
self.transport.capture_envelope(envelope)
return_value = event_id
return return_value
def _capture_log(self, log):
# type: (Optional[Log]) -> None
if not has_logs_enabled(self.options) or log is None:
return
current_scope = sentry_sdk.get_current_scope()
isolation_scope = sentry_sdk.get_isolation_scope()
log["attributes"]["sentry.sdk.name"] = SDK_INFO["name"]
log["attributes"]["sentry.sdk.version"] = SDK_INFO["version"]
server_name = self.options.get("server_name")
if server_name is not None and SPANDATA.SERVER_ADDRESS not in log["attributes"]:
log["attributes"][SPANDATA.SERVER_ADDRESS] = server_name
environment = self.options.get("environment")
if environment is not None and "sentry.environment" not in log["attributes"]:
log["attributes"]["sentry.environment"] = environment
release = self.options.get("release")
if release is not None and "sentry.release" not in log["attributes"]:
log["attributes"]["sentry.release"] = release
trace_context = current_scope.get_trace_context()
trace_id = trace_context.get("trace_id")
span_id = trace_context.get("span_id")
if trace_id is not None and log.get("trace_id") is None:
log["trace_id"] = trace_id
if (
span_id is not None
and "sentry.trace.parent_span_id" not in log["attributes"]
):
log["attributes"]["sentry.trace.parent_span_id"] = span_id
# The user, if present, is always set on the isolation scope.
if isolation_scope._user is not None:
for log_attribute, user_attribute in (
("user.id", "id"),
("user.name", "username"),
("user.email", "email"),
):
if (
user_attribute in isolation_scope._user
and log_attribute not in log["attributes"]
):
log["attributes"][log_attribute] = isolation_scope._user[
user_attribute
]
# If debug is enabled, log the log to the console
debug = self.options.get("debug", False)
if debug:
logger.debug(
f"[Sentry Logs] [{log.get('severity_text')}] {log.get('body')}"
)
before_send_log = get_before_send_log(self.options)
if before_send_log is not None:
log = before_send_log(log, {})
if log is None:
return
if self.log_batcher:
self.log_batcher.add(log)
def _capture_metric(self, metric):
# type: (Optional[Metric]) -> None
if not has_metrics_enabled(self.options) or metric is None:
return
current_scope = sentry_sdk.get_current_scope()
isolation_scope = sentry_sdk.get_isolation_scope()
metric["attributes"]["sentry.sdk.name"] = SDK_INFO["name"]
metric["attributes"]["sentry.sdk.version"] = SDK_INFO["version"]
server_name = self.options.get("server_name")
if (
server_name is not None
and SPANDATA.SERVER_ADDRESS not in metric["attributes"]
):
metric["attributes"][SPANDATA.SERVER_ADDRESS] = server_name
environment = self.options.get("environment")
if environment is not None and "sentry.environment" not in metric["attributes"]:
metric["attributes"]["sentry.environment"] = environment
release = self.options.get("release")
if release is not None and "sentry.release" not in metric["attributes"]:
metric["attributes"]["sentry.release"] = release
trace_context = current_scope.get_trace_context()
trace_id = trace_context.get("trace_id")
span_id = trace_context.get("span_id")
metric["trace_id"] = trace_id or "00000000-0000-0000-0000-000000000000"
if span_id is not None:
metric["span_id"] = span_id
if isolation_scope._user is not None:
for metric_attribute, user_attribute in (
("user.id", "id"),
("user.name", "username"),
("user.email", "email"),
):
if (
user_attribute in isolation_scope._user
and metric_attribute not in metric["attributes"]
):
metric["attributes"][metric_attribute] = isolation_scope._user[
user_attribute
]
debug = self.options.get("debug", False)
if debug:
logger.debug(
f"[Sentry Metrics] [{metric.get('type')}] {metric.get('name')}: {metric.get('value')}"
)
before_send_metric = get_before_send_metric(self.options)
if before_send_metric is not None:
metric = before_send_metric(metric, {})
if metric is None:
return
if self.metrics_batcher:
self.metrics_batcher.add(metric)
def capture_session(
self,
session, # type: Session
):
# type: (...) -> None
if not session.release:
logger.info("Discarded session update because of missing release")
else:
self.session_flusher.add_session(session)
if TYPE_CHECKING:
@overload
def get_integration(self, name_or_class):
# type: (str) -> Optional[Integration]
...
@overload
def get_integration(self, name_or_class):
# type: (type[I]) -> Optional[I]
...
def get_integration(
self,
name_or_class, # type: Union[str, Type[Integration]]
):
# type: (...) -> Optional[Integration]
"""Returns the integration for this client by name or class.
If the client does not have that integration then `None` is returned.
"""
if isinstance(name_or_class, str):
integration_name = name_or_class
elif name_or_class.identifier is not None:
integration_name = name_or_class.identifier
else:
raise ValueError("Integration has no name")
return self.integrations.get(integration_name)
def close(
self,
timeout=None, # type: Optional[float]
callback=None, # type: Optional[Callable[[int, float], None]]
):
# type: (...) -> None
"""
Close the client and shut down the transport. Arguments have the same
semantics as :py:meth:`Client.flush`.
"""
if self.transport is not None:
self.flush(timeout=timeout, callback=callback)
self.session_flusher.kill()
if self.log_batcher is not None:
self.log_batcher.kill()
if self.metrics_batcher is not None:
self.metrics_batcher.kill()
if self.monitor:
self.monitor.kill()
self.transport.kill()
self.transport = None
def flush(
self,
timeout=None, # type: Optional[float]
callback=None, # type: Optional[Callable[[int, float], None]]
):
# type: (...) -> None
"""
Wait for the current events to be sent.
:param timeout: Wait for at most `timeout` seconds. If no `timeout` is provided, the `shutdown_timeout` option value is used.
:param callback: Is invoked with the number of pending events and the configured timeout.
"""
if self.transport is not None:
if timeout is None:
timeout = self.options["shutdown_timeout"]
self.session_flusher.flush()
if self.log_batcher is not None:
self.log_batcher.flush()
if self.metrics_batcher is not None:
self.metrics_batcher.flush()
self.transport.flush(timeout=timeout, callback=callback)
def __enter__(self):
# type: () -> _Client
return self
def __exit__(self, exc_type, exc_value, tb):
# type: (Any, Any, Any) -> None
self.close()
from typing import TYPE_CHECKING
if TYPE_CHECKING:
# Make mypy, PyCharm and other static analyzers think `get_options` is a
# type to have nicer autocompletion for params.
#
# Use `ClientConstructor` to define the argument types of `init` and
# `Dict[str, Any]` to tell static analyzers about the return type.
class get_options(ClientConstructor, Dict[str, Any]): # noqa: N801
pass
class Client(ClientConstructor, _Client):
pass
else:
# Alias `get_options` for actual usage. Go through the lambda indirection
# to throw PyCharm off of the weakly typed signature (it would otherwise
# discover both the weakly typed signature of `_init` and our faked `init`
# type).
get_options = (lambda: _get_options)()
Client = (lambda: _Client)()
| _Client |
python | pydantic__pydantic | tests/test_edge_cases.py | {
"start": 89215,
"end": 95349
} | class ____(BaseModel):
type Int = int
a: Int
""",
globs,
)
A = globs['A']
assert A(a=1).a == 1
def test_method_descriptors_default() -> None:
class SomeModel(BaseModel):
@staticmethod
def default_int_factory() -> int: ...
int_factory: Callable[[], int] = Field(default=default_int_factory)
assert SomeModel.model_fields['int_factory'].default is SomeModel.default_int_factory
def test_setattr_handler_memo_does_not_inherit() -> None:
class Model1(BaseModel):
a: int
class Model2(Model1):
a: int
m1 = Model1(a=1)
m2 = Model2(a=10)
assert not Model1.__pydantic_setattr_handlers__
assert not Model2.__pydantic_setattr_handlers__
m2.a = 11
assert not Model1.__pydantic_setattr_handlers__
assert 'a' in Model2.__pydantic_setattr_handlers__
handler2 = Model2.__pydantic_setattr_handlers__['a']
m1.a = 2
assert 'a' in Model1.__pydantic_setattr_handlers__
assert Model1.__pydantic_setattr_handlers__['a'] is handler2
assert Model2.__pydantic_setattr_handlers__['a'] is handler2
assert m1.a == 2 and m2.a == 11
def test_setattr_handler_does_not_memoize_unknown_field() -> None:
class Model(BaseModel):
a: int
m = Model(a=1)
with pytest.raises(ValueError, match='object has no field "unknown"'):
m.unknown = 'x'
assert not Model.__pydantic_setattr_handlers__
m.a = 2
assert 'a' in Model.__pydantic_setattr_handlers__
def test_setattr_handler_does_not_memoize_unknown_private_field() -> None:
class Model(BaseModel):
a: int
_p: str
m = Model(a=1)
assert not Model.__pydantic_setattr_handlers__
m.a = 2
assert len(Model.__pydantic_setattr_handlers__) == 1
m._unknown = 'x'
assert len(Model.__pydantic_setattr_handlers__) == 1
m._p = 'y'
assert len(Model.__pydantic_setattr_handlers__) == 2
def test_setattr_handler_does_not_memoize_on_validate_assignment_field_failure() -> None:
class Model(BaseModel, validate_assignment=True):
a: int
m = Model(a=1)
with pytest.raises(ValidationError):
m.unknown = 'x'
with pytest.raises(ValidationError):
m.a = 'y'
assert not Model.__pydantic_setattr_handlers__
m.a = 2
assert 'a' in Model.__pydantic_setattr_handlers__
# The following 3 tests define a `__get_pydantic_core_schema__()` method on Pydantic models.
# This isn't explicitly supported and can lead to unexpected side effects, but are here
# to prevent potential regressions:
def test_get_pydantic_core_schema_on_referenceable_type() -> None:
# This ensures that even if you define the method, it won't actually
# be called twice and the cached definition will be used instead.
# Note that what is important here is that the `Test` definition does
# not raise a `LookupError` when calling the `handler` (see
# `CallbackGetCoreSchemaHandler.__call__` calling `resolve_ref_schema`).
counter = 0
class Model(BaseModel):
@classmethod
def __get_pydantic_core_schema__(
cls, source: type[BaseModel], handler: GetCoreSchemaHandler, /
) -> core_schema.CoreSchema:
rv = handler(source)
nonlocal counter
counter += 1
return rv
counter = 0 # reset the counter to only account for the `Test` definition
class Test(Model):
t: 'Test'
assert counter == 1
def test_repeated_custom_type() -> None:
class Numeric(BaseModel):
value: float
@classmethod
def __get_pydantic_core_schema__(cls, source_type: Any, handler: GetCoreSchemaHandler) -> CoreSchema:
return core_schema.no_info_before_validator_function(cls._validate, handler(source_type))
@classmethod
def _validate(cls, v: Any) -> Union[dict[str, Any], Self]:
if isinstance(v, (str, float, int)):
return cls(value=v)
if isinstance(v, Numeric):
return v
if isinstance(v, dict):
return v
raise ValueError(f'Invalid value for {cls}: {v}')
def is_positive(value: Numeric):
assert value.value > 0.0, 'Must be positive'
class OuterModel(BaseModel):
x: Numeric
y: Numeric
z: Annotated[Numeric, AfterValidator(is_positive)]
assert OuterModel(x=2, y=-1, z=1)
with pytest.raises(ValidationError):
OuterModel(x=2, y=-1, z=-1)
def test_get_pydantic_core_schema_noop() -> None:
"""https://github.com/pydantic/pydantic/issues/12096"""
class Metadata(BaseModel):
foo: int = 100
@classmethod
def __get_pydantic_core_schema__(cls, source_type, handler: GetCoreSchemaHandler) -> CoreSchema:
return handler(source_type)
class Model1(BaseModel):
f: Annotated[str, Metadata()]
assert isinstance(Model1.model_fields['f'].metadata[0], Metadata)
assert Model1(f='test').f == 'test'
class Model2(BaseModel):
f1: Annotated[str, Metadata()]
f2: Annotated[str, Metadata()] = 'f2'
m2 = Model2(f1='f1')
assert m2.f1 == 'f1'
assert m2.f2 == 'f2'
def test_validator_and_serializer_not_reused_during_rebuild() -> None:
# Make sure validators and serializers are deleted before model rebuild,
# so that they don't end up being reused in pydantic-core (since we look
# for `cls.__pydantic_validator/serializer__`).
# This test makes the assertion on the serialization behavior.
class Model(BaseModel):
a: int
Model.model_fields['a'].exclude = True
Model.model_rebuild(force=True)
m = Model(a=1)
assert m.model_dump() == {}
@pytest.mark.filterwarnings('ignore:.*`__get_validators__`.*:DeprecationWarning')
def test_get_schema_on_classes_with_both_v1_and_v2_apis() -> None:
class Model(BaseModel):
a: int
@model_validator(mode='after')
def my_model_validator(self):
return self
@classmethod
def __get_validators__(cls):
raise AssertionError('This should not be called')
| A |
python | catalyst-team__catalyst | catalyst/data/sampler.py | {
"start": 13723,
"end": 16898
} | class ____(Sampler):
"""
Sampler iterates mini epochs from the dataset used by ``mini_epoch_len``.
Args:
data_len: Size of the dataset
mini_epoch_len: Num samples from the dataset used in one
mini epoch.
drop_last: If ``True``, sampler will drop the last batches
if its size would be less than ``batches_per_epoch``
shuffle: one of ``"always"``, ``"real_epoch"``, or `None``.
The sampler will shuffle indices
> "per_mini_epoch" - every mini epoch (every ``__iter__`` call)
> "per_epoch" -- every real epoch
> None -- don't shuffle
Example:
>>> MiniEpochSampler(len(dataset), mini_epoch_len=100)
>>> MiniEpochSampler(len(dataset), mini_epoch_len=100, drop_last=True)
>>> MiniEpochSampler(len(dataset), mini_epoch_len=100,
>>> shuffle="per_epoch")
"""
def __init__(
self,
data_len: int,
mini_epoch_len: int,
drop_last: bool = False,
shuffle: str = None,
):
"""Sampler initialisation."""
super().__init__(None)
self.data_len = int(data_len)
self.mini_epoch_len = int(mini_epoch_len)
self.steps = int(data_len / self.mini_epoch_len)
self.state_i = 0
has_reminder = data_len - self.steps * mini_epoch_len > 0
if self.steps == 0:
self.divider = 1
elif has_reminder and not drop_last:
self.divider = self.steps + 1
else:
self.divider = self.steps
self._indices = np.arange(self.data_len)
self.indices = self._indices
self.end_pointer = max(self.data_len, self.mini_epoch_len)
if not (shuffle is None or shuffle in ["per_mini_epoch", "per_epoch"]):
raise ValueError(
"Shuffle must be one of ['per_mini_epoch', 'per_epoch']. "
+ f"Got {shuffle}"
)
self.shuffle_type = shuffle
def shuffle(self) -> None:
"""Shuffle sampler indices."""
if self.shuffle_type == "per_mini_epoch" or (
self.shuffle_type == "per_epoch" and self.state_i == 0
):
if self.data_len >= self.mini_epoch_len:
self.indices = self._indices
np.random.shuffle(self.indices)
else:
self.indices = np.random.choice(
self._indices, self.mini_epoch_len, replace=True
)
def __iter__(self) -> Iterator[int]:
"""Iterate over sampler.
Returns:
python iterator
"""
self.state_i = self.state_i % self.divider
self.shuffle()
start = self.state_i * self.mini_epoch_len
stop = (
self.end_pointer
if (self.state_i == self.steps)
else (self.state_i + 1) * self.mini_epoch_len
)
indices = self.indices[start:stop].tolist()
self.state_i += 1
return iter(indices)
def __len__(self) -> int:
"""
Returns:
int: length of the mini-epoch
"""
return self.mini_epoch_len
| MiniEpochSampler |
python | apache__airflow | providers/pagerduty/tests/unit/pagerduty/hooks/test_pagerduty.py | {
"start": 1531,
"end": 2888
} | class ____:
def test_get_token_from_password(self, pagerduty_connections):
hook = PagerdutyHook(pagerduty_conn_id=DEFAULT_CONN_ID)
assert hook.token == "token", "token initialised."
assert hook.routing_key == "integration_key"
def test_without_routing_key_extra(self):
hook = PagerdutyHook(pagerduty_conn_id="pagerduty_no_extra")
assert hook.token == "pagerduty_token_without_extra", "token initialised."
assert hook.routing_key is None, "default routing key skipped."
def test_token_parameter_override(self):
hook = PagerdutyHook(token="pagerduty_param_token", pagerduty_conn_id=DEFAULT_CONN_ID)
assert hook.token == "pagerduty_param_token", "token initialised."
@patch.object(pagerduty.RestApiV2Client, "rget")
def test_get_service(self, rget):
hook = PagerdutyHook(pagerduty_conn_id=DEFAULT_CONN_ID)
mock_response_body = {
"id": "PZYX321",
"name": "Apache Airflow",
"status": "active",
"type": "service",
"summary": "Apache Airflow",
"self": "https://api.pagerduty.com/services/PZYX321",
}
rget.return_value = mock_response_body
client = hook.client()
resp = client.rget("/services/PZYX321")
assert resp == mock_response_body
| TestPagerdutyHook |
python | tensorflow__tensorflow | tensorflow/python/distribute/multi_worker_test_base.py | {
"start": 21711,
"end": 25619
} | class ____(test.TestCase):
"""Testing infra for independent workers."""
def _make_mock_run_std_server(self):
def _mock_run_std_server(*args, **kwargs):
"""Returns the std server once all threads have started it."""
with skip_if_grpc_server_cant_be_started(self):
ret = original_run_std_server(*args, **kwargs)
# Wait for all std servers to be brought up in order to reduce the chance
# of remote sessions taking local ports that have been assigned to std
# servers. Only call this barrier the first time this function is run for
# each thread.
if not getattr(self._thread_local, 'server_started', False):
self._barrier.wait()
self._thread_local.server_started = True
return ret
return _mock_run_std_server
def setUp(self):
self._mock_os_env = MockOsEnv()
self._mock_context = test.mock.patch.object(os, 'environ',
self._mock_os_env)
self._coord = coordinator.Coordinator()
super(IndependentWorkerTestBase, self).setUp()
self._mock_context.__enter__()
# threading local object to be shared by all threads
self._thread_local = threading.local()
def tearDown(self):
self._mock_context.__exit__(None, None, None)
super(IndependentWorkerTestBase, self).tearDown()
def _task_thread(self, task_fn, tf_config, executing_eagerly, *args,
**kwargs):
with self._coord.stop_on_exception():
os.environ['TF_CONFIG'] = json.dumps(tf_config)
# Force the new thread simulating a worker to run in the same context
# mode as the parent thread does.
if executing_eagerly:
with context.eager_mode():
task_fn(*args, **kwargs)
else:
with ops.Graph().as_default(), context.graph_mode():
task_fn(*args, **kwargs)
def _run_task_in_thread(self, task_fn, cluster_spec, task_type, task_id,
*args, **kwargs):
"""Run tasks in a thread.
If `tf_config` is provided, use it for the new thread; if not, construct one
from `cluster_spec`, `task_type`, and `task_id`, and provide it to the new
thread to be set as `TF_CONFIG` environment.
Args:
task_fn: The function to run in the new thread.
cluster_spec: The cluster spec.
task_type: The task type.
task_id: The task id.
*args: Additional positional arguments to provide to the thread's task_fn.
**kwargs: Additional keyword arguments to provide to the thread's task_fn.
If `tf_config` is provided, that dict will be used for the TF_CONFIG for
the new thread.
Returns:
The thread that has started.
"""
tf_config = kwargs.pop('tf_config', None)
if tf_config is None:
if task_type:
tf_config = {
'cluster': cluster_spec,
'task': {
'type': task_type,
'index': task_id
}
}
else:
tf_config = {
'cluster': cluster_spec,
}
t = threading.Thread(
target=self._task_thread,
args=(task_fn, tf_config, context.executing_eagerly()) + args,
kwargs=kwargs)
t.start()
return t
def run_multiple_tasks_in_threads(self, task_fn, cluster_spec, *args,
**kwargs):
# The task_fn should create std_server by itself.
threads = {}
for task_type in cluster_spec.keys():
threads[task_type] = []
for task_id in range(len(cluster_spec[task_type])):
t = self._run_task_in_thread(task_fn, cluster_spec, task_type, task_id,
*args, **kwargs)
threads[task_type].append(t)
return threads
def join_independent_workers(self, worker_threads):
with skip_if_grpc_server_cant_be_started(self):
self._coord.join(worker_threads)
| IndependentWorkerTestBase |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-genesys/source_genesys/source.py | {
"start": 4609,
"end": 4896
} | class ____(GenesysStream):
"""
API Docs: https://developer.genesys.cloud/telephony/telephony-apis
"""
primary_key = "id"
cursor_field = "dateModified"
def path(self, **kwargs) -> str:
return "telephony/providers/edges/phones"
| TelephonyProvidersEdgesPhones |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 8360,
"end": 8569
} | class ____(BaseModel):
"""
Dag Serializer for updatable bodies.
"""
model_config = ConfigDict(
extra="forbid",
)
is_paused: Annotated[bool, Field(title="Is Paused")]
| DAGPatchBody |
python | bokeh__bokeh | src/bokeh/models/tiles.py | {
"start": 4244,
"end": 4807
} | class ____(MercatorTileSource):
''' Contains tile config info and provides urls for tiles based on a
templated url e.g. ``http://your.tms.server.host/{Z}/{X}/{Y}.png``. The
defining feature of TMS is the tile-origin in located at the bottom-left.
``TMSTileSource`` can also be helpful in implementing tile renderers for
custom tile sets, including non-spatial datasets.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| TMSTileSource |
python | mwaskom__seaborn | tests/_core/test_plot.py | {
"start": 42546,
"end": 44127
} | class ____:
def test_scale_setup(self):
x = y = color = ["a", "b"]
bad_palette = "not_a_palette"
p = Plot(x, y, color=color).add(MockMark()).scale(color=bad_palette)
msg = "Scale setup failed for the `color` variable."
with pytest.raises(PlotSpecError, match=msg) as err:
p.plot()
assert isinstance(err.value.__cause__, ValueError)
assert bad_palette in str(err.value.__cause__)
def test_coordinate_scaling(self):
x = ["a", "b"]
y = [1, 2]
p = Plot(x, y).add(MockMark()).scale(x=Temporal())
msg = "Scaling operation failed for the `x` variable."
with pytest.raises(PlotSpecError, match=msg) as err:
p.plot()
# Don't test the cause contents b/c matplotlib owns them here.
assert hasattr(err.value, "__cause__")
def test_semantic_scaling(self):
class ErrorRaising(Continuous):
def _setup(self, data, prop, axis=None):
def f(x):
raise ValueError("This is a test")
new = super()._setup(data, prop, axis)
new._pipeline = [f]
return new
x = y = color = [1, 2]
p = Plot(x, y, color=color).add(Dot()).scale(color=ErrorRaising())
msg = "Scaling operation failed for the `color` variable."
with pytest.raises(PlotSpecError, match=msg) as err:
p.plot()
assert isinstance(err.value.__cause__, ValueError)
assert str(err.value.__cause__) == "This is a test"
| TestExceptions |
python | kubernetes-client__python | kubernetes/base/leaderelection/leaderelectionrecord.py | {
"start": 589,
"end": 911
} | class ____:
# Annotation used in the lock object
def __init__(self, holder_identity, lease_duration, acquire_time, renew_time):
self.holder_identity = holder_identity
self.lease_duration = lease_duration
self.acquire_time = acquire_time
self.renew_time = renew_time
| LeaderElectionRecord |
python | Pylons__pyramid | tests/test_config/test_predicates.py | {
"start": 50,
"end": 19833
} | class ____(unittest.TestCase):
def _makeOne(self):
from pyramid import predicates
from pyramid.config.predicates import PredicateList
inst = PredicateList()
for name, factory in (
('xhr', predicates.XHRPredicate),
('request_method', predicates.RequestMethodPredicate),
('path_info', predicates.PathInfoPredicate),
('request_param', predicates.RequestParamPredicate),
('header', predicates.HeaderPredicate),
('accept', predicates.AcceptPredicate),
('containment', predicates.ContainmentPredicate),
('request_type', predicates.RequestTypePredicate),
('match_param', predicates.MatchParamPredicate),
('is_authenticated', predicates.IsAuthenticatedPredicate),
('custom', predicates.CustomPredicate),
('traverse', predicates.TraversePredicate),
):
inst.add(name, factory)
return inst
def _callFUT(self, **kw):
inst = self._makeOne()
config = DummyConfigurator()
return inst.make(config, **kw)
def test_ordering_xhr_and_request_method_trump_only_containment(self):
order1, _, _ = self._callFUT(xhr=True, request_method='GET')
order2, _, _ = self._callFUT(containment=True)
self.assertTrue(order1 < order2)
def test_ordering_number_of_predicates(self):
from pyramid.config.predicates import predvalseq
order0, _, _ = self._callFUT(
xhr='xhr',
request_method='request_method',
path_info='path_info',
request_param='param',
match_param='foo=bar',
header='header',
accept='accept',
is_authenticated=True,
containment='containment',
request_type='request_type',
custom=predvalseq([DummyCustomPredicate()]),
)
order1, _, _ = self._callFUT(
xhr='xhr',
request_method='request_method',
path_info='path_info',
request_param='param',
match_param='foo=bar',
header='header',
accept='accept',
containment='containment',
request_type='request_type',
custom=predvalseq([DummyCustomPredicate()]),
)
order2, _, _ = self._callFUT(
xhr='xhr',
request_method='request_method',
path_info='path_info',
request_param='param',
match_param='foo=bar',
header='header',
accept='accept',
containment='containment',
request_type='request_type',
custom=predvalseq([DummyCustomPredicate()]),
)
order3, _, _ = self._callFUT(
xhr='xhr',
request_method='request_method',
path_info='path_info',
request_param='param',
match_param='foo=bar',
header='header',
accept='accept',
containment='containment',
request_type='request_type',
)
order4, _, _ = self._callFUT(
xhr='xhr',
request_method='request_method',
path_info='path_info',
request_param='param',
match_param='foo=bar',
header='header',
accept='accept',
containment='containment',
)
order5, _, _ = self._callFUT(
xhr='xhr',
request_method='request_method',
path_info='path_info',
request_param='param',
match_param='foo=bar',
header='header',
accept='accept',
)
order6, _, _ = self._callFUT(
xhr='xhr',
request_method='request_method',
path_info='path_info',
request_param='param',
match_param='foo=bar',
header='header',
)
order7, _, _ = self._callFUT(
xhr='xhr',
request_method='request_method',
path_info='path_info',
request_param='param',
match_param='foo=bar',
)
order8, _, _ = self._callFUT(
xhr='xhr',
request_method='request_method',
path_info='path_info',
request_param='param',
)
order9, _, _ = self._callFUT(
xhr='xhr', request_method='request_method', path_info='path_info'
)
order10, _, _ = self._callFUT(
xhr='xhr', request_method='request_method'
)
order11, _, _ = self._callFUT(xhr='xhr')
order12, _, _ = self._callFUT()
self.assertTrue(order1 > order0)
self.assertEqual(order1, order2)
self.assertTrue(order3 > order2)
self.assertTrue(order4 > order3)
self.assertTrue(order5 > order4)
self.assertTrue(order6 > order5)
self.assertTrue(order7 > order6)
self.assertTrue(order8 > order7)
self.assertTrue(order9 > order8)
self.assertTrue(order10 > order9)
self.assertTrue(order11 > order10)
self.assertTrue(order12 > order11)
def test_ordering_importance_of_predicates(self):
from pyramid.config.predicates import predvalseq
order1, _, _ = self._callFUT(xhr='xhr')
order2, _, _ = self._callFUT(request_method='request_method')
order3, _, _ = self._callFUT(path_info='path_info')
order4, _, _ = self._callFUT(request_param='param')
order5, _, _ = self._callFUT(header='header')
order6, _, _ = self._callFUT(accept='accept')
order7, _, _ = self._callFUT(containment='containment')
order8, _, _ = self._callFUT(request_type='request_type')
order9, _, _ = self._callFUT(match_param='foo=bar')
order10, _, _ = self._callFUT(is_authenticated=True)
order11, _, _ = self._callFUT(
custom=predvalseq([DummyCustomPredicate()])
)
self.assertTrue(order1 > order2)
self.assertTrue(order2 > order3)
self.assertTrue(order3 > order4)
self.assertTrue(order4 > order5)
self.assertTrue(order5 > order6)
self.assertTrue(order6 > order7)
self.assertTrue(order7 > order8)
self.assertTrue(order8 > order9)
self.assertTrue(order9 > order10)
self.assertTrue(order10 > order11)
def test_ordering_importance_and_number(self):
from pyramid.config.predicates import predvalseq
order1, _, _ = self._callFUT(
xhr='xhr', request_method='request_method'
)
order2, _, _ = self._callFUT(
custom=predvalseq([DummyCustomPredicate()])
)
self.assertTrue(order1 < order2)
order1, _, _ = self._callFUT(
xhr='xhr', request_method='request_method'
)
order2, _, _ = self._callFUT(
request_method='request_method',
custom=predvalseq([DummyCustomPredicate()]),
)
self.assertTrue(order1 > order2)
order1, _, _ = self._callFUT(
xhr='xhr', request_method='request_method', path_info='path_info'
)
order2, _, _ = self._callFUT(
request_method='request_method',
custom=predvalseq([DummyCustomPredicate()]),
)
self.assertTrue(order1 < order2)
order1, _, _ = self._callFUT(
xhr='xhr', request_method='request_method', path_info='path_info'
)
order2, _, _ = self._callFUT(
xhr='xhr',
request_method='request_method',
custom=predvalseq([DummyCustomPredicate()]),
)
self.assertTrue(order1 > order2)
def test_different_custom_predicates_with_same_hash(self):
from pyramid.config.predicates import predvalseq
class PredicateWithHash:
def __hash__(self):
return 1
a = PredicateWithHash()
b = PredicateWithHash()
_, _, a_phash = self._callFUT(custom=predvalseq([a]))
_, _, b_phash = self._callFUT(custom=predvalseq([b]))
self.assertEqual(a_phash, b_phash)
def test_traverse_has_remainder_already(self):
order, predicates, phash = self._callFUT(traverse='/1/:a/:b')
self.assertEqual(len(predicates), 1)
pred = predicates[0]
info = {'traverse': 'abc'}
request = DummyRequest()
result = pred(info, request)
self.assertEqual(result, True)
self.assertEqual(info, {'traverse': 'abc'})
def test_traverse_matches(self):
order, predicates, phash = self._callFUT(traverse='/1/:a/:b')
self.assertEqual(len(predicates), 1)
pred = predicates[0]
info = {'match': {'a': 'a', 'b': 'b'}}
request = DummyRequest()
result = pred(info, request)
self.assertEqual(result, True)
self.assertEqual(
info, {'match': {'a': 'a', 'b': 'b', 'traverse': ('1', 'a', 'b')}}
)
def test_traverse_matches_with_highorder_chars(self):
order, predicates, phash = self._callFUT(
traverse=text_(b'/La Pe\xc3\xb1a/{x}', 'utf-8')
)
self.assertEqual(len(predicates), 1)
pred = predicates[0]
info = {'match': {'x': text_(b'Qu\xc3\xa9bec', 'utf-8')}}
request = DummyRequest()
result = pred(info, request)
self.assertEqual(result, True)
self.assertEqual(
info['match']['traverse'],
(
text_(b'La Pe\xc3\xb1a', 'utf-8'),
text_(b'Qu\xc3\xa9bec', 'utf-8'),
),
)
def test_custom_predicates_can_affect_traversal(self):
from pyramid.config.predicates import predvalseq
def custom(info, request):
m = info['match']
m['dummy'] = 'foo'
return True
_, predicates, _ = self._callFUT(
custom=predvalseq([custom]), traverse='/1/:dummy/:a'
)
self.assertEqual(len(predicates), 2)
info = {'match': {'a': 'a'}}
request = DummyRequest()
self.assertTrue(all([p(info, request) for p in predicates]))
self.assertEqual(
info,
{
'match': {
'a': 'a',
'dummy': 'foo',
'traverse': ('1', 'foo', 'a'),
}
},
)
def test_predicate_text_is_correct(self):
from pyramid.config.predicates import predvalseq
_, predicates, _ = self._callFUT(
xhr='xhr',
request_method='request_method',
path_info='path_info',
request_param='param',
header='header',
accept='accept',
containment='containment',
request_type='request_type',
custom=predvalseq(
[
DummyCustomPredicate(),
DummyCustomPredicate.classmethod_predicate,
DummyCustomPredicate.classmethod_predicate_no_text,
]
),
match_param='foo=bar',
is_authenticated=False,
)
self.assertEqual(predicates[0].text(), 'xhr = True')
self.assertEqual(
predicates[1].text(), "request_method = request_method"
)
self.assertEqual(predicates[2].text(), 'path_info = path_info')
self.assertEqual(predicates[3].text(), 'request_param param')
self.assertEqual(predicates[4].text(), 'header header')
self.assertEqual(predicates[5].text(), 'accept = accept')
self.assertEqual(predicates[6].text(), 'containment = containment')
self.assertEqual(predicates[7].text(), 'request_type = request_type')
self.assertEqual(predicates[8].text(), "match_param foo=bar")
self.assertEqual(predicates[9].text(), "is_authenticated = False")
self.assertEqual(predicates[10].text(), 'custom predicate')
self.assertEqual(predicates[11].text(), 'classmethod predicate')
self.assertTrue(predicates[12].text().startswith('custom predicate'))
def test_predicate_text_is_correct_when_multiple(self):
_, predicates, _ = self._callFUT(
request_method=('one', 'two'),
request_param=('par2=on', 'par1'),
header=('header2', 'header1:val.*'),
accept=('accept1', 'accept2'),
match_param=('foo=bar', 'baz=bim'),
)
self.assertEqual(predicates[0].text(), "request_method = one,two")
self.assertEqual(predicates[1].text(), 'request_param par1,par2=on')
self.assertEqual(predicates[2].text(), 'header header1=val.*, header2')
self.assertEqual(predicates[3].text(), 'accept = accept1, accept2')
self.assertEqual(predicates[4].text(), "match_param baz=bim,foo=bar")
def test_match_param_from_string(self):
_, predicates, _ = self._callFUT(match_param='foo=bar')
request = DummyRequest()
request.matchdict = {'foo': 'bar', 'baz': 'bum'}
self.assertTrue(predicates[0](Dummy(), request))
def test_match_param_from_string_fails(self):
_, predicates, _ = self._callFUT(match_param='foo=bar')
request = DummyRequest()
request.matchdict = {'foo': 'bum', 'baz': 'bum'}
self.assertFalse(predicates[0](Dummy(), request))
def test_match_param_from_dict(self):
_, predicates, _ = self._callFUT(match_param=('foo=bar', 'baz=bum'))
request = DummyRequest()
request.matchdict = {'foo': 'bar', 'baz': 'bum'}
self.assertTrue(predicates[0](Dummy(), request))
def test_match_param_from_dict_fails(self):
_, predicates, _ = self._callFUT(match_param=('foo=bar', 'baz=bum'))
request = DummyRequest()
request.matchdict = {'foo': 'bar', 'baz': 'foo'}
self.assertFalse(predicates[0](Dummy(), request))
def test_request_method_sequence(self):
_, predicates, _ = self._callFUT(request_method=('GET', 'HEAD'))
request = DummyRequest()
request.method = 'HEAD'
self.assertTrue(predicates[0](Dummy(), request))
request.method = 'GET'
self.assertTrue(predicates[0](Dummy(), request))
request.method = 'POST'
self.assertFalse(predicates[0](Dummy(), request))
def test_request_method_ordering_hashes_same(self):
hash1, _, __ = self._callFUT(request_method=('GET', 'HEAD'))
hash2, _, __ = self._callFUT(request_method=('HEAD', 'GET'))
self.assertEqual(hash1, hash2)
hash1, _, __ = self._callFUT(request_method=('GET',))
hash2, _, __ = self._callFUT(request_method='GET')
self.assertEqual(hash1, hash2)
def test_header_simple(self):
_, predicates, _ = self._callFUT(header='foo')
request = DummyRequest()
request.headers = {'foo': 'bars', 'baz': 'foo'}
self.assertTrue(predicates[0](Dummy(), request))
def test_header_simple_fails(self):
_, predicates, _ = self._callFUT(header='content-length')
request = DummyRequest()
request.headers = {'foo': 'bars', 'baz': 'foo'}
self.assertFalse(predicates[0](Dummy(), request))
def test_header_with_value(self):
_, predicates, _ = self._callFUT(header='foo:bar')
request = DummyRequest()
request.headers = {'foo': 'bars', 'baz': 'foo'}
self.assertTrue(predicates[0](Dummy(), request))
def test_header_with_value_fails(self):
_, predicates, _ = self._callFUT(header='foo:bar')
request = DummyRequest()
request.headers = {'foo': 'nobar', 'baz': 'foo'}
self.assertFalse(predicates[0](Dummy(), request))
def test_header_with_value_fails_case(self):
_, predicates, _ = self._callFUT(header='foo:bar')
request = DummyRequest()
request.headers = {'foo': 'BAR'}
self.assertFalse(predicates[0](Dummy(), request))
def test_header_multiple(self):
_, predicates, _ = self._callFUT(header=('foo', 'content-length'))
request = DummyRequest()
request.headers = {'foo': 'bars', 'content-length': '42'}
self.assertTrue(predicates[0](Dummy(), request))
def test_header_multiple_fails(self):
_, predicates, _ = self._callFUT(header=('foo', 'content-encoding'))
request = DummyRequest()
request.headers = {'foo': 'bars', 'content-length': '42'}
self.assertFalse(predicates[0](Dummy(), request))
def test_header_multiple_with_values(self):
_, predicates, _ = self._callFUT(header=('foo:bar', 'spam:egg'))
request = DummyRequest()
request.headers = {'foo': 'bars', 'spam': 'eggs'}
self.assertTrue(predicates[0](Dummy(), request))
def test_header_multiple_with_values_fails(self):
_, predicates, _ = self._callFUT(header=('foo:bar', 'spam:egg$'))
request = DummyRequest()
request.headers = {'foo': 'bars', 'spam': 'eggs'}
self.assertFalse(predicates[0](Dummy(), request))
def test_header_multiple_mixed(self):
_, predicates, _ = self._callFUT(header=('foo:bar', 'spam'))
request = DummyRequest()
request.headers = {'foo': 'bars', 'spam': 'ham'}
self.assertTrue(predicates[0](Dummy(), request))
def test_header_multiple_mixed_fails(self):
_, predicates, _ = self._callFUT(header=('foo:bar', 'spam'))
request = DummyRequest()
request.headers = {'foo': 'nobar', 'spamme': 'ham'}
self.assertFalse(predicates[0](Dummy(), request))
def test_is_authenticated_true_matches(self):
_, predicates, _ = self._callFUT(is_authenticated=True)
request = DummyRequest()
request.is_authenticated = True
self.assertTrue(predicates[0](Dummy(), request))
def test_is_authenticated_true_fails(self):
_, predicates, _ = self._callFUT(is_authenticated=True)
request = DummyRequest()
request.is_authenticated = False
self.assertFalse(predicates[0](Dummy(), request))
def test_is_authenticated_false_matches(self):
_, predicates, _ = self._callFUT(is_authenticated=False)
request = DummyRequest()
request.is_authenticated = False
self.assertTrue(predicates[0](Dummy(), request))
def test_is_authenticated_false_fails(self):
_, predicates, _ = self._callFUT(is_authenticated=False)
request = DummyRequest()
request.is_authenticated = True
self.assertFalse(predicates[0](Dummy(), request))
def test_unknown_predicate(self):
from pyramid.exceptions import ConfigurationError
self.assertRaises(ConfigurationError, self._callFUT, unknown=1)
def test_predicate_close_matches(self):
from pyramid.exceptions import ConfigurationError
with self.assertRaises(ConfigurationError) as context:
self._callFUT(method='GET')
expected_msg = (
"Unknown predicate values: {'method': 'GET'} "
"(did you mean request_method)"
)
self.assertEqual(context.exception.args[0], expected_msg)
def test_notted(self):
from pyramid.config import not_
from pyramid.testing import DummyRequest
request = DummyRequest()
_, predicates, _ = self._callFUT(
xhr='xhr', request_method=not_('POST'), header=not_('header')
)
self.assertEqual(predicates[0].text(), 'xhr = True')
self.assertEqual(predicates[1].text(), "!request_method = POST")
self.assertEqual(predicates[2].text(), '!header header')
self.assertEqual(predicates[1](None, request), True)
self.assertEqual(predicates[2](None, request), True)
| TestPredicateList |
python | numba__numba | numba/tests/test_caching.py | {
"start": 3159,
"end": 5880
} | class ____(SerialMixin, TestCase):
def run_test(self, func):
func()
res = run_in_new_process_caching(func)
self.assertEqual(res['exitcode'], 0)
def test_constant_unicode_cache(self):
self.run_test(check_constant_unicode_cache)
def test_dict_cache(self):
self.run_test(check_dict_cache)
def test_generator_cache(self):
self.run_test(check_generator_cache)
def test_omitted(self):
# Test in a new directory
cache_dir = temp_directory(self.__class__.__name__)
ctx = multiprocessing.get_context()
result_queue = ctx.Queue()
proc = ctx.Process(
target=omitted_child_test_wrapper,
args=(result_queue, cache_dir, False),
)
proc.start()
proc.join()
success, output = result_queue.get()
# Ensure the child process is completed before checking its output
if not success:
self.fail(output)
self.assertEqual(
output,
1000,
"Omitted function returned an incorrect output"
)
proc = ctx.Process(
target=omitted_child_test_wrapper,
args=(result_queue, cache_dir, True)
)
proc.start()
proc.join()
success, output = result_queue.get()
# Ensure the child process is completed before checking its output
if not success:
self.fail(output)
self.assertEqual(
output,
1000,
"Omitted function returned an incorrect output"
)
def omitted_child_test_wrapper(result_queue, cache_dir, second_call):
with override_config("CACHE_DIR", cache_dir):
@njit(cache=True)
def test(num=1000):
return num
try:
output = test()
# If we have a second call, we should have a cache hit.
# Otherwise, we expect a cache miss.
if second_call:
assert test._cache_hits[test.signatures[0]] == 1, \
"Cache did not hit as expected"
assert test._cache_misses[test.signatures[0]] == 0, \
"Cache has an unexpected miss"
else:
assert test._cache_misses[test.signatures[0]] == 1, \
"Cache did not miss as expected"
assert test._cache_hits[test.signatures[0]] == 0, \
"Cache has an unexpected hit"
success = True
# Catch anything raised so it can be propagated
except: # noqa: E722
output = traceback.format_exc()
success = False
result_queue.put((success, output))
| TestCaching |
python | getsentry__sentry | src/sentry/web/frontend/error_500.py | {
"start": 207,
"end": 377
} | class ____(View):
def dispatch(self, request: HttpRequest) -> HttpResponse:
return render_to_response("sentry/500.html", status=500, request=request)
| Error500View |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/proto/descriptor_source_test_base.py | {
"start": 1196,
"end": 6861
} | class ____(test.TestCase):
"""Base class for testing descriptor sources."""
def __init__(self, decode_module, encode_module, methodName='runTest'): # pylint: disable=invalid-name
"""DescriptorSourceTestBase initializer.
Args:
decode_module: a module containing the `decode_proto_op` method
encode_module: a module containing the `encode_proto_op` method
methodName: the name of the test method (same as for test.TestCase)
"""
super(DescriptorSourceTestBase, self).__init__(methodName)
self._decode_module = decode_module
self._encode_module = encode_module
# NOTE: We generate the descriptor programmatically instead of via a compiler
# because of differences between different versions of the compiler.
#
# The generated descriptor should capture the subset of `test_example.proto`
# used in `test_base.simple_test_case()`.
def _createDescriptorProto(self):
proto = FileDescriptorSet()
file_proto = proto.file.add(
name='types.proto', package='tensorflow', syntax='proto3')
enum_proto = file_proto.enum_type.add(name='DataType')
enum_proto.value.add(name='DT_DOUBLE', number=0)
enum_proto.value.add(name='DT_BOOL', number=1)
file_proto = proto.file.add(
name='test_example.proto',
package='tensorflow.contrib.proto',
dependency=['types.proto'])
message_proto = file_proto.message_type.add(name='TestCase')
message_proto.field.add(
name='values',
number=1,
type=FieldDescriptorProto.TYPE_MESSAGE,
type_name='.tensorflow.contrib.proto.TestValue',
label=FieldDescriptorProto.LABEL_REPEATED)
message_proto.field.add(
name='shapes',
number=2,
type=FieldDescriptorProto.TYPE_INT32,
label=FieldDescriptorProto.LABEL_REPEATED)
message_proto.field.add(
name='sizes',
number=3,
type=FieldDescriptorProto.TYPE_INT32,
label=FieldDescriptorProto.LABEL_REPEATED)
message_proto.field.add(
name='fields',
number=4,
type=FieldDescriptorProto.TYPE_MESSAGE,
type_name='.tensorflow.contrib.proto.FieldSpec',
label=FieldDescriptorProto.LABEL_REPEATED)
message_proto = file_proto.message_type.add(
name='TestValue')
message_proto.field.add(
name='double_value',
number=1,
type=FieldDescriptorProto.TYPE_DOUBLE,
label=FieldDescriptorProto.LABEL_REPEATED)
message_proto.field.add(
name='bool_value',
number=2,
type=FieldDescriptorProto.TYPE_BOOL,
label=FieldDescriptorProto.LABEL_REPEATED)
message_proto = file_proto.message_type.add(
name='FieldSpec')
message_proto.field.add(
name='name',
number=1,
type=FieldDescriptorProto.TYPE_STRING,
label=FieldDescriptorProto.LABEL_OPTIONAL)
message_proto.field.add(
name='dtype',
number=2,
type=FieldDescriptorProto.TYPE_ENUM,
type_name='.tensorflow.DataType',
label=FieldDescriptorProto.LABEL_OPTIONAL)
message_proto.field.add(
name='value',
number=3,
type=FieldDescriptorProto.TYPE_MESSAGE,
type_name='.tensorflow.contrib.proto.TestValue',
label=FieldDescriptorProto.LABEL_OPTIONAL)
return proto
def _writeProtoToFile(self, proto):
fn = os.path.join(self.get_temp_dir(), 'descriptor.pb')
with open(fn, 'wb') as f:
f.write(proto.SerializeToString())
return fn
def _testRoundtrip(self, descriptor_source):
# Numpy silently truncates the strings if you don't specify dtype=object.
in_bufs = np.array(
[test_base.ProtoOpTestBase.simple_test_case().SerializeToString()],
dtype=object)
message_type = 'tensorflow.contrib.proto.TestCase'
field_names = ['values', 'shapes', 'sizes', 'fields']
tensor_types = [dtypes.string, dtypes.int32, dtypes.int32, dtypes.string]
with self.cached_session() as sess:
sizes, field_tensors = self._decode_module.decode_proto(
in_bufs,
message_type=message_type,
field_names=field_names,
output_types=tensor_types,
descriptor_source=descriptor_source)
out_tensors = self._encode_module.encode_proto(
sizes,
field_tensors,
message_type=message_type,
field_names=field_names,
descriptor_source=descriptor_source)
out_bufs, = sess.run([out_tensors])
# Check that the re-encoded tensor has the same shape.
self.assertEqual(in_bufs.shape, out_bufs.shape)
# Compare the input and output.
for in_buf, out_buf in zip(in_bufs.flat, out_bufs.flat):
# Check that the input and output serialized messages are identical.
# If we fail here, there is a difference in the serialized
# representation but the new serialization still parses. This could
# be harmless (a change in map ordering?) or it could be bad (e.g.
# loss of packing in the encoding).
self.assertEqual(in_buf, out_buf)
def testWithFileDescriptorSet(self):
# First try parsing with a local proto db, which should fail.
with self.assertRaisesOpError('No descriptor found for message type'):
self._testRoundtrip(b'local://')
# Now try parsing with a FileDescriptorSet which contains the test proto.
proto = self._createDescriptorProto()
proto_file = self._writeProtoToFile(proto)
self._testRoundtrip(proto_file)
# Finally, try parsing the descriptor as a serialized string.
self._testRoundtrip(b'bytes://' + proto.SerializeToString())
| DescriptorSourceTestBase |
python | huggingface__transformers | src/transformers/models/ovis2/modeling_ovis2.py | {
"start": 15722,
"end": 16677
} | class ____(nn.Module):
def __init__(self, config: Ovis2VisionConfig):
super().__init__()
self.config = config
self.embeddings = Ovis2VisionEmbeddings(config)
self.encoder = Ovis2VisionEncoder(config)
self.rms_norm = Ovis2RMSNorm(config.hidden_size, config.rms_norm_eps)
self.gradient_checkpointing = False
@can_return_tuple
def forward(
self,
pixel_values,
attention_mask: Optional[torch.Tensor] = None,
**kwargs,
):
hidden_states = self.embeddings(pixel_values)
encoder_outputs: BaseModelOutput = self.encoder(
inputs_embeds=hidden_states,
attention_mask=attention_mask,
**kwargs,
)
last_hidden_state = encoder_outputs.last_hidden_state
last_hidden_state = self.rms_norm(last_hidden_state)
return BaseModelOutput(last_hidden_state=last_hidden_state)
| Ovis2VisionTransformer |
python | ipython__ipython | tests/test_guarded_eval.py | {
"start": 7392,
"end": 7506
} | class ____:
def __new__(self) -> frozenset: # type:ignore[misc]
return frozenset()
| InitReturnsFrozenset |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/utils/plus/defs_state_storage.py | {
"start": 1057,
"end": 3883
} | class ____(DefsStateStorage[T_DagsterInstance]):
"""DefsStateStorage that can be instantiated from a DagsterPlusCliConfig,
intended for use within the CLI.
"""
def __init__(self, url: str, api_token: str, deployment: str, graphql_client):
self._url = url
self._api_token = api_token
self._deployment = deployment
self._graphql_client = graphql_client
@classmethod
def from_location_state(
cls, location_state: "LocationState", api_token: str, organization: str
):
from dagster_dg_cli.utils.plus.gql_client import DagsterPlusGraphQLClient
return cls(
location_state.url,
api_token,
location_state.deployment_name,
DagsterPlusGraphQLClient.from_location_state(location_state, api_token, organization),
)
@property
def url(self) -> str:
return self._url
@property
def api_token(self) -> str:
return self._api_token
@property
def deployment(self) -> str:
return self._deployment
@property
def graphql_client(self) -> Any:
return self._graphql_client
def _execute_query(self, query, variables=None):
return self.graphql_client.execute(query, variables=variables)
def _get_artifact_key(self, key: str, version: str) -> str:
return f"__state__/{self._sanitize_key(key)}/{version}"
def download_state_to_path(self, key: str, version: str, path: Path) -> None:
download_artifact(
url=self.url,
scope=DagsterCloudInstanceScope.DEPLOYMENT,
api_token=self.api_token,
key=self._get_artifact_key(key, version),
path=path,
deployment=self.deployment,
)
def upload_state_from_path(self, key: str, version: str, path: Path) -> None:
upload_artifact(
url=self.url,
scope=DagsterCloudInstanceScope.DEPLOYMENT,
api_token=self.api_token,
key=self._get_artifact_key(key, version),
path=path,
deployment=self.deployment,
)
self.set_latest_version(key, version)
def get_latest_defs_state_info(self) -> Optional[DefsStateInfo]:
res = self._execute_query(GET_LATEST_DEFS_STATE_INFO_QUERY)
latest_info = res["latestDefsStateInfo"]
return DefsStateInfo.from_graphql(latest_info) if latest_info else None
def set_latest_version(self, key: str, version: str) -> None:
result = self._execute_query(
SET_LATEST_VERSION_MUTATION, variables={"key": key, "version": version}
)
check.invariant(
result.get("setLatestDefsStateVersion", {}).get("ok"),
f"Failed to set latest version. Result: {result}",
)
| DagsterPlusCliDefsStateStorage |
python | walkccc__LeetCode | solutions/1465. Maximum Area of a Piece of Cake After Horizontal and Vertical Cuts/1465.py | {
"start": 0,
"end": 510
} | class ____:
def maxArea(
self,
h: int,
w: int,
horizontalCuts: list[int],
verticalCuts: list[int],
) -> int:
MOD = 1_000_000_007
# the maximum gap of each direction
maxGapX = max(b - a
for a, b in itertools.pairwise(
[0] + sorted(horizontalCuts) + [h]))
maxGapY = max(b - a
for a, b in itertools.pairwise(
[0] + sorted(verticalCuts) + [w]))
return maxGapX * maxGapY % MOD
| Solution |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 41423,
"end": 41734
} | class ____(fftw_info):
section = 'fftw3'
dir_env_var = 'ARMPL_DIR'
notfounderror = FFTWNotFoundError
ver_info = [{'name': 'fftw3',
'libs': ['armpl_lp64_mp'],
'includes': ['fftw3.h'],
'macros': [('SCIPY_FFTW3_H', None)]}]
| fftw3_armpl_info |
python | numba__numba | numba/core/types/functions.py | {
"start": 24093,
"end": 24582
} | class ____(Function):
"""
A named native function (resolvable by LLVM) accepting an explicit
signature. For internal use only.
"""
def __init__(self, symbol, sig):
from numba.core import typing
self.symbol = symbol
self.sig = sig
template = typing.make_concrete_template(symbol, symbol, [sig])
super(ExternalFunction, self).__init__(template)
@property
def key(self):
return self.symbol, self.sig
| ExternalFunction |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_lower_triangular_test.py | {
"start": 1178,
"end": 6731
} | class ____(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
@staticmethod
def skip_these_tests():
# Cholesky does not make sense for triangular matrices.
return ["cholesky"]
def operator_and_matrix(self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
shape = list(build_info.shape)
# Upper triangle will be nonzero, but ignored.
# Use a diagonal that ensures this matrix is well conditioned.
tril = linear_operator_test_util.random_tril_matrix(
shape, dtype=dtype, force_well_conditioned=True, remove_upper=False)
if ensure_self_adjoint_and_pd:
# Get the diagonal and make the matrix out of it.
tril = array_ops.matrix_diag_part(tril)
tril = math_ops.abs(tril) + 1e-1
tril = array_ops.matrix_diag(tril)
lin_op_tril = tril
if use_placeholder:
lin_op_tril = array_ops.placeholder_with_default(lin_op_tril, shape=None)
operator = linalg.LinearOperatorLowerTriangular(
lin_op_tril,
is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
is_positive_definite=True if ensure_self_adjoint_and_pd else None)
matrix = array_ops.matrix_band_part(tril, -1, 0)
return operator, matrix
def test_assert_non_singular(self):
# Singular matrix with one positive eigenvalue and one zero eigenvalue.
with self.cached_session():
tril = [[1., 0.], [1., 0.]]
operator = linalg.LinearOperatorLowerTriangular(tril)
with self.assertRaisesOpError("Singular operator"):
operator.assert_non_singular().run()
def test_is_x_flags(self):
# Matrix with two positive eigenvalues.
tril = [[1., 0.], [1., 1.]]
operator = linalg.LinearOperatorLowerTriangular(
tril,
is_positive_definite=True,
is_non_singular=True,
is_self_adjoint=False)
self.assertTrue(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertFalse(operator.is_self_adjoint)
def test_tril_must_have_at_least_two_dims_or_raises(self):
with self.assertRaisesRegex(ValueError, "at least 2 dimensions"):
linalg.LinearOperatorLowerTriangular([1.])
def test_triangular_diag_matmul(self):
operator1 = linalg_lib.LinearOperatorLowerTriangular(
[[1., 0., 0.], [2., 1., 0.], [2., 3., 3.]])
operator2 = linalg_lib.LinearOperatorDiag([2., 2., 3.])
operator_matmul = operator1.matmul(operator2)
self.assertTrue(isinstance(
operator_matmul,
linalg_lib.LinearOperatorLowerTriangular))
self.assertAllClose(
math_ops.matmul(
operator1.to_dense(),
operator2.to_dense()),
self.evaluate(operator_matmul.to_dense()))
operator_matmul = operator2.matmul(operator1)
self.assertTrue(isinstance(
operator_matmul,
linalg_lib.LinearOperatorLowerTriangular))
self.assertAllClose(
math_ops.matmul(
operator2.to_dense(),
operator1.to_dense()),
self.evaluate(operator_matmul.to_dense()))
def test_tape_safe(self):
tril = variables_module.Variable([[1., 0.], [0., 1.]])
operator = linalg_lib.LinearOperatorLowerTriangular(
tril, is_non_singular=True)
self.check_tape_safe(operator)
def test_convert_variables_to_tensors(self):
tril = variables_module.Variable([[1., 0.], [0., 1.]])
operator = linalg_lib.LinearOperatorLowerTriangular(
tril, is_non_singular=True)
with self.cached_session() as sess:
sess.run([tril.initializer])
self.check_convert_variables_to_tensors(operator)
def test_llt_composition_with_pd_l(self):
l = linalg_lib.LinearOperatorLowerTriangular(
[[1., 0.], [0.5, 0.2]], is_non_singular=True, is_positive_definite=True)
self.assertIs(l, (l @ l.H).cholesky())
def test_llt_composition_with_non_pd_l(self):
# The tril matrix here is selected so that multiplying the rows by the sign
# (the correct thing to do) is different than multiplying the columns.
l = linalg_lib.LinearOperatorLowerTriangular(
[[-1., 0., 0.], [0.5, 0.2, 0.], [0.1, 0.1, 1.]], is_non_singular=True)
llt = l @ l.H
chol = llt.cholesky()
self.assertIsInstance(chol, linalg_lib.LinearOperatorLowerTriangular)
self.assertGreater(self.evaluate(chol.diag_part()).min(), 0)
self.assertAllClose(
self.evaluate(llt.to_dense()), self.evaluate(
(chol @ chol.H).to_dense()))
def test_llt_composition_with_non_pd_complex_l(self):
# The tril matrix here is selected so that multiplying the rows by the sign
# (the correct thing to do) is different than multiplying the columns.
i = math_ops.complex(0., 1.)
l = linalg_lib.LinearOperatorLowerTriangular(
[[-1. + i, 0., 0.], [0.5, 0.2 - 2 * i, 0.], [0.1, 0.1, 1.]],
is_non_singular=True)
llt = l @ l.H
chol = llt.cholesky()
self.assertIsInstance(chol, linalg_lib.LinearOperatorLowerTriangular)
self.assertGreater(self.evaluate(math_ops.real(chol.diag_part())).min(), 0)
self.assertAllClose(
self.evaluate(math_ops.imag(chol.diag_part())).min(), 0)
self.assertAllClose(
self.evaluate(llt.to_dense()), self.evaluate(
(chol @ chol.H).to_dense()))
if __name__ == "__main__":
config.enable_tensor_float_32_execution(False)
linear_operator_test_util.add_tests(LinearOperatorLowerTriangularTest)
test.main()
| LinearOperatorLowerTriangularTest |
python | django__django | django/contrib/auth/forms.py | {
"start": 4888,
"end": 7515
} | class ____:
"""
Form mixin that allows setting an unusable password for a user.
This mixin should be used in combination with `SetPasswordMixin`.
"""
usable_password_help_text = _(
"Whether the user will be able to authenticate using a password or not. "
"If disabled, they may still be able to authenticate using other backends, "
"such as Single Sign-On or LDAP."
)
@staticmethod
def create_usable_password_field(help_text=usable_password_help_text):
return forms.ChoiceField(
label=_("Password-based authentication"),
required=False,
initial="true",
choices={"true": _("Enabled"), "false": _("Disabled")},
widget=forms.RadioSelect(attrs={"class": "radiolist inline"}),
help_text=help_text,
)
@sensitive_variables("password1", "password2")
def validate_passwords(
self,
password1_field_name="password1",
password2_field_name="password2",
usable_password_field_name="usable_password",
):
usable_password = (
self.cleaned_data.pop(usable_password_field_name, None) != "false"
)
self.cleaned_data["set_usable_password"] = usable_password
if not usable_password:
return
password1 = self.cleaned_data.get(password1_field_name)
password2 = self.cleaned_data.get(password2_field_name)
if not password1 and password1_field_name not in self.errors:
error = ValidationError(
self.fields[password1_field_name].error_messages["required"],
code="required",
)
self.add_error(password1_field_name, error)
if not password2 and password2_field_name not in self.errors:
error = ValidationError(
self.fields[password2_field_name].error_messages["required"],
code="required",
)
self.add_error(password2_field_name, error)
super().validate_passwords(password1_field_name, password2_field_name)
def validate_password_for_user(self, user, **kwargs):
if self.cleaned_data["set_usable_password"]:
super().validate_password_for_user(user, **kwargs)
def set_password_and_save(self, user, commit=True, **kwargs):
if self.cleaned_data["set_usable_password"]:
user = super().set_password_and_save(user, **kwargs, commit=commit)
else:
user.set_unusable_password()
if commit:
user.save()
return user
| SetUnusablePasswordMixin |
python | vyperlang__vyper | vyper/ast/pre_parser.py | {
"start": 6389,
"end": 8085
} | class ____:
def __init__(self):
self.locations = []
self._tokens = []
self._state = ParserState.NOT_RUNNING
def consume(self, token, result):
# prepare to check if the next token is a STRING
if self._state == ParserState.NOT_RUNNING:
if token.type == NAME and token.string == "x":
self._tokens.append(token)
self._state = ParserState.RUNNING
return True
return False
assert self._state == ParserState.RUNNING, "unreachable"
self._state = ParserState.NOT_RUNNING
if token.type != STRING:
# flush the tokens we have accumulated and move on
result.extend(self._tokens)
self._tokens = []
return False
# mark hex string in locations for later processing
self.locations.append(token.start)
# discard the `x` token and apply sanity checks -
# we should only be discarding one token.
assert len(self._tokens) == 1
assert (x_tok := self._tokens[0]).type == NAME and x_tok.string == "x"
self._tokens = [] # discard tokens
result.append(token)
return True
# compound statements that are replaced with `class`
# TODO remove enum in favor of flag
VYPER_CLASS_TYPES = {
"flag": "FlagDef",
"enum": "EnumDef",
"event": "EventDef",
"interface": "InterfaceDef",
"struct": "StructDef",
}
# simple statements that are replaced with `yield`
CUSTOM_STATEMENT_TYPES = {"log": "Log"}
# expression types that are replaced with `await`
CUSTOM_EXPRESSION_TYPES = {"extcall": "ExtCall", "staticcall": "StaticCall"}
| HexStringParser |
python | networkx__networkx | networkx/algorithms/assortativity/tests/test_correlation.py | {
"start": 244,
"end": 2681
} | class ____(BaseTestDegreeMixing):
def test_degree_assortativity_undirected(self):
r = nx.degree_assortativity_coefficient(self.P4)
np.testing.assert_almost_equal(r, -1.0 / 2, decimal=4)
def test_degree_assortativity_node_kwargs(self):
G = nx.Graph()
edges = [(0, 1), (0, 3), (1, 2), (1, 3), (1, 4), (5, 9), (9, 0)]
G.add_edges_from(edges)
r = nx.degree_assortativity_coefficient(G, nodes=[1, 2, 4])
np.testing.assert_almost_equal(r, -1.0, decimal=4)
def test_degree_assortativity_directed(self):
r = nx.degree_assortativity_coefficient(self.D)
np.testing.assert_almost_equal(r, -0.57735, decimal=4)
def test_degree_assortativity_directed2(self):
"""Test degree assortativity for a directed graph where the set of
in/out degree does not equal the total degree."""
r = nx.degree_assortativity_coefficient(self.D2)
np.testing.assert_almost_equal(r, 0.14852, decimal=4)
def test_degree_assortativity_multigraph(self):
r = nx.degree_assortativity_coefficient(self.M)
np.testing.assert_almost_equal(r, -1.0 / 7.0, decimal=4)
def test_degree_pearson_assortativity_undirected(self):
r = nx.degree_pearson_correlation_coefficient(self.P4)
np.testing.assert_almost_equal(r, -1.0 / 2, decimal=4)
def test_degree_pearson_assortativity_directed(self):
r = nx.degree_pearson_correlation_coefficient(self.D)
np.testing.assert_almost_equal(r, -0.57735, decimal=4)
def test_degree_pearson_assortativity_directed2(self):
"""Test degree assortativity with Pearson for a directed graph where
the set of in/out degree does not equal the total degree."""
r = nx.degree_pearson_correlation_coefficient(self.D2)
np.testing.assert_almost_equal(r, 0.14852, decimal=4)
def test_degree_pearson_assortativity_multigraph(self):
r = nx.degree_pearson_correlation_coefficient(self.M)
np.testing.assert_almost_equal(r, -1.0 / 7.0, decimal=4)
def test_degree_assortativity_weighted(self):
r = nx.degree_assortativity_coefficient(self.W, weight="weight")
np.testing.assert_almost_equal(r, -0.1429, decimal=4)
def test_degree_assortativity_double_star(self):
r = nx.degree_assortativity_coefficient(self.DS)
np.testing.assert_almost_equal(r, -0.9339, decimal=4)
| TestDegreeMixingCorrelation |
python | sympy__sympy | sympy/physics/secondquant.py | {
"start": 11384,
"end": 15397
} | class ____(SqOperator):
@property
def is_restricted(self):
"""
Is this FermionicOperator restricted with respect to fermi level?
Returns
=======
1 : restricted to orbits above fermi
0 : no restriction
-1 : restricted to orbits below fermi
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F, Fd
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_restricted
1
>>> Fd(a).is_restricted
1
>>> F(i).is_restricted
-1
>>> Fd(i).is_restricted
-1
>>> F(p).is_restricted
0
>>> Fd(p).is_restricted
0
"""
ass = self.args[0].assumptions0
if ass.get("below_fermi"):
return -1
if ass.get("above_fermi"):
return 1
return 0
@property
def is_above_fermi(self):
"""
Does the index of this FermionicOperator allow values above fermi?
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_above_fermi
True
>>> F(i).is_above_fermi
False
>>> F(p).is_above_fermi
True
Note
====
The same applies to creation operators Fd
"""
return not self.args[0].assumptions0.get("below_fermi")
@property
def is_below_fermi(self):
"""
Does the index of this FermionicOperator allow values below fermi?
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_below_fermi
False
>>> F(i).is_below_fermi
True
>>> F(p).is_below_fermi
True
The same applies to creation operators Fd
"""
return not self.args[0].assumptions0.get("above_fermi")
@property
def is_only_below_fermi(self):
"""
Is the index of this FermionicOperator restricted to values below fermi?
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_only_below_fermi
False
>>> F(i).is_only_below_fermi
True
>>> F(p).is_only_below_fermi
False
The same applies to creation operators Fd
"""
return self.is_below_fermi and not self.is_above_fermi
@property
def is_only_above_fermi(self):
"""
Is the index of this FermionicOperator restricted to values above fermi?
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.secondquant import F
>>> a = Symbol('a', above_fermi=True)
>>> i = Symbol('i', below_fermi=True)
>>> p = Symbol('p')
>>> F(a).is_only_above_fermi
True
>>> F(i).is_only_above_fermi
False
>>> F(p).is_only_above_fermi
False
The same applies to creation operators Fd
"""
return self.is_above_fermi and not self.is_below_fermi
def _sortkey(self):
h = hash(self)
label = str(self.args[0])
if self.is_only_q_creator:
return 1, label, h
if self.is_only_q_annihilator:
return 4, label, h
if isinstance(self, Annihilator):
return 3, label, h
if isinstance(self, Creator):
return 2, label, h
| FermionicOperator |
python | pennersr__django-allauth | allauth/socialaccount/providers/fivehundredpx/provider.py | {
"start": 486,
"end": 1128
} | class ____(OAuthProvider):
id = "500px"
name = "500px"
package = "allauth.socialaccount.providers.fivehundredpx"
account_class = FiveHundredPxAccount
oauth_adapter_class = FiveHundredPxOAuthAdapter
def get_default_scope(self):
return []
def extract_uid(self, data):
return str(data["id"])
def extract_common_fields(self, data):
return dict(
username=data.get("username"),
email=data.get("email"),
first_name=data.get("firstname"),
last_name=data.get("lastname"),
)
provider_classes = [FiveHundredPxProvider]
| FiveHundredPxProvider |
python | pytorch__pytorch | torch/_export/serde/serialize.py | {
"start": 5227,
"end": 5363
} | class ____:
exported_program: bytes
state_dict: bytes
constants: bytes
example_inputs: bytes
@dataclass
| SerializedArtifact |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/asyncpg.py | {
"start": 9899,
"end": 9963
} | class ____(sqltypes.Date):
render_bind_cast = True
| AsyncpgDate |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/execution_tests/execution_plan_tests/test_external_step.py | {
"start": 22817,
"end": 24398
} | class ____(CacheableAssetsDefinition):
_cacheable_data = AssetsDefinitionCacheableData(
keys_by_output_name={"result": dg.AssetKey("foo")}
)
def compute_cacheable_data(self):
# used for tracking how many times this function gets called over an execution
# since we're crossing process boundaries, we pre-populate this value in the host process
# and assert that this pre-populated value is present, to ensure that we'll error if this
# gets called in a child process
instance = DagsterInstance.get()
val = instance.run_storage.get_cursor_values({"val"}).get("val")
assert val == "INITIAL_VALUE"
instance.run_storage.set_cursor_values({"val": "NEW_VALUE"})
return [self._cacheable_data]
def build_definitions(self, data):
assert len(data) == 1
assert data == [self._cacheable_data]
@dg.op(required_resource_keys={"step_launcher"})
def _op():
return 1
return dg.with_resources(
[
AssetsDefinition.from_op(
_op,
keys_by_output_name=cd.keys_by_output_name,
)
for cd in data
],
{"step_launcher": local_external_step_launcher},
)
@lazy_repository
def cacheable_asset_defs():
@dg.asset
def bar(foo):
return foo + 1
@dg.repository
def repo():
return [bar, MyCacheableAssetsDefinition("xyz"), dg.define_asset_job("all_asset_job")]
return repo
| MyCacheableAssetsDefinition |
python | walkccc__LeetCode | solutions/87. Scramble String/87.py | {
"start": 0,
"end": 492
} | class ____:
@functools.lru_cache(None)
def isScramble(self, s1: str, s2: str) -> bool:
if s1 == s2:
return True
if collections.Counter(s1) != collections.Counter(s2):
return False
for i in range(1, len(s1)):
if self.isScramble(s1[:i], s2[:i]) and self.isScramble(s1[i:], s2[i:]):
return True
if (self.isScramble(s1[:i], s2[len(s2) - i:]) and
self.isScramble(s1[i:], s2[: len(s2) - i])):
return True
return False
| Solution |
python | pydantic__pydantic | pydantic-core/tests/serializers/test_model_root.py | {
"start": 359,
"end": 593
} | class ____:
__slots__ = '__dict__', '__pydantic_fields_set__', '__pydantic_extra__', '__pydantic_private__'
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
| BaseModel |
python | spack__spack | lib/spack/spack/binary_distribution.py | {
"start": 108723,
"end": 108883
} | class ____(spack.error.SpackError):
"""
Raised when gpg2 is not in PATH
"""
def __init__(self, msg):
super().__init__(msg)
| NoGpgException |
python | matplotlib__matplotlib | lib/matplotlib/backend_tools.py | {
"start": 28986,
"end": 30567
} | class ____(ToolBase):
description = 'Print tool list, shortcuts and description'
default_keymap = property(lambda self: mpl.rcParams['keymap.help'])
image = 'mpl-data/images/help'
@staticmethod
def format_shortcut(key_sequence):
"""
Convert a shortcut string from the notation used in rc config to the
standard notation for displaying shortcuts, e.g. 'ctrl+a' -> 'Ctrl+A'.
"""
return (key_sequence if len(key_sequence) == 1 else
re.sub(r"\+[A-Z]", r"+Shift\g<0>", key_sequence).title())
def _format_tool_keymap(self, name):
keymaps = self.toolmanager.get_tool_keymap(name)
return ", ".join(self.format_shortcut(keymap) for keymap in keymaps)
def _get_help_entries(self):
return [(name, self._format_tool_keymap(name), tool.description)
for name, tool in sorted(self.toolmanager.tools.items())
if tool.description]
def _get_help_text(self):
entries = self._get_help_entries()
entries = ["{}: {}\n\t{}".format(*entry) for entry in entries]
return "\n".join(entries)
def _get_help_html(self):
fmt = "<tr><td>{}</td><td>{}</td><td>{}</td></tr>"
rows = [fmt.format(
"<b>Action</b>", "<b>Shortcuts</b>", "<b>Description</b>")]
rows += [fmt.format(*row) for row in self._get_help_entries()]
return ("<style>td {padding: 0px 4px}</style>"
"<table><thead>" + rows[0] + "</thead>"
"<tbody>".join(rows[1:]) + "</tbody></table>")
| ToolHelpBase |
python | pytorch__pytorch | test/dynamo/test_modes.py | {
"start": 1538,
"end": 3960
} | class ____(torch._dynamo.test_case.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
def test_torch_dispatch_ignore_compile_internals(self):
counters.clear()
from torch.utils._python_dispatch import TorchDispatchMode
@torch.library.custom_op("mylib::modes_checksum", mutates_args=())
def foo(x: torch.Tensor) -> torch.Tensor:
return x.clone()
def checksum(x):
return x.abs().sum()
_checksums = []
class ChecksumFoo(TorchDispatchMode):
@classmethod
def ignore_compile_internals(cls):
return True
def __init__(self) -> None:
super().__init__()
def __torch_dispatch__(self, func, types, args, kwargs=None):
kwargs = kwargs or {}
if func is torch.ops.mylib.modes_checksum.default:
# Do some compute, smoketest to see if there's a bad interaction
_checksums.append(args[0].abs().sum())
return func(*args, **kwargs)
# test e2e, with Inductor, as smoketest.
@torch._dynamo.error_on_graph_break(True)
@torch.compile(backend="inductor")
def g(x):
return 2 * x.sin().cos()
x = torch.randn(3)
with ChecksumFoo():
foo(x)
g(x)
foo(x)
self.assertEqual(len(_checksums), 2)
# The correct result here is 1: Dynamo should capture the `g` frame.
self.assertEqual(counters["frames"]["total"], 1)
self.assertEqual(counters["frames"]["ok"], 1)
def test_skip_torch_dispatch_modes(self):
class RewriteAddToMul(TorchDispatchMode):
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
if func is torch.ops.aten.add.Tensor:
func = torch.ops.aten.mul.Tensor
return func(*args, **kwargs)
def fn(x):
return x + x
cnt = torch._dynamo.testing.CompileCounter()
x = torch.tensor([3.0])
with RewriteAddToMul():
eager_res = fn(x)
compiled_res = torch.compile(fn, backend=cnt)(x)
self.assertEqual(eager_res, compiled_res)
self.assertEqual(cnt.frame_count, 0)
| TorchDispatchModeTests |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/selector.py | {
"start": 7364,
"end": 7973
} | class ____:
location_name: str
repository_name: str
resource_name: str
def to_graphql_input(self):
return {
"repositoryLocationName": self.location_name,
"repositoryName": self.repository_name,
"resourceName": self.resource_name,
}
@staticmethod
def from_graphql_input(graphql_data):
return ResourceSelector(
location_name=graphql_data["repositoryLocationName"],
repository_name=graphql_data["repositoryName"],
resource_name=graphql_data["resourceName"],
)
@record
| ResourceSelector |
python | getsentry__sentry | tests/sentry/workflow_engine/models/test_action.py | {
"start": 400,
"end": 6529
} | class ____(TestCase):
def setUp(self) -> None:
mock_group_event = Mock(spec=GroupEvent)
self.group = self.create_group()
self.mock_event = WorkflowEventData(event=mock_group_event, group=self.group)
self.action = Action(type=Action.Type.SLACK)
self.config_schema = {
"$id": "https://example.com/user-profile.schema.json",
"$schema": "https://json-schema.org/draft/2020-12/schema",
"description": "A representation of a user profile",
"type": "object",
"properties": {
"foo": {"type": "string"},
},
"additionalProperties": False,
}
self.valid_params = {
"type": Action.Type.SLACK,
"config": {"foo": "bar"},
"data": {"foo": "bar"},
}
def test_get_handler_notification_type(self) -> None:
with patch("sentry.workflow_engine.registry.action_handler_registry.get") as mock_get:
mock_handler = Mock(spec=ActionHandler)
mock_get.return_value = mock_handler
handler = self.action.get_handler()
mock_get.assert_called_once_with(Action.Type.SLACK)
assert handler == mock_handler
def test_get_handler_webhook_type(self) -> None:
self.action = Action(type=Action.Type.WEBHOOK)
with patch("sentry.workflow_engine.registry.action_handler_registry.get") as mock_get:
mock_handler = Mock(spec=ActionHandler)
mock_get.return_value = mock_handler
handler = self.action.get_handler()
mock_get.assert_called_once_with(Action.Type.WEBHOOK)
assert handler == mock_handler
def test_get_handler_unregistered_type(self) -> None:
with patch("sentry.workflow_engine.registry.action_handler_registry.get") as mock_get:
mock_get.side_effect = NoRegistrationExistsError(
"No handler registered for notification type"
)
with pytest.raises(
NoRegistrationExistsError, match="No handler registered for notification type"
):
self.action.get_handler()
# Verify the registry was queried with the correct action type
mock_get.assert_called_once_with(Action.Type.SLACK)
@patch("sentry.workflow_engine.processors.detector.get_detector_from_event_data")
def test_trigger_calls_handler_execute(self, mock_get_detector: MagicMock) -> None:
mock_handler = Mock(spec=ActionHandler)
mock_get_detector.return_value = Mock(spec=Detector, type="error")
with patch.object(self.action, "get_handler", return_value=mock_handler):
self.action.trigger(self.mock_event)
mock_handler.execute.assert_called_once_with(
self.mock_event, self.action, mock_get_detector.return_value
)
@patch("sentry.workflow_engine.processors.detector.get_detector_from_event_data")
def test_trigger_with_failing_handler(self, mock_get_detector: MagicMock) -> None:
mock_handler = Mock(spec=ActionHandler)
mock_handler.execute.side_effect = Exception("Handler failed")
mock_get_detector.return_value = Mock(spec=Detector, type="error")
with patch.object(self.action, "get_handler", return_value=mock_handler):
with pytest.raises(Exception, match="Handler failed"):
self.action.trigger(self.mock_event)
@patch("sentry.utils.metrics.incr")
@patch("sentry.workflow_engine.processors.detector.get_detector_from_event_data")
def test_trigger_metrics(self, mock_get_detector: MagicMock, mock_incr: MagicMock) -> None:
mock_handler = Mock(spec=ActionHandler)
mock_get_detector.return_value = Mock(spec=Detector, type="error")
with patch.object(self.action, "get_handler", return_value=mock_handler):
self.action.trigger(self.mock_event)
mock_handler.execute.assert_called_once()
mock_incr.assert_called_once_with(
"workflow_engine.action.trigger",
tags={"action_type": self.action.type, "detector_type": "error"},
sample_rate=1.0,
)
def test_config_schema(self) -> None:
mock_handler = Mock(spec=ActionHandler)
mock_handler.config_schema = self.config_schema
mock_handler.data_schema = self.config_schema
with patch.object(Action, "get_handler", return_value=mock_handler):
params = self.valid_params.copy()
params["config"] = {"foo": "bar"}
result = Action.objects.create(**params)
assert result is not None
def test_config_schema__invalid(self) -> None:
mock_handler = Mock(spec=ActionHandler)
mock_handler.config_schema = self.config_schema
mock_handler.data_schema = self.config_schema
with patch.object(Action, "get_handler", return_value=mock_handler):
with pytest.raises(ValidationError):
params = self.valid_params.copy()
params["config"] = {"baz": 42}
Action.objects.create(**params)
def test_data_schema(self) -> None:
mock_handler = Mock(spec=ActionHandler)
mock_handler.config_schema = self.config_schema
mock_handler.data_schema = self.config_schema
with patch.object(Action, "get_handler", return_value=mock_handler):
params = self.valid_params.copy()
params["data"] = {"foo": "bar"}
result = Action.objects.create(**params)
assert result is not None
def test_data_schema__invalid(self) -> None:
mock_handler = Mock(spec=ActionHandler)
mock_handler.config_schema = self.config_schema
mock_handler.data_schema = self.config_schema
with patch.object(Action, "get_handler", return_value=mock_handler):
with pytest.raises(ValidationError):
params = self.valid_params.copy()
params["data"] = {"baz": 42}
Action.objects.create(**params)
| TestAction |
python | getsentry__sentry | tests/sentry/grouping/test_components.py | {
"start": 1080,
"end": 17619
} | class ____(TestCase):
def setUp(self) -> None:
self.contributing_system_frame = {
"function": "handleRequest",
"filename": "/node_modules/express/router.js",
"context_line": "return handler(request);",
}
self.non_contributing_system_frame = {
"function": "runApp",
"filename": "/node_modules/express/app.js",
"context_line": "return server.serve(port);",
}
self.contributing_in_app_frame = {
"function": "playFetch",
"filename": "/dogApp/dogpark.js",
"context_line": "raise FailedToFetchError('Charlie didn't bring the ball back');",
}
self.non_contributing_in_app_frame = {
"function": "recordMetrics",
"filename": "/dogApp/metrics.js",
"context_line": "return withMetrics(handler, metricName, tags);",
}
self.exception_value = {
"type": "FailedToFetchError",
"value": "Charlie didn't bring the ball back",
}
self.event = Event(
event_id="12312012041520130908201311212012",
project_id=self.project.id,
data={
"title": "FailedToFetchError('Charlie didn't bring the ball back')",
"exception": {"values": [self.exception_value]},
},
)
self.project.update_option(
"sentry:grouping_enhancements",
"\n".join(
[
"stack.function:runApp -app -group",
"stack.function:handleRequest -app +group",
"stack.function:recordMetrics +app -group",
"stack.function:playFetch +app +group",
]
),
)
def test_primitive_wrappers_wrap_at_most_one_value(self) -> None:
# These run without erroring
FunctionGroupingComponent(values=[])
FunctionGroupingComponent(values=["playFetch"])
# Not so much this one
with pytest.raises(AssertionError):
FunctionGroupingComponent(values=["playFetch", "rollOver"])
def test_component_wrappers_can_wrap_multiple_values(self) -> None:
get_frame = lambda: FrameGroupingComponent(in_app=True, values=[])
# Any number of values is fine
StacktraceGroupingComponent(values=[])
StacktraceGroupingComponent(values=[get_frame()])
StacktraceGroupingComponent(values=[get_frame(), get_frame()])
def test_frame_components_record_in_app(self) -> None:
self.event.data["exception"]["values"][0]["stacktrace"] = {
"frames": [
self.contributing_system_frame,
self.contributing_in_app_frame,
]
}
# `normalize_stacktraces=True` forces the custom stacktrace enhancements to run
variants = self.event.get_grouping_variants(normalize_stacktraces=True)
for variant_name in ["app", "system"]:
exception_component = variants[variant_name].root_component.values[0]
assert isinstance(exception_component, ExceptionGroupingComponent)
stacktrace_component = find_given_child_component(
exception_component, StacktraceGroupingComponent
)
assert stacktrace_component
frame_components = stacktrace_component.values
found = []
for frame_component in frame_components:
child_component = find_given_child_component(
frame_component, FunctionGroupingComponent
)
assert child_component is not None
found.append(child_component.values[0])
assert found == ["handleRequest", "playFetch"]
assert [frame_component.in_app for frame_component in frame_components] == [False, True]
def test_stacktrace_component_tallies_frame_types_simple(self) -> None:
self.event.data["exception"]["values"][0]["stacktrace"] = {
"frames": (
[self.non_contributing_system_frame] * 11
+ [self.contributing_system_frame] * 21
+ [self.non_contributing_in_app_frame] * 12
+ [self.contributing_in_app_frame] * 31
)
}
# `normalize_stacktraces=True` forces the custom stacktrace enhancements to run
variants = self.event.get_grouping_variants(normalize_stacktraces=True)
system_exception_component = variants["system"].root_component.values[0]
app_exception_component = variants["app"].root_component.values[0]
assert isinstance(app_exception_component, ExceptionGroupingComponent)
assert isinstance(system_exception_component, ExceptionGroupingComponent)
app_stacktrace_component = find_given_child_component(
app_exception_component, StacktraceGroupingComponent
)
system_stacktrace_component = find_given_child_component(
system_exception_component, StacktraceGroupingComponent
)
assert app_stacktrace_component
assert system_stacktrace_component
assert (
app_exception_component.frame_counts
== system_exception_component.frame_counts
== app_stacktrace_component.frame_counts
== system_stacktrace_component.frame_counts
== Counter(
system_non_contributing_frames=11,
system_contributing_frames=21,
in_app_non_contributing_frames=12,
in_app_contributing_frames=31,
)
)
def test_stacktrace_component_tallies_frame_types_not_all_types_present(self) -> None:
self.event.data["exception"]["values"][0]["stacktrace"] = {
"frames": (
[self.contributing_system_frame] * 20 + [self.contributing_in_app_frame] * 13
)
}
# `normalize_stacktraces=True` forces the custom stacktrace enhancements to run
variants = self.event.get_grouping_variants(normalize_stacktraces=True)
system_exception_component = variants["system"].root_component.values[0]
app_exception_component = variants["app"].root_component.values[0]
assert isinstance(app_exception_component, ExceptionGroupingComponent)
assert isinstance(system_exception_component, ExceptionGroupingComponent)
app_stacktrace_component = find_given_child_component(
app_exception_component, StacktraceGroupingComponent
)
system_stacktrace_component = find_given_child_component(
system_exception_component, StacktraceGroupingComponent
)
assert app_stacktrace_component
assert system_stacktrace_component
assert (
app_exception_component.frame_counts
== system_exception_component.frame_counts
== app_stacktrace_component.frame_counts
== system_stacktrace_component.frame_counts
== Counter(
system_non_contributing_frames=0,
system_contributing_frames=20,
in_app_non_contributing_frames=0,
in_app_contributing_frames=13,
)
)
def test_exception_component_uses_stacktrace_frame_counts(self) -> None:
self.event.data["exception"]["values"][0]["stacktrace"] = {
"frames": (
[self.non_contributing_system_frame] * 4
+ [self.contributing_system_frame] * 15
+ [self.non_contributing_in_app_frame] * 9
+ [self.contributing_in_app_frame] * 8
)
}
# `normalize_stacktraces=True` forces the custom stacktrace enhancements to run
variants = self.event.get_grouping_variants(normalize_stacktraces=True)
system_exception_component = variants["system"].root_component.values[0]
app_exception_component = variants["app"].root_component.values[0]
assert isinstance(app_exception_component, ExceptionGroupingComponent)
assert isinstance(system_exception_component, ExceptionGroupingComponent)
app_stacktrace_component = find_given_child_component(
app_exception_component, StacktraceGroupingComponent
)
system_stacktrace_component = find_given_child_component(
system_exception_component, StacktraceGroupingComponent
)
assert app_stacktrace_component
assert system_stacktrace_component
assert (
app_exception_component.frame_counts
== system_exception_component.frame_counts
== app_stacktrace_component.frame_counts
== system_stacktrace_component.frame_counts
== Counter(
system_non_contributing_frames=4,
system_contributing_frames=15,
in_app_non_contributing_frames=9,
in_app_contributing_frames=8,
)
)
def test_threads_component_uses_stacktrace_frame_counts(self) -> None:
self.event.data["threads"] = self.event.data.pop("exception")
self.event.data["threads"]["values"][0]["stacktrace"] = {
"frames": (
[self.non_contributing_system_frame] * 20
+ [self.contributing_system_frame] * 12
+ [self.non_contributing_in_app_frame] * 20
+ [self.contributing_in_app_frame] * 13
)
}
# `normalize_stacktraces=True` forces the custom stacktrace enhancements to run
variants = self.event.get_grouping_variants(normalize_stacktraces=True)
app_threads_component = variants["app"].root_component.values[0]
system_threads_component = variants["system"].root_component.values[0]
assert isinstance(app_threads_component, ThreadsGroupingComponent)
assert isinstance(system_threads_component, ThreadsGroupingComponent)
app_stacktrace_component = find_given_child_component(
app_threads_component, StacktraceGroupingComponent
)
system_stacktrace_component = find_given_child_component(
system_threads_component, StacktraceGroupingComponent
)
assert app_stacktrace_component
assert system_stacktrace_component
assert (
app_threads_component.frame_counts
== system_threads_component.frame_counts
== app_stacktrace_component.frame_counts
== system_stacktrace_component.frame_counts
== Counter(
system_non_contributing_frames=20,
system_contributing_frames=12,
in_app_non_contributing_frames=20,
in_app_contributing_frames=13,
)
)
def test_chained_exception_component_sums_stacktrace_frame_counts(self) -> None:
self.event.data["exception"]["values"] = [
{**self.exception_value},
{**self.exception_value},
]
self.event.data["exception"]["values"][0]["stacktrace"] = {
"frames": (
[self.non_contributing_system_frame] * 11
+ [self.contributing_system_frame] * 21
+ [self.non_contributing_in_app_frame] * 12
+ [self.contributing_in_app_frame] * 31
)
}
self.event.data["exception"]["values"][1]["stacktrace"] = {
"frames": (
[self.non_contributing_system_frame] * 4
+ [self.contributing_system_frame] * 15
+ [self.non_contributing_in_app_frame] * 9
+ [self.contributing_in_app_frame] * 8
)
}
# `normalize_stacktraces=True` forces the custom stacktrace enhancements to run
variants = self.event.get_grouping_variants(normalize_stacktraces=True)
app_chained_exception_component = variants["app"].root_component.values[0]
system_chained_exception_component = variants["system"].root_component.values[0]
assert isinstance(app_chained_exception_component, ChainedExceptionGroupingComponent)
assert isinstance(system_chained_exception_component, ChainedExceptionGroupingComponent)
app_exception_components = app_chained_exception_component.values
system_exception_components = system_chained_exception_component.values
assert (
[exception_component.frame_counts for exception_component in app_exception_components]
== [
exception_component.frame_counts
for exception_component in system_exception_components
]
== [
Counter(
system_non_contributing_frames=11,
system_contributing_frames=21,
in_app_non_contributing_frames=12,
in_app_contributing_frames=31,
),
Counter(
system_non_contributing_frames=4,
system_contributing_frames=15,
in_app_non_contributing_frames=9,
in_app_contributing_frames=8,
),
]
)
assert (
app_chained_exception_component.frame_counts
== system_chained_exception_component.frame_counts
== Counter(
system_non_contributing_frames=15,
system_contributing_frames=36,
in_app_non_contributing_frames=21,
in_app_contributing_frames=39,
)
)
def test_get_subcomponent(self) -> None:
root_component = self.event.get_grouping_variants()["app"].root_component
# When `recursive` isn't specified, it should find direct children but not grandchildren
exception_component = root_component.get_subcomponent("exception")
stacktrace_component = root_component.get_subcomponent("stacktrace")
error_value_component = root_component.get_subcomponent("value")
assert exception_component
assert not stacktrace_component
assert not error_value_component
# Grandchildren can be found, however, if the search is recursive
stacktrace_component = root_component.get_subcomponent("stacktrace", recursive=True)
error_value_component = root_component.get_subcomponent("value", recursive=True)
assert stacktrace_component
assert error_value_component
# The `only_contributing` flag can be used to exclude components which don't contribute
assert stacktrace_component.contributes is False
contributing_stacktrace_component = root_component.get_subcomponent(
"stacktrace", recursive=True, only_contributing=True
)
assert not contributing_stacktrace_component
# Even if a component itself is marked as contributing, if `only_contributing` is set, the
# component won't be found if it has a non-contributing ancestor
exception_component.contributes = False
assert error_value_component.contributes is True
contributing_error_value_component = root_component.get_subcomponent(
"value", recursive=True, only_contributing=True
)
assert not contributing_error_value_component
# TODO: Once we're fully transitioned off of the `newstyle:2023-01-11` config, this test can
# be deleted
def test_configs_put_exception_subcomponents_in_expected_order(self) -> None:
self.event.data["exception"]["values"][0]["stacktrace"] = {"frames": []}
self.project.update_option("sentry:grouping_config", WINTER_2023_GROUPING_CONFIG)
variants = self.event.get_grouping_variants()
exception_component = variants["app"].root_component.values[0]
assert isinstance(exception_component, ExceptionGroupingComponent)
assert [subcomponent.id for subcomponent in exception_component.values] == [
"stacktrace",
"type",
"value",
]
self.project.update_option("sentry:grouping_config", FALL_2025_GROUPING_CONFIG)
variants = self.event.get_grouping_variants()
exception_component = variants["app"].root_component.values[0]
assert isinstance(exception_component, ExceptionGroupingComponent)
assert [subcomponent.id for subcomponent in exception_component.values] == [
"type",
"value",
"stacktrace",
]
| ComponentTest |
python | apache__airflow | airflow-core/src/airflow/utils/state.py | {
"start": 1256,
"end": 1646
} | class ____(str, Enum):
"""States that a Task Instance can be in that indicate it is not yet in a terminal or running state."""
SCHEDULED = "scheduled"
QUEUED = "queued"
RESTARTING = "restarting"
UP_FOR_RETRY = "up_for_retry"
UP_FOR_RESCHEDULE = "up_for_reschedule"
DEFERRED = "deferred"
def __str__(self) -> str:
return self.value
| IntermediateTIState |
python | pytorch__pytorch | test/higher_order_ops/test_invoke_subgraph.py | {
"start": 83513,
"end": 86730
} | class ____(torch.nn.Module):
def forward(self, primals_1: "Sym(s77)", primals_2: "f32[s77, 16]"):
partitioned_fw_subgraph_0_1 = self.partitioned_fw_subgraph_0_1
invoke_subgraph_8 = torch.ops.higher_order.invoke_subgraph(partitioned_fw_subgraph_0_1, 'partitioned_fw_subgraph_0_1', primals_1, primals_2); partitioned_fw_subgraph_0_1 = primals_2 = None
getitem_17: "Sym(s77)" = invoke_subgraph_8[2]
getitem_16: "f32[s77, 16]" = invoke_subgraph_8[1]
getitem: "f32[s77, 16]" = invoke_subgraph_8[0]; invoke_subgraph_8 = None
partitioned_fw_subgraph_0_2 = self.partitioned_fw_subgraph_0_1
invoke_subgraph_10 = torch.ops.higher_order.invoke_subgraph(partitioned_fw_subgraph_0_2, 'partitioned_fw_subgraph_0_1', primals_1, getitem); partitioned_fw_subgraph_0_2 = getitem = None
getitem_19: "Sym(s77)" = invoke_subgraph_10[2]
getitem_18: "f32[s77, 16]" = invoke_subgraph_10[1]
getitem_1: "f32[s77, 16]" = invoke_subgraph_10[0]; invoke_subgraph_10 = None
sin: "f32[s77, 16]" = torch.ops.aten.sin.default(getitem_1)
partitioned_fw_subgraph_0_3 = self.partitioned_fw_subgraph_0_1
invoke_subgraph_12 = torch.ops.higher_order.invoke_subgraph(partitioned_fw_subgraph_0_3, 'partitioned_fw_subgraph_0_1', primals_1, sin); partitioned_fw_subgraph_0_3 = sin = None
getitem_21: "Sym(s77)" = invoke_subgraph_12[2]
getitem_20: "f32[s77, 16]" = invoke_subgraph_12[1]
getitem_2: "f32[s77, 16]" = invoke_subgraph_12[0]; invoke_subgraph_12 = None
partitioned_fw_subgraph_0_0 = self.partitioned_fw_subgraph_0_0
invoke_subgraph_14 = torch.ops.higher_order.invoke_subgraph(partitioned_fw_subgraph_0_0, 'partitioned_fw_subgraph_0_0', primals_1, getitem_2); partitioned_fw_subgraph_0_0 = None
getitem_23: "Sym(s77)" = invoke_subgraph_14[2]
getitem_22: "f32[s77, 16]" = invoke_subgraph_14[1]
getitem_3: "f32[s77, 16]" = invoke_subgraph_14[0]; invoke_subgraph_14 = None
sum_1: "f32[]" = torch.ops.aten.sum.default(getitem_2); getitem_2 = None
sum_2: "f32[]" = torch.ops.aten.sum.default(getitem_3); getitem_3 = None
add_15: "f32[]" = torch.ops.aten.add.Tensor(sum_1, sum_2); sum_1 = sum_2 = None
cos: "f32[s77, 16]" = torch.ops.aten.cos.default(getitem_1); getitem_1 = None
return (add_15, getitem_16, getitem_18, getitem_20, getitem_22, cos, primals_1, getitem_17, getitem_19, getitem_21, getitem_23)
class partitioned_fw_subgraph_0_1(torch.nn.Module):
def forward(self, primals_0: "Sym(s77)", primals_1: "f32[s77, 16]"):
cos: "f32[s77, 16]" = torch.ops.aten.cos.default(primals_1)
return (cos, primals_1, primals_0)
class partitioned_fw_subgraph_0_0(torch.nn.Module):
def forward(self, primals_0: "Sym(s77)", primals_1: "f32[s77, 16]"):
cos: "f32[s77, 16]" = torch.ops.aten.cos.default(primals_1)
return (cos, primals_1, primals_0)
""",
ignore_empty_lines=True,
)
self.assertExpectedInline(
normalize_gm(backend.bw_graphs[0].print_readable(print_output=False)),
"""\
| GraphModule |
python | huggingface__transformers | src/transformers/models/aria/modular_aria.py | {
"start": 40389,
"end": 40512
} | class ____(ImagesKwargs, total=False):
split_image: bool
max_image_size: int
min_image_size: int
| AriaImagesKwargs |
python | RaRe-Technologies__gensim | gensim/models/tfidfmodel.py | {
"start": 7398,
"end": 21472
} | class ____(interfaces.TransformationABC):
"""Objects of this class realize the transformation between word-document co-occurrence matrix (int)
into a locally/globally weighted TF-IDF matrix (positive floats).
Examples
--------
.. sourcecode:: pycon
>>> import gensim.downloader as api
>>> from gensim.models import TfidfModel
>>> from gensim.corpora import Dictionary
>>>
>>> dataset = api.load("text8")
>>> dct = Dictionary(dataset) # fit dictionary
>>> corpus = [dct.doc2bow(line) for line in dataset] # convert corpus to BoW format
>>>
>>> model = TfidfModel(corpus) # fit model
>>> vector = model[corpus[0]] # apply model to the first corpus document
"""
def __init__(self, corpus=None, id2word=None, dictionary=None, wlocal=utils.identity,
wglobal=df2idf, normalize=True, smartirs=None, pivot=None, slope=0.25):
r"""Compute TF-IDF by multiplying a local component (term frequency) with a global component
(inverse document frequency), and normalizing the resulting documents to unit length.
Formula for non-normalized weight of term :math:`i` in document :math:`j` in a corpus of :math:`D` documents
.. math:: weight_{i,j} = frequency_{i,j} * log_2 \frac{D}{document\_freq_{i}}
or, more generally
.. math:: weight_{i,j} = wlocal(frequency_{i,j}) * wglobal(document\_freq_{i}, D)
so you can plug in your own custom :math:`wlocal` and :math:`wglobal` functions.
Parameters
----------
corpus : iterable of iterable of (int, int), optional
Input corpus
id2word : {dict, :class:`~gensim.corpora.Dictionary`}, optional
Mapping token - id, that was used for converting input data to bag of words format.
dictionary : :class:`~gensim.corpora.Dictionary`
If `dictionary` is specified, it must be a `corpora.Dictionary` object and it will be used.
to directly construct the inverse document frequency mapping (then `corpus`, if specified, is ignored).
wlocals : callable, optional
Function for local weighting, default for `wlocal` is :func:`~gensim.utils.identity`
(other options: :func:`numpy.sqrt`, `lambda tf: 0.5 + (0.5 * tf / tf.max())`, etc.).
wglobal : callable, optional
Function for global weighting, default is :func:`~gensim.models.tfidfmodel.df2idf`.
normalize : {bool, callable}, optional
Normalize document vectors to unit euclidean length? You can also inject your own function into `normalize`.
smartirs : str, optional
SMART (System for the Mechanical Analysis and Retrieval of Text) Information Retrieval System,
a mnemonic scheme for denoting tf-idf weighting variants in the vector space model.
The mnemonic for representing a combination of weights takes the form XYZ,
for example 'ntc', 'bpn' and so on, where the letters represents the term weighting of the document vector.
Term frequency weighing:
* `b` - binary,
* `t` or `n` - raw,
* `a` - augmented,
* `l` - logarithm,
* `d` - double logarithm,
* `L` - log average.
Document frequency weighting:
* `x` or `n` - none,
* `f` - idf,
* `t` - zero-corrected idf,
* `p` - probabilistic idf.
Document normalization:
* `x` or `n` - none,
* `c` - cosine,
* `u` - pivoted unique,
* `b` - pivoted character length.
Default is 'nfc'.
For more information visit `SMART Information Retrieval System
<https://en.wikipedia.org/wiki/SMART_Information_Retrieval_System>`_.
pivot : float or None, optional
In information retrieval, TF-IDF is biased against long documents [1]_. Pivoted document length
normalization solves this problem by changing the norm of a document to `slope * old_norm + (1.0 -
slope) * pivot`.
You can either set the `pivot` by hand, or you can let Gensim figure it out automatically with the following
two steps:
* Set either the `u` or `b` document normalization in the `smartirs` parameter.
* Set either the `corpus` or `dictionary` parameter. The `pivot` will be automatically determined from
the properties of the `corpus` or `dictionary`.
If `pivot` is None and you don't follow steps 1 and 2, then pivoted document length normalization will be
disabled. Default is None.
See also the blog post at https://rare-technologies.com/pivoted-document-length-normalisation/.
slope : float, optional
In information retrieval, TF-IDF is biased against long documents [1]_. Pivoted document length
normalization solves this problem by changing the norm of a document to `slope * old_norm + (1.0 -
slope) * pivot`.
Setting the `slope` to 0.0 uses only the `pivot` as the norm, and setting the `slope` to 1.0 effectively
disables pivoted document length normalization. Singhal [2]_ suggests setting the `slope` between 0.2 and
0.3 for best results. Default is 0.25.
See also the blog post at https://rare-technologies.com/pivoted-document-length-normalisation/.
References
----------
.. [1] Singhal, A., Buckley, C., & Mitra, M. (1996). `Pivoted Document Length
Normalization <http://singhal.info/pivoted-dln.pdf>`_. *SIGIR Forum*, 51, 176–184.
.. [2] Singhal, A. (2001). `Modern information retrieval: A brief overview <http://singhal.info/ieee2001.pdf>`_.
*IEEE Data Eng. Bull.*, 24(4), 35–43.
"""
self.id2word = id2word
self.wlocal, self.wglobal, self.normalize = wlocal, wglobal, normalize
self.num_docs, self.num_nnz, self.idfs = None, None, None
self.smartirs = resolve_weights(smartirs) if smartirs is not None else None
self.slope = slope
self.pivot = pivot
self.eps = 1e-12
if smartirs is not None:
n_tf, n_df, n_n = self.smartirs
self.wlocal = partial(smartirs_wlocal, local_scheme=n_tf)
self.wglobal = partial(smartirs_wglobal, global_scheme=n_df)
if dictionary is not None:
# user supplied a Dictionary object, which already contains all the
# statistics we need to construct the IDF mapping. we can skip the
# step that goes through the corpus (= an optimization).
if corpus is not None:
logger.warning(
"constructor received both corpus and explicit inverse document frequencies; ignoring the corpus"
)
self.num_docs, self.num_nnz = dictionary.num_docs, dictionary.num_nnz
self.cfs = dictionary.cfs.copy()
self.dfs = dictionary.dfs.copy()
self.term_lens = {termid: len(term) for termid, term in dictionary.items()}
self.idfs = precompute_idfs(self.wglobal, self.dfs, self.num_docs)
if id2word is None:
self.id2word = dictionary
elif corpus is not None:
self.initialize(corpus)
else:
# NOTE: everything is left uninitialized; presumably the model will
# be initialized in some other way
pass
# If smartirs is not None, override pivot and normalize
if smartirs is None:
return
if self.pivot is not None:
if n_n in 'ub':
logger.warning("constructor received pivot; ignoring smartirs[2]")
return
if n_n in 'ub' and callable(self.normalize):
logger.warning("constructor received smartirs; ignoring normalize")
if n_n in 'ub' and not dictionary and not corpus:
logger.warning("constructor received no corpus or dictionary; ignoring smartirs[2]")
elif n_n == "u":
self.pivot = 1.0 * self.num_nnz / self.num_docs
elif n_n == "b":
self.pivot = 1.0 * sum(
self.cfs[termid] * (self.term_lens[termid] + 1.0) for termid in dictionary.keys()
) / self.num_docs
@classmethod
def load(cls, *args, **kwargs):
"""Load a previously saved TfidfModel class. Handles backwards compatibility from
older TfidfModel versions which did not use pivoted document normalization.
"""
model = super(TfidfModel, cls).load(*args, **kwargs)
if not hasattr(model, 'pivot'):
model.pivot = None
logger.info('older version of %s loaded without pivot arg', cls.__name__)
logger.info('Setting pivot to %s.', model.pivot)
if not hasattr(model, 'slope'):
model.slope = 0.65
logger.info('older version of %s loaded without slope arg', cls.__name__)
logger.info('Setting slope to %s.', model.slope)
if not hasattr(model, 'smartirs'):
model.smartirs = None
logger.info('older version of %s loaded without smartirs arg', cls.__name__)
logger.info('Setting smartirs to %s.', model.smartirs)
return model
def __str__(self):
return "%s<num_docs=%s, num_nnz=%s>" % (self.__class__.__name__, self.num_docs, self.num_nnz)
def initialize(self, corpus):
"""Compute inverse document weights, which will be used to modify term frequencies for documents.
Parameters
----------
corpus : iterable of iterable of (int, int)
Input corpus.
"""
logger.info("collecting document frequencies")
dfs = {}
numnnz, docno = 0, -1
for docno, bow in enumerate(corpus):
if docno % 10000 == 0:
logger.info("PROGRESS: processing document #%i", docno)
numnnz += len(bow)
for termid, _ in bow:
dfs[termid] = dfs.get(termid, 0) + 1
# keep some stats about the training corpus
self.num_docs = docno + 1
self.num_nnz = numnnz
self.cfs = None
self.dfs = dfs
self.term_lengths = None
# and finally compute the idf weights
self.idfs = precompute_idfs(self.wglobal, self.dfs, self.num_docs)
self.add_lifecycle_event(
"initialize",
msg=(
f"calculated IDF weights for {self.num_docs} documents and {max(dfs.keys()) + 1 if dfs else 0}"
f" features ({self.num_nnz} matrix non-zeros)"
),
)
def __getitem__(self, bow, eps=1e-12):
"""Get the tf-idf representation of an input vector and/or corpus.
bow : {list of (int, int), iterable of iterable of (int, int)}
Input document in the `sparse Gensim bag-of-words format
<https://radimrehurek.com/gensim/intro.html#core-concepts>`_,
or a streamed corpus of such documents.
eps : float
Threshold value, will remove all position that have tfidf-value less than `eps`.
Returns
-------
vector : list of (int, float)
TfIdf vector, if `bow` is a single document
:class:`~gensim.interfaces.TransformedCorpus`
TfIdf corpus, if `bow` is a corpus.
"""
self.eps = eps
# if the input vector is in fact a corpus, return a transformed corpus as a result
is_corpus, bow = utils.is_corpus(bow)
if is_corpus:
return self._apply(bow)
# unknown (new) terms will be given zero weight (NOT infinity/huge weight,
# as strict application of the IDF formula would dictate)
termid_array, tf_array = [], []
for termid, tf in bow:
termid_array.append(termid)
tf_array.append(tf)
tf_array = self.wlocal(np.array(tf_array))
vector = [
(termid, tf * self.idfs.get(termid))
for termid, tf in zip(termid_array, tf_array) if abs(self.idfs.get(termid, 0.0)) > self.eps
]
# and finally, normalize the vector either to unit length, or use a
# user-defined normalization function
if self.smartirs:
n_n = self.smartirs[2]
if n_n == "n" or (n_n in 'ub' and self.pivot is None):
if self.pivot is not None:
_, old_norm = matutils.unitvec(vector, return_norm=True)
norm_vector = vector
elif n_n == "c":
if self.pivot is not None:
_, old_norm = matutils.unitvec(vector, return_norm=True)
else:
norm_vector = matutils.unitvec(vector)
elif n_n == "u":
_, old_norm = matutils.unitvec(vector, return_norm=True, norm='unique')
elif n_n == "b":
old_norm = sum(freq * (self.term_lens[termid] + 1.0) for termid, freq in bow)
else:
if self.normalize is True:
self.normalize = matutils.unitvec
elif self.normalize is False:
self.normalize = utils.identity
if self.pivot is not None:
_, old_norm = self.normalize(vector, return_norm=True)
else:
norm_vector = self.normalize(vector)
if self.pivot is None:
norm_vector = [(termid, weight) for termid, weight in norm_vector if abs(weight) > self.eps]
else:
pivoted_norm = (1 - self.slope) * self.pivot + self.slope * old_norm
norm_vector = [
(termid, weight / float(pivoted_norm))
for termid, weight in vector
if abs(weight / float(pivoted_norm)) > self.eps
]
return norm_vector
| TfidfModel |
python | jazzband__django-formtools | tests/wizard/namedwizardtests/tests.py | {
"start": 15006,
"end": 15400
} | class ____:
def test_revalidation(self):
request = get_request()
testform = self.formwizard_class.as_view(
[('start', Step1), ('step2', Step2)],
url_name=self.wizard_urlname)
response, instance = testform(request, step='done')
instance.render_done(None)
self.assertEqual(instance.storage.current_step, 'start')
| NamedFormTests |
python | tiangolo__fastapi | docs_src/dependencies/tutorial008b_an.py | {
"start": 278,
"end": 785
} | class ____(Exception):
pass
def get_username():
try:
yield "Rick"
except OwnerError as e:
raise HTTPException(status_code=400, detail=f"Owner error: {e}")
@app.get("/items/{item_id}")
def get_item(item_id: str, username: Annotated[str, Depends(get_username)]):
if item_id not in data:
raise HTTPException(status_code=404, detail="Item not found")
item = data[item_id]
if item["owner"] != username:
raise OwnerError(username)
return item
| OwnerError |
python | explosion__spaCy | spacy/lang/lb/__init__.py | {
"start": 401,
"end": 515
} | class ____(Language):
lang = "lb"
Defaults = LuxembourgishDefaults
__all__ = ["Luxembourgish"]
| Luxembourgish |
python | run-llama__llama_index | llama-index-core/tests/vector_stores/test_simple.py | {
"start": 2260,
"end": 17196
} | class ____(unittest.TestCase):
def test_query_without_filters_returns_all_rows_sorted_by_similarity(self) -> None:
simple_vector_store = SimpleVectorStore()
simple_vector_store.add(_node_embeddings_for_test())
query = VectorStoreQuery(query_embedding=[1.0, 1.0], similarity_top_k=3)
result = simple_vector_store.query(query)
assert result.ids is not None
self.assertCountEqual(
result.ids,
[
_NODE_ID_WEIGHT_1_RANK_A,
_NODE_ID_WEIGHT_2_RANK_C,
_NODE_ID_WEIGHT_3_RANK_C,
],
)
self.assertEqual(result.ids[0], _NODE_ID_WEIGHT_3_RANK_C)
def test_query_with_filters_returns_multiple_matches(self) -> None:
simple_vector_store = SimpleVectorStore()
simple_vector_store.add(_node_embeddings_for_test())
filters = MetadataFilters(filters=[ExactMatchFilter(key="rank", value="c")])
query = VectorStoreQuery(
query_embedding=[1.0, 1.0], filters=filters, similarity_top_k=3
)
result = simple_vector_store.query(query)
self.assertEqual(
result.ids, [_NODE_ID_WEIGHT_3_RANK_C, _NODE_ID_WEIGHT_2_RANK_C]
)
def test_query_with_filter_applies_top_k(self) -> None:
simple_vector_store = SimpleVectorStore()
simple_vector_store.add(_node_embeddings_for_test())
filters = MetadataFilters(filters=[ExactMatchFilter(key="rank", value="c")])
query = VectorStoreQuery(
query_embedding=[1.0, 1.0], filters=filters, similarity_top_k=1
)
result = simple_vector_store.query(query)
self.assertEqual(result.ids, [_NODE_ID_WEIGHT_3_RANK_C])
def test_query_with_filter_applies_node_id_filter(self) -> None:
simple_vector_store = SimpleVectorStore()
simple_vector_store.add(_node_embeddings_for_test())
filters = MetadataFilters(filters=[ExactMatchFilter(key="rank", value="c")])
query = VectorStoreQuery(
query_embedding=[1.0, 1.0],
filters=filters,
similarity_top_k=3,
node_ids=[_NODE_ID_WEIGHT_3_RANK_C],
)
result = simple_vector_store.query(query)
self.assertEqual(result.ids, [_NODE_ID_WEIGHT_3_RANK_C])
def test_query_with_exact_filters_returns_single_match(self) -> None:
simple_vector_store = SimpleVectorStore()
simple_vector_store.add(_node_embeddings_for_test())
filters = MetadataFilters(
filters=[
ExactMatchFilter(key="rank", value="c"),
ExactMatchFilter(key="weight", value=2.0),
]
)
query = VectorStoreQuery(query_embedding=[1.0, 1.0], filters=filters)
result = simple_vector_store.query(query)
self.assertEqual(result.ids, [_NODE_ID_WEIGHT_2_RANK_C])
def test_query_with_contradictive_filter_returns_no_matches(self) -> None:
simple_vector_store = SimpleVectorStore()
simple_vector_store.add(_node_embeddings_for_test())
filters = MetadataFilters(
filters=[
ExactMatchFilter(key="weight", value=2),
ExactMatchFilter(key="weight", value=3),
]
)
query = VectorStoreQuery(query_embedding=[1.0, 1.0], filters=filters)
result = simple_vector_store.query(query)
assert result.ids is not None
self.assertEqual(len(result.ids), 0)
def test_query_with_filter_on_unknown_field_returns_no_matches(self) -> None:
simple_vector_store = SimpleVectorStore()
simple_vector_store.add(_node_embeddings_for_test())
filters = MetadataFilters(
filters=[ExactMatchFilter(key="unknown_field", value="c")]
)
query = VectorStoreQuery(query_embedding=[1.0, 1.0], filters=filters)
result = simple_vector_store.query(query)
assert result.ids is not None
self.assertEqual(len(result.ids), 0)
def test_delete_removes_document_from_query_results(self) -> None:
simple_vector_store = SimpleVectorStore()
simple_vector_store.add(_node_embeddings_for_test())
simple_vector_store.delete("test-1")
query = VectorStoreQuery(query_embedding=[1.0, 1.0], similarity_top_k=2)
result = simple_vector_store.query(query)
self.assertEqual(
result.ids,
[_NODE_ID_WEIGHT_3_RANK_C, _NODE_ID_WEIGHT_1_RANK_A],
)
def test_query_with_filters_with_filter_condition(self) -> None:
simple_vector_store = SimpleVectorStore()
simple_vector_store.add(_node_embeddings_for_test())
# test OR filter
filters = MetadataFilters(
filters=[
ExactMatchFilter(key="rank", value="c"),
ExactMatchFilter(key="weight", value=1.0),
],
condition=FilterCondition.OR,
)
query = VectorStoreQuery(
query_embedding=[1.0, 1.0], filters=filters, similarity_top_k=3
)
result = simple_vector_store.query(query)
self.assertEqual(len(result.ids), 3)
# test AND filter
filters = MetadataFilters(
filters=[
ExactMatchFilter(key="rank", value="c"),
ExactMatchFilter(key="weight", value=1.0),
],
condition=FilterCondition.AND,
)
query = VectorStoreQuery(
query_embedding=[1.0, 1.0], filters=filters, similarity_top_k=3
)
result = simple_vector_store.query(query)
self.assertEqual(len(result.ids), 0)
def test_query_with_equal_filter_returns_matches(self) -> None:
simple_vector_store = SimpleVectorStore()
simple_vector_store.add(_node_embeddings_for_test())
filters = MetadataFilters(
filters=[
MetadataFilter(key="weight", operator=FilterOperator.EQ, value=1.0)
]
)
query = VectorStoreQuery(
query_embedding=[1.0, 1.0], filters=filters, similarity_top_k=3
)
result = simple_vector_store.query(query)
assert result.ids is not None
self.assertEqual(len(result.ids), 1)
def test_query_with_notequal_filter_returns_matches(self) -> None:
simple_vector_store = SimpleVectorStore()
simple_vector_store.add(_node_embeddings_for_test())
filters = MetadataFilters(
filters=[
MetadataFilter(key="weight", operator=FilterOperator.NE, value=1.0)
]
)
query = VectorStoreQuery(
query_embedding=[1.0, 1.0], filters=filters, similarity_top_k=3
)
result = simple_vector_store.query(query)
assert result.ids is not None
self.assertEqual(len(result.ids), 2)
def test_query_with_greaterthan_filter_returns_matches(self) -> None:
simple_vector_store = SimpleVectorStore()
simple_vector_store.add(_node_embeddings_for_test())
filters = MetadataFilters(
filters=[
MetadataFilter(key="weight", operator=FilterOperator.GT, value=1.5)
]
)
query = VectorStoreQuery(
query_embedding=[1.0, 1.0], filters=filters, similarity_top_k=3
)
result = simple_vector_store.query(query)
assert result.ids is not None
self.assertEqual(len(result.ids), 2)
def test_query_with_greaterthanequal_filter_returns_matches(self) -> None:
simple_vector_store = SimpleVectorStore()
simple_vector_store.add(_node_embeddings_for_test())
filters = MetadataFilters(
filters=[
MetadataFilter(key="weight", operator=FilterOperator.GTE, value=1.0)
]
)
query = VectorStoreQuery(
query_embedding=[1.0, 1.0], filters=filters, similarity_top_k=3
)
result = simple_vector_store.query(query)
assert result.ids is not None
self.assertEqual(len(result.ids), 3)
def test_query_with_lessthan_filter_returns_matches(self) -> None:
simple_vector_store = SimpleVectorStore()
simple_vector_store.add(_node_embeddings_for_test())
filters = MetadataFilters(
filters=[
MetadataFilter(key="weight", operator=FilterOperator.LT, value=1.1)
]
)
query = VectorStoreQuery(
query_embedding=[1.0, 1.0], filters=filters, similarity_top_k=3
)
result = simple_vector_store.query(query)
assert result.ids is not None
def test_query_with_lessthanequal_filter_returns_matches(self) -> None:
simple_vector_store = SimpleVectorStore()
simple_vector_store.add(_node_embeddings_for_test())
filters = MetadataFilters(
filters=[
MetadataFilter(key="weight", operator=FilterOperator.LTE, value=1.0)
]
)
query = VectorStoreQuery(
query_embedding=[1.0, 1.0], filters=filters, similarity_top_k=3
)
result = simple_vector_store.query(query)
assert result.ids is not None
self.assertEqual(len(result.ids), 1)
def test_query_with_in_filter_returns_matches(self) -> None:
simple_vector_store = SimpleVectorStore()
simple_vector_store.add(_node_embeddings_for_test())
filters = MetadataFilters(
filters=[
MetadataFilter(key="rank", operator=FilterOperator.IN, value=["a", "c"])
]
)
query = VectorStoreQuery(
query_embedding=[1.0, 1.0], filters=filters, similarity_top_k=3
)
result = simple_vector_store.query(query)
assert result.ids is not None
self.assertEqual(len(result.ids), 3)
def test_query_with_notin_filter_returns_matches(self) -> None:
simple_vector_store = SimpleVectorStore()
simple_vector_store.add(_node_embeddings_for_test())
filters = MetadataFilters(
filters=[
MetadataFilter(key="rank", operator=FilterOperator.NIN, value=["c"])
]
)
query = VectorStoreQuery(
query_embedding=[1.0, 1.0], filters=filters, similarity_top_k=3
)
result = simple_vector_store.query(query)
assert result.ids is not None
self.assertEqual(len(result.ids), 1)
def test_query_with_contains_filter_returns_matches(self) -> None:
simple_vector_store = SimpleVectorStore()
simple_vector_store.add(_node_embeddings_for_test())
filters = MetadataFilters(
filters=[
MetadataFilter(
key="quality", operator=FilterOperator.CONTAINS, value="high"
)
]
)
query = VectorStoreQuery(
query_embedding=[1.0, 1.0], filters=filters, similarity_top_k=3
)
result = simple_vector_store.query(query)
assert result.ids is not None
self.assertEqual(len(result.ids), 2)
def test_query_with_textmatch_filter_returns_matches(self) -> None:
simple_vector_store = SimpleVectorStore()
simple_vector_store.add(_node_embeddings_for_test())
filters = MetadataFilters(
filters=[
MetadataFilter(
key="identifier",
operator=FilterOperator.TEXT_MATCH,
value="6FTR78Y",
)
]
)
query = VectorStoreQuery(
query_embedding=[1.0, 1.0], filters=filters, similarity_top_k=3
)
result = simple_vector_store.query(query)
assert result.ids is not None
self.assertEqual(len(result.ids), 2)
def test_query_with_any_filter_returns_matches(self) -> None:
simple_vector_store = SimpleVectorStore()
simple_vector_store.add(_node_embeddings_for_test())
filters = MetadataFilters(
filters=[
MetadataFilter(
key="quality", operator=FilterOperator.ANY, value=["high", "low"]
)
]
)
query = VectorStoreQuery(
query_embedding=[1.0, 1.0], filters=filters, similarity_top_k=3
)
result = simple_vector_store.query(query)
assert result.ids is not None
self.assertEqual(len(result.ids), 2)
def test_query_with_all_filter_returns_matches(self) -> None:
simple_vector_store = SimpleVectorStore()
simple_vector_store.add(_node_embeddings_for_test())
filters = MetadataFilters(
filters=[
MetadataFilter(
key="quality", operator=FilterOperator.ALL, value=["medium", "high"]
)
]
)
query = VectorStoreQuery(
query_embedding=[1.0, 1.0], filters=filters, similarity_top_k=3
)
result = simple_vector_store.query(query)
assert result.ids is not None
self.assertEqual(len(result.ids), 2)
def test_query_with_is_empty_filter_returns_matches(self) -> None:
simple_vector_store = SimpleVectorStore()
simple_vector_store.add(_node_embeddings_for_test())
filters = MetadataFilters(
filters=[
MetadataFilter(
key="not_existed_key", operator=FilterOperator.IS_EMPTY, value=None
)
]
)
query = VectorStoreQuery(
query_embedding=[1.0, 1.0], filters=filters, similarity_top_k=3
)
result = simple_vector_store.query(query)
self.assertEqual(len(result.ids), len(_node_embeddings_for_test()))
def test_clear(self) -> None:
simple_vector_store = SimpleVectorStore()
simple_vector_store.add(_node_embeddings_for_test())
simple_vector_store.clear()
query = VectorStoreQuery(query_embedding=[1.0, 1.0], similarity_top_k=3)
result = simple_vector_store.query(query)
self.assertEqual(len(result.ids), 0)
def test_delete_nodes(self) -> None:
simple_vector_store = SimpleVectorStore()
simple_vector_store.add(_node_embeddings_for_test())
simple_vector_store.delete_nodes(
[_NODE_ID_WEIGHT_1_RANK_A, _NODE_ID_WEIGHT_2_RANK_C]
)
query = VectorStoreQuery(query_embedding=[1.0, 1.0], similarity_top_k=3)
result = simple_vector_store.query(query)
self.assertEqual(result.ids, [_NODE_ID_WEIGHT_3_RANK_C])
def test_from_persist_dir(persist_dir: str) -> None:
vector_store = SimpleVectorStore.from_persist_dir(persist_dir=persist_dir)
assert vector_store is not None
def test_from_namespaced_persist_dir(persist_dir: str) -> None:
vector_store = SimpleVectorStore.from_namespaced_persist_dir(
persist_dir=persist_dir
)
assert vector_store is not None
| SimpleVectorStoreTest |
python | jazzband__django-model-utils | model_utils/tracker.py | {
"start": 7112,
"end": 10903
} | class ____:
def __init__(self, instance: models.Model, fields: Iterable[str], field_map: Mapping[str, str]):
self.instance = cast('_AugmentedModel', instance)
self.fields = fields
self.field_map = field_map
self.context = FieldsContext(self, *self.fields)
def __enter__(self) -> FieldsContext:
return self.context.__enter__()
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None
) -> None:
return self.context.__exit__(exc_type, exc_val, exc_tb)
def __call__(self, *fields: str) -> FieldsContext:
return FieldsContext(self, *fields, state=self.context.state)
@property
def deferred_fields(self) -> set[str]:
return self.instance.get_deferred_fields()
def get_field_value(self, field: str) -> Any:
return getattr(self.instance, self.field_map[field])
def set_saved_fields(self, fields: Iterable[str] | None = None) -> None:
if not self.instance.pk:
self.saved_data = {}
elif fields is None:
self.saved_data = self.current()
else:
self.saved_data.update(**self.current(fields=fields))
# preventing mutable fields side effects
for field, field_value in self.saved_data.items():
self.saved_data[field] = lightweight_deepcopy(field_value)
def current(self, fields: Iterable[str] | None = None) -> dict[str, Any]:
"""Returns dict of current values for all tracked fields"""
if fields is None:
deferred_fields = self.deferred_fields
if deferred_fields:
fields = [
field for field in self.fields
if self.field_map[field] not in deferred_fields
]
else:
fields = self.fields
return {f: self.get_field_value(f) for f in fields}
def has_changed(self, field: str) -> bool:
"""Returns ``True`` if field has changed from currently saved value"""
if field in self.fields:
# deferred fields haven't changed
if field in self.deferred_fields and field not in self.instance.__dict__:
return False
prev: object = self.previous(field)
curr: object = self.get_field_value(field)
return prev != curr
else:
raise FieldError('field "%s" not tracked' % field)
def previous(self, field: str) -> Any:
"""Returns currently saved value of given field"""
# handle deferred fields that have not yet been loaded from the database
if self.instance.pk and field in self.deferred_fields and field not in self.saved_data:
# if the field has not been assigned locally, simply fetch and un-defer the value
if field not in self.instance.__dict__:
self.get_field_value(field)
# if the field has been assigned locally, store the local value, fetch the database value,
# store database value to saved_data, and restore the local value
else:
current_value = self.get_field_value(field)
self.instance.refresh_from_db(fields=[field])
self.saved_data[field] = lightweight_deepcopy(self.get_field_value(field))
setattr(self.instance, self.field_map[field], current_value)
return self.saved_data.get(field)
def changed(self) -> dict[str, Any]:
"""Returns dict of fields that changed since save (with old values)"""
return {
field: self.previous(field)
for field in self.fields
if self.has_changed(field)
}
| FieldInstanceTracker |
python | fluentpython__example-code-2e | 24-class-metaprog/evaltime/builderlib.py | {
"start": 60,
"end": 668
} | class ____: # <1>
print('@ Builder body')
def __init_subclass__(cls): # <2>
print(f'@ Builder.__init_subclass__({cls!r})')
def inner_0(self): # <3>
print(f'@ SuperA.__init_subclass__:inner_0({self!r})')
cls.method_a = inner_0
def __init__(self):
super().__init__()
print(f'@ Builder.__init__({self!r})')
def deco(cls): # <4>
print(f'@ deco({cls!r})')
def inner_1(self): # <5>
print(f'@ deco:inner_1({self!r})')
cls.method_b = inner_1
return cls # <6>
# end::BUILDERLIB_TOP[]
# tag::BUILDERLIB_BOTTOM[]
| Builder |
python | pytorch__pytorch | torch/testing/_internal/common_modules.py | {
"start": 8037,
"end": 8182
} | class ____(Enum):
""" Enumerates when error is raised when testing modules. """
CONSTRUCTION_ERROR = 0
FORWARD_ERROR = 1
| ModuleErrorEnum |
python | huggingface__transformers | src/transformers/models/sam2/modular_sam2.py | {
"start": 38909,
"end": 39760
} | class ____(SamTwoWayAttentionBlock, GradientCheckpointingLayer):
def __init__(self, config: Sam2MaskDecoderConfig, skip_first_layer_pe: bool = False):
nn.Module.__init__(self)
self.self_attn = Sam2Attention(config, downsample_rate=1)
self.layer_norm1 = nn.LayerNorm(config.hidden_size)
self.cross_attn_token_to_image = Sam2Attention(config)
self.layer_norm2 = nn.LayerNorm(config.hidden_size)
self.mlp = Sam2FeedForward(
config.hidden_size, config.mlp_dim, config.hidden_size, num_layers=config.num_hidden_layers
)
self.layer_norm3 = nn.LayerNorm(config.hidden_size)
self.layer_norm4 = nn.LayerNorm(config.hidden_size)
self.cross_attn_image_to_token = Sam2Attention(config)
self.skip_first_layer_pe = skip_first_layer_pe
| Sam2TwoWayAttentionBlock |
python | facebookresearch__faiss | tests/test_io.py | {
"start": 10554,
"end": 11804
} | class ____(unittest.TestCase):
"""
test read and write IndexLSH.
"""
def test_io_lsh(self):
xt, xb, xq = get_dataset_2(d, nt, nb, nq)
index_lsh = faiss.IndexLSH(d, 32, True, True)
index_lsh.train(xt)
index_lsh.add(xb)
D, I = index_lsh.search(xq, 10)
fd, fname = tempfile.mkstemp()
os.close(fd)
try:
faiss.write_index(index_lsh, fname)
reader = faiss.BufferedIOReader(
faiss.FileIOReader(fname), 1234)
read_index_lsh = faiss.read_index(reader)
# Delete reader to prevent [WinError 32] The process cannot
# access the file because it is being used by another process
del reader
self.assertEqual(index_lsh.d, read_index_lsh.d)
np.testing.assert_array_equal(
faiss.vector_to_array(index_lsh.codes),
faiss.vector_to_array(read_index_lsh.codes)
)
D_read, I_read = read_index_lsh.search(xq, 10)
np.testing.assert_array_equal(D, D_read)
np.testing.assert_array_equal(I, I_read)
finally:
if os.path.exists(fname):
os.unlink(fname)
| Test_IO_IndexLSH |
python | django__django | django/db/migrations/questioner.py | {
"start": 11902,
"end": 13568
} | class ____(MigrationQuestioner):
def __init__(
self,
defaults=None,
specified_apps=None,
dry_run=None,
verbosity=1,
log=None,
):
self.verbosity = verbosity
self.log = log
super().__init__(
defaults=defaults,
specified_apps=specified_apps,
dry_run=dry_run,
)
def log_lack_of_migration(self, field_name, model_name, reason):
if self.verbosity > 0:
self.log(
f"Field '{field_name}' on model '{model_name}' not migrated: "
f"{reason}."
)
def ask_not_null_addition(self, field_name, model_name):
# We can't ask the user, so act like the user aborted.
self.log_lack_of_migration(
field_name,
model_name,
"it is impossible to add a non-nullable field without specifying "
"a default",
)
sys.exit(3)
def ask_not_null_alteration(self, field_name, model_name):
# We can't ask the user, so set as not provided.
self.log(
f"Field '{field_name}' on model '{model_name}' given a default of "
f"NOT PROVIDED and must be corrected."
)
return NOT_PROVIDED
def ask_auto_now_add_addition(self, field_name, model_name):
# We can't ask the user, so act like the user aborted.
self.log_lack_of_migration(
field_name,
model_name,
"it is impossible to add a field with 'auto_now_add=True' without "
"specifying a default",
)
sys.exit(3)
| NonInteractiveMigrationQuestioner |
python | getsentry__sentry | tests/sentry/models/test_project.py | {
"start": 29960,
"end": 33174
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.options_dict = {
"sentry:resolve_age": 1,
"sentry:scrub_data": False,
"sentry:scrub_defaults": False,
}
self.other_project = self.create_project()
for key, value in self.options_dict.items():
self.other_project.update_option(key=key, value=value)
self.teams = [self.create_team(), self.create_team(), self.create_team()]
for team in self.teams:
ProjectTeam.objects.create(team=team, project=self.other_project)
self.environments = [
self.create_environment(project=self.other_project),
self.create_environment(project=self.other_project),
]
self.ownership = ProjectOwnership.objects.create(
project=self.other_project, raw='{"hello":"hello"}', schema={"hello": "hello"}
)
Rule.objects.create(project=self.other_project, label="rule1")
Rule.objects.create(project=self.other_project, label="rule2")
Rule.objects.create(project=self.other_project, label="rule3")
# there is a default rule added to project
self.rules = Rule.objects.filter(project_id=self.other_project.id).order_by("label")
def assert_other_project_settings_not_changed(self):
# other_project should not have changed. This should check that.
self.assert_settings_copied(self.other_project)
def assert_settings_copied(self, project):
for key, value in self.options_dict.items():
assert project.get_option(key) == value
project_teams = ProjectTeam.objects.filter(project_id=project.id, team__in=self.teams)
assert len(project_teams) == len(self.teams)
project_env = EnvironmentProject.objects.filter(
project_id=project.id, environment__in=self.environments
)
assert len(project_env) == len(self.environments)
ownership = ProjectOwnership.objects.get(project_id=project.id)
assert ownership.raw == self.ownership.raw
assert ownership.schema == self.ownership.schema
rules = Rule.objects.filter(project_id=project.id).order_by("label")
for rule, other_rule in zip(rules, self.rules):
assert rule.label == other_rule.label
def test_simple(self) -> None:
project = self.create_project(fire_project_created=True)
assert project.copy_settings_from(self.other_project.id)
self.assert_settings_copied(project)
self.assert_other_project_settings_not_changed()
def test_copy_with_previous_settings(self) -> None:
project = self.create_project(fire_project_created=True)
project.update_option("sentry:resolve_age", 200)
ProjectTeam.objects.create(team=self.create_team(), project=project)
self.create_environment(project=project)
Rule.objects.filter(project_id=project.id)[0]
assert project.copy_settings_from(self.other_project.id)
self.assert_settings_copied(project)
self.assert_other_project_settings_not_changed()
@control_silo_test
| CopyProjectSettingsTest |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 858511,
"end": 858687
} | class ____(VegaLiteSchema):
"""Parse schema wrapper."""
_schema = {"$ref": "#/definitions/Parse"}
def __init__(self, **kwds):
super().__init__(**kwds)
| Parse |
python | lepture__authlib | authlib/integrations/base_client/async_app.py | {
"start": 308,
"end": 2425
} | class ____(OAuth1Base):
async def request(self, method, url, token=None, **kwargs):
async with self._get_oauth_client() as session:
return await _http_request(self, session, method, url, token, kwargs)
async def create_authorization_url(self, redirect_uri=None, **kwargs):
"""Generate the authorization url and state for HTTP redirect.
:param redirect_uri: Callback or redirect URI for authorization.
:param kwargs: Extra parameters to include.
:return: dict
"""
if not self.authorize_url:
raise RuntimeError('Missing "authorize_url" value')
if self.authorize_params:
kwargs.update(self.authorize_params)
async with self._get_oauth_client() as client:
client.redirect_uri = redirect_uri
params = {}
if self.request_token_params:
params.update(self.request_token_params)
request_token = await client.fetch_request_token(
self.request_token_url, **params
)
log.debug(f"Fetch request token: {request_token!r}")
url = client.create_authorization_url(self.authorize_url, **kwargs)
state = request_token["oauth_token"]
return {"url": url, "request_token": request_token, "state": state}
async def fetch_access_token(self, request_token=None, **kwargs):
"""Fetch access token in one step.
:param request_token: A previous request token for OAuth 1.
:param kwargs: Extra parameters to fetch access token.
:return: A token dict.
"""
async with self._get_oauth_client() as client:
if request_token is None:
raise MissingRequestTokenError()
# merge request token with verifier
token = {}
token.update(request_token)
token.update(kwargs)
client.token = token
params = self.access_token_params or {}
token = await client.fetch_access_token(self.access_token_url, **params)
return token
| AsyncOAuth1Mixin |
python | pytorch__pytorch | torch/testing/_internal/common_device_type.py | {
"start": 54875,
"end": 56017
} | class ____:
def __init__(self, device_type, dtype=None):
self.device_type = device_type
self.dtype = dtype
def __call__(self, fn):
@wraps(fn)
def efail_fn(slf, *args, **kwargs):
if (
not hasattr(slf, "device_type")
and hasattr(slf, "device")
and isinstance(slf.device, str)
):
target_device_type = slf.device
else:
target_device_type = slf.device_type
target_dtype = kwargs.get("dtype", getattr(slf, "dtype", None))
device_matches = (
self.device_type is None or self.device_type == target_device_type
)
dtype_matches = self.dtype is None or self.dtype == target_dtype
if device_matches and dtype_matches:
try:
fn(slf, *args, **kwargs)
except Exception:
return
else:
slf.fail("expected test to fail, but it passed")
return fn(slf, *args, **kwargs)
return efail_fn
| expectedFailure |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/assets/graph/base_asset_graph.py | {
"start": 3402,
"end": 6657
} | class ____(BaseEntityNode[AssetKey]):
key: AssetKey
parent_keys: AbstractSet[AssetKey]
child_keys: AbstractSet[AssetKey]
@property
def parent_entity_keys(self) -> AbstractSet[AssetKey]:
return self.parent_keys
@property
def child_entity_keys(self) -> AbstractSet[EntityKey]:
return self.child_keys | self.check_keys
@property
def has_self_dependency(self) -> bool:
return self.key in self.parent_keys
@property
@abstractmethod
def description(self) -> Optional[str]: ...
@property
@abstractmethod
def group_name(self) -> str: ...
@property
@abstractmethod
def is_materializable(self) -> bool: ...
@property
@abstractmethod
def is_observable(self) -> bool: ...
@property
@abstractmethod
def is_external(self) -> bool: ...
@property
@abstractmethod
def is_executable(self) -> bool: ...
@property
@abstractmethod
def execution_type(self) -> "AssetExecutionType": ...
@property
@abstractmethod
def metadata(self) -> ArbitraryMetadataMapping: ...
@property
@abstractmethod
def tags(self) -> Mapping[str, str]: ...
@property
@abstractmethod
def pools(self) -> Optional[set[str]]: ...
@property
@abstractmethod
def owners(self) -> Sequence[str]: ...
@property
@abstractmethod
def is_partitioned(self) -> bool: ...
@property
@abstractmethod
def legacy_freshness_policy(self) -> Optional[LegacyFreshnessPolicy]: ...
@property
@abstractmethod
def freshness_policy(self) -> Optional[FreshnessPolicy]:
"""WARNING: This field is not backwards compatible for policies created prior to 1.11.0.
For backwards compatibility, use freshness_policy_or_from_metadata instead.
"""
...
@property
def freshness_policy_or_from_metadata(self) -> Optional[FreshnessPolicy]:
"""Prior to 1.11.0, freshness policy was stored in the node metadata. Freshness policy is a first-class attribute of the asset starting in 1.11.0.
This field is backwards compatible since it checks for the policy in both the top-level attribute and the node metadata.
"""
from dagster._core.definitions.freshness import FreshnessPolicy
return self.freshness_policy or FreshnessPolicy.from_asset_spec_metadata(self.metadata)
@property
@abstractmethod
def auto_materialize_policy(self) -> Optional["AutoMaterializePolicy"]: ...
@property
@abstractmethod
def auto_observe_interval_minutes(self) -> Optional[float]: ...
@property
@abstractmethod
def backfill_policy(self) -> Optional[BackfillPolicy]: ...
@property
@abstractmethod
def code_version(self) -> Optional[str]: ...
@property
@abstractmethod
def check_keys(self) -> AbstractSet[AssetCheckKey]: ...
@property
@abstractmethod
def execution_set_asset_keys(self) -> AbstractSet[AssetKey]: ...
@property
@abstractmethod
def execution_set_entity_keys(
self,
) -> AbstractSet[Union[AssetKey, AssetCheckKey]]: ...
def __str__(self) -> str:
return f"{self.__class__.__name__}<{self.key.to_user_string()}>"
| BaseAssetNode |
python | ipython__ipython | IPython/core/interactiveshell.py | {
"start": 9158,
"end": 10612
} | class ____:
"""The result of a call to :meth:`InteractiveShell.run_cell`
Stores information about what took place.
"""
execution_count: Optional[int] = None
error_before_exec: Optional[BaseException] = None
error_in_exec: Optional[BaseException] = None
info = None
result = None
def __init__(self, info):
self.info = info
@property
def success(self):
return (self.error_before_exec is None) and (self.error_in_exec is None)
def raise_error(self):
"""Reraises error if `success` is `False`, otherwise does nothing"""
if self.error_before_exec is not None:
raise self.error_before_exec
if self.error_in_exec is not None:
raise self.error_in_exec
def __repr__(self):
name = self.__class__.__qualname__
return '<%s object at %x, execution_count=%s error_before_exec=%s error_in_exec=%s info=%s result=%s>' %\
(name, id(self), self.execution_count, self.error_before_exec, self.error_in_exec, repr(self.info), repr(self.result))
@functools.wraps(io_open)
def _modified_open(file, *args, **kwargs):
if file in {0, 1, 2}:
raise ValueError(
f"IPython won't let you open fd={file} by default "
"as it is likely to crash IPython. If you know what you are doing, "
"you can use builtins' open."
)
return io_open(file, *args, **kwargs)
| ExecutionResult |
python | PyCQA__pylint | doc/data/messages/a/assigning-non-slot/good.py | {
"start": 0,
"end": 203
} | class ____:
__slots__ = ("name", "surname")
def __init__(self, name, surname):
self.name = name
self.surname = surname
self.setup()
def setup(self):
pass
| Student |
python | pallets__werkzeug | src/werkzeug/datastructures/structures.py | {
"start": 35517,
"end": 41356
} | class ____(cabc.MutableSet[str]):
"""Similar to the :class:`ETags` class this implements a set-like structure.
Unlike :class:`ETags` this is case insensitive and used for vary, allow, and
content-language headers.
If not constructed using the :func:`parse_set_header` function the
instantiation works like this:
>>> hs = HeaderSet(['foo', 'bar', 'baz'])
>>> hs
HeaderSet(['foo', 'bar', 'baz'])
"""
def __init__(
self,
headers: cabc.Iterable[str] | None = None,
on_update: cabc.Callable[[te.Self], None] | None = None,
) -> None:
self._headers = list(headers or ())
self._set = {x.lower() for x in self._headers}
self.on_update = on_update
def add(self, header: str) -> None:
"""Add a new header to the set."""
self.update((header,))
def remove(self: te.Self, header: str) -> None:
"""Remove a header from the set. This raises an :exc:`KeyError` if the
header is not in the set.
.. versionchanged:: 0.5
In older versions a :exc:`IndexError` was raised instead of a
:exc:`KeyError` if the object was missing.
:param header: the header to be removed.
"""
key = header.lower()
if key not in self._set:
raise KeyError(header)
self._set.remove(key)
for idx, key in enumerate(self._headers):
if key.lower() == header:
del self._headers[idx]
break
if self.on_update is not None:
self.on_update(self)
def update(self: te.Self, iterable: cabc.Iterable[str]) -> None:
"""Add all the headers from the iterable to the set.
:param iterable: updates the set with the items from the iterable.
"""
inserted_any = False
for header in iterable:
key = header.lower()
if key not in self._set:
self._headers.append(header)
self._set.add(key)
inserted_any = True
if inserted_any and self.on_update is not None:
self.on_update(self)
def discard(self, header: str) -> None:
"""Like :meth:`remove` but ignores errors.
:param header: the header to be discarded.
"""
try:
self.remove(header)
except KeyError:
pass
def find(self, header: str) -> int:
"""Return the index of the header in the set or return -1 if not found.
:param header: the header to be looked up.
"""
header = header.lower()
for idx, item in enumerate(self._headers):
if item.lower() == header:
return idx
return -1
def index(self, header: str) -> int:
"""Return the index of the header in the set or raise an
:exc:`IndexError`.
:param header: the header to be looked up.
"""
rv = self.find(header)
if rv < 0:
raise IndexError(header)
return rv
def clear(self: te.Self) -> None:
"""Clear the set."""
self._set.clear()
self._headers.clear()
if self.on_update is not None:
self.on_update(self)
def as_set(self, preserve_casing: bool = False) -> set[str]:
"""Return the set as real python set type. When calling this, all
the items are converted to lowercase and the ordering is lost.
:param preserve_casing: if set to `True` the items in the set returned
will have the original case like in the
:class:`HeaderSet`, otherwise they will
be lowercase.
"""
if preserve_casing:
return set(self._headers)
return set(self._set)
def to_header(self) -> str:
"""Convert the header set into an HTTP header string."""
return ", ".join(map(http.quote_header_value, self._headers))
def __getitem__(self, idx: t.SupportsIndex) -> str:
return self._headers[idx]
def __delitem__(self: te.Self, idx: t.SupportsIndex) -> None:
rv = self._headers.pop(idx)
self._set.remove(rv.lower())
if self.on_update is not None:
self.on_update(self)
def __setitem__(self: te.Self, idx: t.SupportsIndex, value: str) -> None:
old = self._headers[idx]
self._set.remove(old.lower())
self._headers[idx] = value
self._set.add(value.lower())
if self.on_update is not None:
self.on_update(self)
def __contains__(self, header: str) -> bool: # type: ignore[override]
return header.lower() in self._set
def __len__(self) -> int:
return len(self._set)
def __iter__(self) -> cabc.Iterator[str]:
return iter(self._headers)
def __bool__(self) -> bool:
return bool(self._set)
def __str__(self) -> str:
return self.to_header()
def __repr__(self) -> str:
return f"{type(self).__name__}({self._headers!r})"
# circular dependencies
from .. import http # noqa: E402
def __getattr__(name: str) -> t.Any:
import warnings
if name == "OrderedMultiDict":
warnings.warn(
"'OrderedMultiDict' is deprecated and will be removed in Werkzeug"
" 3.2. Use 'MultiDict' instead.",
DeprecationWarning,
stacklevel=2,
)
return _OrderedMultiDict
if name == "ImmutableOrderedMultiDict":
warnings.warn(
"'ImmutableOrderedMultiDict' is deprecated and will be removed in"
" Werkzeug 3.2. Use 'ImmutableMultiDict' instead.",
DeprecationWarning,
stacklevel=2,
)
return _ImmutableOrderedMultiDict
raise AttributeError(name)
| HeaderSet |
python | sympy__sympy | sympy/solvers/ode/single.py | {
"start": 41325,
"end": 44554
} | class ____(SinglePatternODESolver):
r"""
Solves separable 1st order differential equations.
This is any differential equation that can be written as `P(y)
\tfrac{dy}{dx} = Q(x)`. The solution can then just be found by
rearranging terms and integrating: `\int P(y) \,dy = \int Q(x) \,dx`.
This hint uses :py:meth:`sympy.simplify.simplify.separatevars` as its back
end, so if a separable equation is not caught by this solver, it is most
likely the fault of that function.
:py:meth:`~sympy.simplify.simplify.separatevars` is
smart enough to do most expansion and factoring necessary to convert a
separable equation `F(x, y)` into the proper form `P(x)\cdot{}Q(y)`. The
general solution is::
>>> from sympy import Function, dsolve, Eq, pprint
>>> from sympy.abc import x
>>> a, b, c, d, f = map(Function, ['a', 'b', 'c', 'd', 'f'])
>>> genform = Eq(a(x)*b(f(x))*f(x).diff(x), c(x)*d(f(x)))
>>> pprint(genform)
d
a(x)*b(f(x))*--(f(x)) = c(x)*d(f(x))
dx
>>> pprint(dsolve(genform, f(x), hint='separable_Integral'))
f(x)
/ /
| |
| b(y) | c(x)
| ---- dy = C1 + | ---- dx
| d(y) | a(x)
| |
/ /
Examples
========
>>> from sympy import Function, dsolve, Eq
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(Eq(f(x)*f(x).diff(x) + x, 3*x*f(x)**2), f(x),
... hint='separable', simplify=False))
/ 2 \ 2
log\3*f (x) - 1/ x
---------------- = C1 + --
6 2
References
==========
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 52
# indirect doctest
"""
hint = "separable"
has_integral = True
order = [1]
def _wilds(self, f, x, order):
d = Wild('d', exclude=[f(x).diff(x), f(x).diff(x, 2)])
e = Wild('e', exclude=[f(x).diff(x)])
return d, e
def _equation(self, fx, x, order):
d, e = self.wilds()
return d + e*fx.diff(x)
def _verify(self, fx):
d, e = self.wilds_match()
self.y = Dummy('y')
x = self.ode_problem.sym
d = separatevars(d.subs(fx, self.y))
e = separatevars(e.subs(fx, self.y))
# m1[coeff]*m1[x]*m1[y] + m2[coeff]*m2[x]*m2[y]*y'
self.m1 = separatevars(d, dict=True, symbols=(x, self.y))
self.m2 = separatevars(e, dict=True, symbols=(x, self.y))
return bool(self.m1 and self.m2)
def _get_match_object(self):
fx = self.ode_problem.func
x = self.ode_problem.sym
return self.m1, self.m2, x, fx
def _get_general_solution(self, *, simplify_flag: bool = True):
m1, m2, x, fx = self._get_match_object()
(C1,) = self.ode_problem.get_numbered_constants(num=1)
int = Integral(m2['coeff']*m2[self.y]/m1[self.y],
(self.y, None, fx))
gen_sol = Eq(int, Integral(-m1['coeff']*m1[x]/
m2[x], x) + C1)
return [gen_sol]
| Separable |
python | scrapy__scrapy | tests/test_spidermiddleware_output_chain.py | {
"start": 7910,
"end": 8453
} | class ____(_BaseSpiderMiddleware):
def process_spider_output(self, response, result):
out = []
for r in result:
r["processed"].append(f"{self.__class__.__name__}.process_spider_output")
out.append(r)
return out
def process_spider_exception(self, response, exception):
method = f"{self.__class__.__name__}.process_spider_exception"
self.crawler.spider.logger.info(
"%s: %s caught", method, exception.__class__.__name__
)
| _NotGeneratorDoNothingMiddleware |
python | pandas-dev__pandas | pandas/_config/config.py | {
"start": 11744,
"end": 27125
} | class ____:
"""provide attribute-style access to a nested dict"""
d: dict[str, Any]
def __init__(self, d: dict[str, Any], prefix: str = "") -> None:
object.__setattr__(self, "d", d)
object.__setattr__(self, "prefix", prefix)
def __setattr__(self, key: str, val: Any) -> None:
prefix = object.__getattribute__(self, "prefix")
if prefix:
prefix += "."
prefix += key
# you can't set new keys
# can you can't overwrite subtrees
if key in self.d and not isinstance(self.d[key], dict):
set_option(prefix, val)
else:
raise OptionError("You can only set the value of existing options")
def __getattr__(self, key: str):
prefix = object.__getattribute__(self, "prefix")
if prefix:
prefix += "."
prefix += key
try:
v = object.__getattribute__(self, "d")[key]
except KeyError as err:
raise OptionError("No such option") from err
if isinstance(v, dict):
return DictWrapper(v, prefix)
else:
return get_option(prefix)
def __dir__(self) -> list[str]:
return list(self.d.keys())
options = DictWrapper(_global_config)
# DictWrapper defines a custom setattr
object.__setattr__(options, "__module__", "pandas")
#
# Functions for use by pandas developers, in addition to User - api
@contextmanager
def option_context(*args) -> Generator[None]:
"""
Context manager to temporarily set options in a ``with`` statement.
This method allows users to set one or more pandas options temporarily
within a controlled block. The previous options' values are restored
once the block is exited. This is useful when making temporary adjustments
to pandas' behavior without affecting the global state.
Parameters
----------
*args : str | object | dict
An even amount of arguments provided in pairs which will be
interpreted as (pattern, value) pairs. Alternatively, a single
dictionary of {pattern: value} may be provided.
Returns
-------
None
No return value.
Yields
------
None
No yield value.
See Also
--------
get_option : Retrieve the value of the specified option.
set_option : Set the value of the specified option.
reset_option : Reset one or more options to their default value.
describe_option : Print the description for one or more registered options.
Notes
-----
For all available options, please view the :ref:`User Guide <options.available>`
or use ``pandas.describe_option()``.
Examples
--------
>>> from pandas import option_context
>>> with option_context("display.max_rows", 10, "display.max_columns", 5):
... pass
>>> with option_context({"display.max_rows": 10, "display.max_columns": 5}):
... pass
"""
if len(args) == 1 and isinstance(args[0], dict):
args = tuple(kv for item in args[0].items() for kv in item)
if len(args) % 2 != 0 or len(args) < 2:
raise ValueError(
"Provide an even amount of arguments as "
"option_context(pat, val, pat, val...)."
)
ops = tuple(zip(args[::2], args[1::2], strict=True))
undo: tuple[tuple[Any, Any], ...] = ()
try:
undo = tuple((pat, get_option(pat)) for pat, val in ops)
for pat, val in ops:
set_option(pat, val)
yield
finally:
for pat, val in undo:
set_option(pat, val)
def register_option(
key: str,
defval: object,
doc: str = "",
validator: Callable[[object], Any] | None = None,
cb: Callable[[str], Any] | None = None,
) -> None:
"""
Register an option in the package-wide pandas config object
Parameters
----------
key : str
Fully-qualified key, e.g. "x.y.option - z".
defval : object
Default value of the option.
doc : str
Description of the option.
validator : Callable, optional
Function of a single argument, should raise `ValueError` if
called with a value which is not a legal value for the option.
cb
a function of a single argument "key", which is called
immediately after an option value is set/reset. key is
the full name of the option.
Raises
------
ValueError if `validator` is specified and `defval` is not a valid value.
"""
import keyword
import tokenize
key = key.lower()
if key in _registered_options:
raise OptionError(f"Option '{key}' has already been registered")
if key in _reserved_keys:
raise OptionError(f"Option '{key}' is a reserved key")
# the default value should be legal
if validator:
validator(defval)
# walk the nested dict, creating dicts as needed along the path
path = key.split(".")
for k in path:
if not re.match("^" + tokenize.Name + "$", k):
raise ValueError(f"{k} is not a valid identifier")
if keyword.iskeyword(k):
raise ValueError(f"{k} is a python keyword")
cursor = _global_config
msg = "Path prefix to option '{option}' is already an option"
for i, p in enumerate(path[:-1]):
if not isinstance(cursor, dict):
raise OptionError(msg.format(option=".".join(path[:i])))
if p not in cursor:
cursor[p] = {}
cursor = cursor[p]
if not isinstance(cursor, dict):
raise OptionError(msg.format(option=".".join(path[:-1])))
cursor[path[-1]] = defval # initialize
# save the option metadata
_registered_options[key] = RegisteredOption(
key=key, defval=defval, doc=doc, validator=validator, cb=cb
)
def deprecate_option(
key: str,
category: type[Warning],
msg: str | None = None,
rkey: str | None = None,
removal_ver: str | None = None,
) -> None:
"""
Mark option `key` as deprecated, if code attempts to access this option,
a warning will be produced, using `msg` if given, or a default message
if not.
if `rkey` is given, any access to the key will be re-routed to `rkey`.
Neither the existence of `key` nor that if `rkey` is checked. If they
do not exist, any subsequence access will fail as usual, after the
deprecation warning is given.
Parameters
----------
key : str
Name of the option to be deprecated.
must be a fully-qualified option name (e.g "x.y.z.rkey").
category : Warning
Warning class for the deprecation.
msg : str, optional
Warning message to output when the key is referenced.
if no message is given a default message will be emitted.
rkey : str, optional
Name of an option to reroute access to.
If specified, any referenced `key` will be
re-routed to `rkey` including set/get/reset.
rkey must be a fully-qualified option name (e.g "x.y.z.rkey").
used by the default message if no `msg` is specified.
removal_ver : str, optional
Specifies the version in which this option will
be removed. used by the default message if no `msg` is specified.
Raises
------
OptionError
If the specified key has already been deprecated.
"""
key = key.lower()
if key in _deprecated_options:
raise OptionError(f"Option '{key}' has already been defined as deprecated.")
_deprecated_options[key] = DeprecatedOption(key, category, msg, rkey, removal_ver)
#
# functions internal to the module
def _select_options(pat: str) -> list[str]:
"""
returns a list of keys matching `pat`
if pat=="all", returns all registered options
"""
# short-circuit for exact key
if pat in _registered_options:
return [pat]
# else look through all of them
keys = sorted(_registered_options.keys())
if pat == "all": # reserved key
return keys
return [k for k in keys if re.search(pat, k, re.I)]
def _get_root(key: str) -> tuple[dict[str, Any], str]:
path = key.split(".")
cursor = _global_config
for p in path[:-1]:
cursor = cursor[p]
return cursor, path[-1]
def _get_deprecated_option(key: str):
"""
Retrieves the metadata for a deprecated option, if `key` is deprecated.
Returns
-------
DeprecatedOption (namedtuple) if key is deprecated, None otherwise
"""
try:
d = _deprecated_options[key]
except KeyError:
return None
else:
return d
def _get_registered_option(key: str):
"""
Retrieves the option metadata if `key` is a registered option.
Returns
-------
RegisteredOption (namedtuple) if key is deprecated, None otherwise
"""
return _registered_options.get(key)
def _translate_key(key: str) -> str:
"""
if `key` is deprecated and a replacement key defined, will return the
replacement key, otherwise returns `key` as-is
"""
d = _get_deprecated_option(key)
if d:
return d.rkey or key
else:
return key
def _warn_if_deprecated(key: str) -> bool:
"""
Checks if `key` is a deprecated option and if so, prints a warning.
Returns
-------
bool - True if `key` is deprecated, False otherwise.
"""
d = _get_deprecated_option(key)
if d:
if d.msg:
warnings.warn(
d.msg,
d.category,
stacklevel=find_stack_level(),
)
else:
msg = f"'{key}' is deprecated"
if d.removal_ver:
msg += f" and will be removed in {d.removal_ver}"
if d.rkey:
msg += f", please use '{d.rkey}' instead."
else:
msg += ", please refrain from using it."
warnings.warn(
msg,
d.category,
stacklevel=find_stack_level(),
)
return True
return False
def _build_option_description(k: str) -> str:
"""Builds a formatted description of a registered option and prints it"""
o = _get_registered_option(k)
d = _get_deprecated_option(k)
s = f"{k} "
if o.doc:
s += "\n".join(o.doc.strip().split("\n"))
else:
s += "No description available."
if o:
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
warnings.simplefilter("ignore", DeprecationWarning)
s += f"\n [default: {o.defval}] [currently: {get_option(k)}]"
if d:
rkey = d.rkey or ""
s += "\n (Deprecated"
s += f", use `{rkey}` instead."
s += ")"
return s
# helpers
@contextmanager
def config_prefix(prefix: str) -> Generator[None]:
"""
contextmanager for multiple invocations of API with a common prefix
supported API functions: (register / get / set )__option
Warning: This is not thread - safe, and won't work properly if you import
the API functions into your module using the "from x import y" construct.
Example
-------
import pandas._config.config as cf
with cf.config_prefix("display.font"):
cf.register_option("color", "red")
cf.register_option("size", " 5 pt")
cf.set_option(size, " 6 pt")
cf.get_option(size)
...
etc'
will register options "display.font.color", "display.font.size", set the
value of "display.font.size"... and so on.
"""
# Note: reset_option relies on set_option, and on key directly
# it does not fit in to this monkey-patching scheme
global register_option, get_option, set_option
def wrap(func: F) -> F:
def inner(key: str, *args, **kwds):
pkey = f"{prefix}.{key}"
return func(pkey, *args, **kwds)
return cast(F, inner)
_register_option = register_option
_get_option = get_option
_set_option = set_option
set_option = wrap(set_option)
get_option = wrap(get_option)
register_option = wrap(register_option)
try:
yield
finally:
set_option = _set_option
get_option = _get_option
register_option = _register_option
# These factories and methods are handy for use as the validator
# arg in register_option
def is_type_factory(_type: type[Any]) -> Callable[[Any], None]:
"""
Parameters
----------
`_type` - a type to be compared against (e.g. type(x) == `_type`)
Returns
-------
validator - a function of a single argument x , which raises
ValueError if type(x) is not equal to `_type`
"""
def inner(x) -> None:
if type(x) != _type:
raise ValueError(f"Value must have type '{_type}'")
return inner
def is_instance_factory(_type: type | tuple[type, ...]) -> Callable[[Any], None]:
"""
Parameters
----------
`_type` - the type to be checked against
Returns
-------
validator - a function of a single argument x , which raises
ValueError if x is not an instance of `_type`
"""
if isinstance(_type, tuple):
type_repr = "|".join(map(str, _type))
else:
type_repr = f"'{_type}'"
def inner(x) -> None:
if not isinstance(x, _type):
raise ValueError(f"Value must be an instance of {type_repr}")
return inner
def is_one_of_factory(legal_values: Sequence) -> Callable[[Any], None]:
callables = [c for c in legal_values if callable(c)]
legal_values = [c for c in legal_values if not callable(c)]
def inner(x) -> None:
if x not in legal_values:
if not any(c(x) for c in callables):
uvals = [str(lval) for lval in legal_values]
pp_values = "|".join(uvals)
msg = f"Value must be one of {pp_values}"
if len(callables):
msg += " or a callable"
raise ValueError(msg)
return inner
def is_nonnegative_int(value: object) -> None:
"""
Verify that value is None or a positive int.
Parameters
----------
value : None or int
The `value` to be checked.
Raises
------
ValueError
When the value is not None or is a negative integer
"""
if value is None:
return
elif isinstance(value, int):
if value >= 0:
return
msg = "Value must be a nonnegative integer or None"
raise ValueError(msg)
# common type validators, for convenience
# usage: register_option(... , validator = is_int)
is_int = is_type_factory(int)
is_bool = is_type_factory(bool)
is_float = is_type_factory(float)
is_str = is_type_factory(str)
is_text = is_instance_factory((str, bytes))
def is_callable(obj: object) -> bool:
"""
Parameters
----------
`obj` - the object to be checked
Returns
-------
validator - returns True if object is callable
raises ValueError otherwise.
"""
if not callable(obj):
raise ValueError("Value must be a callable")
return True
# import set_module here would cause circular import
get_option.__module__ = "pandas"
set_option.__module__ = "pandas"
describe_option.__module__ = "pandas"
reset_option.__module__ = "pandas"
option_context.__module__ = "pandas"
| DictWrapper |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.