language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
huggingface__transformers
|
src/transformers/models/wav2vec2/modeling_wav2vec2.py
|
{
"start": 2201,
"end": 10874
}
|
class ____(ModelOutput):
r"""
loss (*optional*, returned when `sample_negative_indices` are passed, `torch.FloatTensor` of shape `(1,)`):
Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official
paper](https://huggingface.co/papers/2006.11477).
projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked
projected quantized states.
projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive
target vectors for contrastive loss.
codevector_perplexity (`torch.FloatTensor` of shape `(1,)`):
The perplexity of the codevector distribution, used to measure the diversity of the codebook.
contrastive_loss (*optional*, returned when `sample_negative_indices` are passed, `torch.FloatTensor` of shape `(1,)`):
The contrastive loss (L_m) as stated in the [official paper](https://huggingface.co/papers/2006.11477).
diversity_loss (*optional*, returned when `sample_negative_indices` are passed, `torch.FloatTensor` of shape `(1,)`):
The diversity loss (L_d) as stated in the [official paper](https://huggingface.co/papers/2006.11477).
"""
loss: Optional[torch.FloatTensor] = None
projected_states: Optional[torch.FloatTensor] = None
projected_quantized_states: Optional[torch.FloatTensor] = None
codevector_perplexity: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
contrastive_loss: Optional[torch.FloatTensor] = None
diversity_loss: Optional[torch.FloatTensor] = None
def _compute_mask_indices(
shape: tuple[int, int],
mask_prob: float,
mask_length: int,
attention_mask: Optional[torch.LongTensor] = None,
min_masks: int = 0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
ASR](https://huggingface.co/papers/1904.08779). Note that this method is not optimized to run on TPU and should be run on
CPU as part of the preprocessing during training.
Args:
shape: The shape for which to compute masks. This should be of a tuple of size 2 where
the first element is the batch size and the second element is the length of the axis to span.
mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
independently generated mask spans of length `mask_length` is computed by
`mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
actual percentage will be smaller.
mask_length: size of the mask
min_masks: minimum number of masked spans
attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
each batch dimension.
"""
batch_size, sequence_length = shape
if mask_length < 1:
raise ValueError("`mask_length` has to be bigger than 0.")
if mask_length > sequence_length:
raise ValueError(
f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
f" and `sequence_length`: {sequence_length}`"
)
# epsilon is used for probabilistic rounding
epsilon = np.random.rand(1).item()
def compute_num_masked_span(input_length):
"""Given input length, compute how many spans should be masked"""
num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
num_masked_span = max(num_masked_span, min_masks)
# make sure num masked span <= sequence_length
if num_masked_span * mask_length > sequence_length:
num_masked_span = sequence_length // mask_length
# make sure num_masked span is also <= input_length - (mask_length - 1)
if input_length - (mask_length - 1) < num_masked_span:
num_masked_span = max(input_length - (mask_length - 1), 0)
return num_masked_span
# compute number of masked spans in batch
input_lengths = (
attention_mask.detach().sum(-1).tolist()
if attention_mask is not None
else [sequence_length for _ in range(batch_size)]
)
# SpecAugment mask to fill
spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
spec_aug_mask_idxs = []
max_num_masked_span = compute_num_masked_span(sequence_length)
if max_num_masked_span == 0:
return spec_aug_mask
for input_length in input_lengths:
# compute num of masked spans for this input
num_masked_span = compute_num_masked_span(input_length)
# get random indices to mask
spec_aug_mask_idx = np.random.choice(
np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
)
# pick first sampled index that will serve as a dummy index to pad vector
# to ensure same dimension for all batches due to probabilistic rounding
# Picking first sample just pads those vectors twice.
if len(spec_aug_mask_idx) == 0:
# this case can only happen if `input_length` is strictly smaller then
# `sequence_length` in which case the last token has to be a padding
# token which we can use as a dummy mask id
dummy_mask_idx = sequence_length - 1
else:
dummy_mask_idx = spec_aug_mask_idx[0]
spec_aug_mask_idx = np.concatenate(
[spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
)
spec_aug_mask_idxs.append(spec_aug_mask_idx)
spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)
# expand masked indices to masked spans
spec_aug_mask_idxs = np.broadcast_to(
spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
)
spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)
# add offset to the starting indexes so that indexes now create a span
offsets = np.arange(mask_length)[None, None, :]
offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
batch_size, max_num_masked_span * mask_length
)
spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
# ensure that we cannot have indices larger than sequence_length
if spec_aug_mask_idxs.max() > sequence_length - 1:
spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1
# scatter indices to mask
np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)
return spec_aug_mask
def _sample_negative_indices(
features_shape: tuple, num_negatives: int, mask_time_indices: Optional[np.ndarray] = None
):
"""
Sample `num_negatives` vectors from feature vectors.
"""
batch_size, sequence_length = features_shape
# generate indices of the positive vectors themselves, repeat them `num_negatives` times
sequence_length_range = np.arange(sequence_length)
# get `num_negatives` random vector indices from the same utterance
sampled_negative_indices = np.zeros(shape=(batch_size, sequence_length, num_negatives), dtype=np.int32)
mask_time_indices = (
mask_time_indices.astype(bool) if mask_time_indices is not None else np.ones(features_shape, dtype=bool)
)
for batch_idx in range(batch_size):
high = mask_time_indices[batch_idx].sum() - 1
mapped_masked_indices = sequence_length_range[mask_time_indices[batch_idx]]
feature_indices = np.broadcast_to(np.arange(high + 1)[:, None], (high + 1, num_negatives))
sampled_indices = np.random.randint(0, high, size=(high + 1, num_negatives))
# avoid sampling the same positive vector, but keep the distribution uniform
sampled_indices[sampled_indices >= feature_indices] += 1
# remap to actual indices
sampled_negative_indices[batch_idx][mask_time_indices[batch_idx]] = mapped_masked_indices[sampled_indices]
# correct for batch size
sampled_negative_indices[batch_idx] += batch_idx * sequence_length
return sampled_negative_indices
|
Wav2Vec2ForPreTrainingOutput
|
python
|
cython__cython
|
Cython/Compiler/MemoryView.py
|
{
"start": 14665,
"end": 15550
}
|
class ____(SliceIter):
def start_loops(self):
code = self.code
code.begin_block()
type_decl = self.slice_type.dtype.empty_declaration_code()
total_size = ' * '.join("%s.shape[%d]" % (self.slice_result, i)
for i in range(self.ndim))
code.putln("Py_ssize_t __pyx_temp_extent = %s;" % total_size)
code.putln("Py_ssize_t __pyx_temp_idx;")
code.putln("%s *__pyx_temp_pointer = (%s *) %s.data;" % (
type_decl, type_decl, self.slice_result))
code.putln("for (__pyx_temp_idx = 0; "
"__pyx_temp_idx < __pyx_temp_extent; "
"__pyx_temp_idx++) {")
return "__pyx_temp_pointer"
def end_loops(self):
self.code.putln("__pyx_temp_pointer += 1;")
self.code.putln("}")
self.code.end_block()
|
ContigSliceIter
|
python
|
crytic__slither
|
slither/tools/properties/properties/properties.py
|
{
"start": 54,
"end": 170
}
|
class ____(Enum):
CODE_QUALITY = 1
LOW_SEVERITY = 2
MEDIUM_SEVERITY = 3
HIGH_SEVERITY = 4
|
PropertyType
|
python
|
PrefectHQ__prefect
|
tests/cli/test_flow_run.py
|
{
"start": 28866,
"end": 31061
}
|
class ____:
@pytest.mark.parametrize("sigterm_handling", ["reschedule", None])
async def test_flow_run_execute_sigterm_handling(
self,
monkeypatch: pytest.MonkeyPatch,
prefect_client: PrefectClient,
sigterm_handling: str | None,
):
if sigterm_handling is not None:
monkeypatch.setenv(
"PREFECT_FLOW_RUN_EXECUTE_SIGTERM_BEHAVIOR", sigterm_handling
)
# Create a flow run that will take a while to run
deployment_id = await (await tired_flow.to_deployment(__file__)).apply()
flow_run = await prefect_client.create_flow_run_from_deployment(
deployment_id=deployment_id
)
# Run the flow run in a new process with a Runner
popen = subprocess.Popen(
[
"prefect",
"flow-run",
"execute",
str(flow_run.id),
],
env=get_current_settings().to_environment_variables(exclude_unset=True)
| os.environ,
)
assert popen.pid is not None
# Wait for the flow run to start
while True:
await anyio.sleep(0.5)
flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
assert flow_run.state
if flow_run.state.is_running():
break
# Send the SIGTERM signal
popen.terminate()
# Wait for the process to exit
return_code = popen.wait(timeout=10)
flow_run = await prefect_client.read_flow_run(flow_run_id=flow_run.id)
assert flow_run.state
if sigterm_handling == "reschedule":
assert flow_run.state.is_scheduled(), (
"The flow run should have been rescheduled"
)
assert return_code == 0, "The process should have exited with a 0 exit code"
else:
assert flow_run.state.is_running(), (
"The flow run should be stuck in running"
)
assert return_code == -signal.SIGTERM.value, (
"The process should have exited with a SIGTERM exit code"
)
|
TestSignalHandling
|
python
|
Lightning-AI__lightning
|
examples/fabric/image_classifier/train_torch.py
|
{
"start": 1040,
"end": 5608
}
|
class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def run(hparams):
torch.manual_seed(hparams.seed)
use_cuda = torch.cuda.is_available()
use_mps = torch.backends.mps.is_available()
if use_cuda:
device = torch.device("cuda")
elif use_mps:
device = torch.device("mps")
else:
device = torch.device("cpu")
transform = T.Compose([T.ToTensor(), T.Normalize((0.1307,), (0.3081,))])
train_dataset = MNIST(DATASETS_PATH, train=True, download=True, transform=transform)
test_dataset = MNIST(DATASETS_PATH, train=False, transform=transform)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=hparams.batch_size,
)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=hparams.batch_size)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=hparams.lr)
scheduler = StepLR(optimizer, step_size=1, gamma=hparams.gamma)
# EPOCH LOOP
for epoch in range(1, hparams.epochs + 1):
# TRAINING LOOP
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if (batch_idx == 0) or ((batch_idx + 1) % hparams.log_interval == 0):
print(
f"Train Epoch: {epoch} [{batch_idx * len(data)}/{len(train_loader.dataset)}"
f" ({100.0 * batch_idx / len(train_loader):.0f}%)]\tLoss: {loss.item():.6f}"
)
if hparams.dry_run:
break
scheduler.step()
# TESTING LOOP
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction="sum").item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
if hparams.dry_run:
break
test_loss /= len(test_loader.dataset)
print(
f"\nTest set: Average loss: {test_loss:.4f}, Accuracy: {correct}/{len(test_loader.dataset)}"
f" ({100.0 * correct / len(test_loader.dataset):.0f}%)\n"
)
if hparams.dry_run:
break
if hparams.save_model:
torch.save(model.state_dict(), "mnist_cnn.pt")
def main():
parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
parser.add_argument(
"--batch-size", type=int, default=64, metavar="N", help="input batch size for training (default: 64)"
)
parser.add_argument("--epochs", type=int, default=14, metavar="N", help="number of epochs to train (default: 14)")
parser.add_argument("--lr", type=float, default=1.0, metavar="LR", help="learning rate (default: 1.0)")
parser.add_argument("--gamma", type=float, default=0.7, metavar="M", help="Learning rate step gamma (default: 0.7)")
parser.add_argument("--dry-run", action="store_true", default=False, help="quickly check a single pass")
parser.add_argument("--seed", type=int, default=1, metavar="S", help="random seed (default: 1)")
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument("--save-model", action="store_true", default=False, help="For Saving the current Model")
hparams = parser.parse_args()
run(hparams)
if __name__ == "__main__":
main()
|
Net
|
python
|
huggingface__transformers
|
src/transformers/models/glm4v_moe/modeling_glm4v_moe.py
|
{
"start": 23291,
"end": 24085
}
|
class ____(nn.Module):
def __init__(self, config, intermediate_size=None):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
@use_kernel_forward_from_hub("RMSNorm")
|
Glm4vMoeTextMLP
|
python
|
ansible__ansible
|
test/units/module_utils/common/test_dict_transformations.py
|
{
"start": 1478,
"end": 1710
}
|
class ____:
def test_camel_to_snake_and_back(self):
for (k, v) in EXPECTED_REVERSIBLE.items():
assert _snake_to_camel(_camel_to_snake(k, reversible=True), capitalize_first=True) == k
|
TestCaseCamelToSnakeAndBack
|
python
|
Netflix__metaflow
|
metaflow/_vendor/click/_compat.py
|
{
"start": 1519,
"end": 2672
}
|
class ____(io.TextIOWrapper):
def __init__(
self,
stream,
encoding,
errors,
force_readable=False,
force_writable=False,
**extra
):
self._stream = stream = _FixupStream(stream, force_readable, force_writable)
io.TextIOWrapper.__init__(self, stream, encoding, errors, **extra)
# The io module is a place where the Python 3 text behavior
# was forced upon Python 2, so we need to unbreak
# it to look like Python 2.
if PY2:
def write(self, x):
if isinstance(x, str) or is_bytes(x):
try:
self.flush()
except Exception:
pass
return self.buffer.write(str(x))
return io.TextIOWrapper.write(self, x)
def writelines(self, lines):
for line in lines:
self.write(line)
def __del__(self):
try:
self.detach()
except Exception:
pass
def isatty(self):
# https://bitbucket.org/pypy/pypy/issue/1803
return self._stream.isatty()
|
_NonClosingTextIOWrapper
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-gcp/tests/test_utilities.py
|
{
"start": 641,
"end": 1197
}
|
class ____(Mock):
call_count = 0
def __init__(self, succeeded=False, *args, **kwargs):
super().__init__()
self.log_uri = "test_uri"
self._succeeded = succeeded
def is_running(self):
MockExecution.call_count += 1
if self.call_count > 2:
return False
return True
def condition_after_completion(self):
return {"message": "test"}
def succeeded(self):
return self._succeeded
@classmethod
def get(cls, *args, **kwargs):
return cls()
|
MockExecution
|
python
|
yandexdataschool__Practical_RL
|
week04_approx_rl/dqn/logger.py
|
{
"start": 102,
"end": 2615
}
|
class ____:
def __init__(self, use_tensorboard=True, log_dir='runs'):
"""
Initializes the Logger.
:param use_tensorboard: If True, logs will be sent to TensorBoard.
:param log_dir: Directory where TensorBoard logs are saved.
"""
self.use_tensorboard = use_tensorboard
if self.use_tensorboard:
self.writer = SummaryWriter(log_dir=log_dir)
else:
# Initialize lists to store history for matplotlib
self.mean_rw_history = []
self.td_loss_history = []
self.grad_norm_history = []
self.initial_state_v_history = []
def log_loss(self, loss, step):
if self.use_tensorboard:
self.writer.add_scalar("Loss", loss, step)
else:
self.td_loss_history.append(loss)
def log_grad_norm(self, grad_norm, step):
if self.use_tensorboard:
self.writer.add_scalar("Grad Norm", grad_norm, step)
else:
self.grad_norm_history.append(grad_norm)
def log_mean_reward(self, mean_reward, step):
if self.use_tensorboard:
self.writer.add_scalar("Mean Reward per Life", mean_reward, step)
else:
self.mean_rw_history.append(mean_reward)
def log_initial_state_v(self, initial_v, step):
if self.use_tensorboard:
self.writer.add_scalar("Initial State V", initial_v, step)
else:
self.initial_state_v_history.append(initial_v)
def plot(self):
if not self.use_tensorboard:
plt.figure(figsize=[16, 9])
plt.subplot(2, 2, 1)
plt.title("Mean Reward per Episode")
plt.plot(self.mean_rw_history, label='Mean Reward')
plt.legend()
plt.grid()
plt.subplot(2, 2, 2)
plt.title("TD Loss History")
plt.plot(self.td_loss_history, label='TD Loss')
plt.legend()
plt.grid()
plt.subplot(2, 2, 3)
plt.title("Initial State V")
plt.plot(self.initial_state_v_history, label='Initial State V')
plt.legend()
plt.grid()
plt.subplot(2, 2, 4)
plt.title("Grad Norm History")
plt.plot(self.grad_norm_history, label='Grad Norm')
plt.legend()
plt.grid()
plt.tight_layout()
plt.show()
def close(self):
if self.use_tensorboard:
self.writer.close()
|
Logger
|
python
|
openai__openai-python
|
src/openai/types/chat/chat_completion_named_tool_choice_custom_param.py
|
{
"start": 361,
"end": 565
}
|
class ____(TypedDict, total=False):
custom: Required[Custom]
type: Required[Literal["custom"]]
"""For custom tool calling, the type is always `custom`."""
|
ChatCompletionNamedToolChoiceCustomParam
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/io_ops/parsing_ops_test.py
|
{
"start": 42272,
"end": 51985
}
|
class ____(test.TestCase):
def _test(self, kwargs, expected_values=None, expected_err=None):
if expected_err:
with self.assertRaisesWithPredicateMatch(expected_err[0],
expected_err[1]):
self.evaluate(parsing_ops.parse_single_example(**kwargs))
else:
out = parsing_ops.parse_single_example(**kwargs)
_compare_output_to_expected(self, out, expected_values)
# Check shapes.
for k, f in kwargs["features"].items():
if isinstance(f, parsing_ops.FixedLenFeature) and f.shape is not None:
self.assertEqual(
tuple(out[k].get_shape()), tensor_shape.as_shape(f.shape))
elif isinstance(f, parsing_ops.VarLenFeature):
if context.executing_eagerly():
self.assertEqual(tuple(out[k].indices.shape.as_list()), (2, 1))
self.assertEqual(tuple(out[k].values.shape.as_list()), (2,))
self.assertEqual(tuple(out[k].dense_shape.shape.as_list()), (1,))
else:
self.assertEqual(tuple(out[k].indices.shape.as_list()), (None, 1))
self.assertEqual(tuple(out[k].values.shape.as_list()), (None,))
self.assertEqual(tuple(out[k].dense_shape.shape.as_list()), (1,))
def testSingleExampleWithSparseAndSparseFeatureAndDense(self):
original = example(
features=features({
"c": float_feature([3, 4]),
"d": float_feature([0.0, 1.0]),
"val": bytes_feature([b"a", b"b"]),
"idx": int64_feature([0, 3]),
"st_a": float_feature([3.0, 4.0])
}))
serialized = original.SerializeToString()
a_default = [1, 2, 3]
b_default = np.random.rand(3, 3).astype(bytes)
test_features = {
"st_a":
parsing_ops.VarLenFeature(dtypes.float32),
"sp":
parsing_ops.SparseFeature(["idx"], "val", dtypes.string, [13]),
"a":
parsing_ops.FixedLenFeature((1, 3),
dtypes.int64,
default_value=a_default),
"b":
parsing_ops.FixedLenFeature((3, 3),
dtypes.string,
default_value=b_default),
# Feature "c" must be provided, since it has no default_value.
"c":
parsing_ops.FixedLenFeature(2, dtypes.float32),
"d":
parsing_ops.FixedLenSequenceFeature([],
dtypes.float32,
allow_missing=True)
}
expected_st_a = (
np.array([[0], [1]], dtype=np.int64), # indices
np.array([3.0, 4.0], dtype=np.float32), # values
np.array([2], dtype=np.int64)) # shape: max_values = 2
expected_sp = ( # indices, values, shape
np.array([[0], [3]], dtype=np.int64), np.array(["a", "b"], dtype="|S"),
np.array([13], dtype=np.int64)) # max_values = 13
expected_output = {
"st_a": expected_st_a,
"sp": expected_sp,
"a": [a_default],
"b": b_default,
"c": np.array([3, 4], dtype=np.float32),
"d": np.array([0.0, 1.0], dtype=np.float32),
}
self._test(
{
"example_names": ops.convert_to_tensor("in1"),
"serialized": ops.convert_to_tensor(serialized),
"features": test_features,
}, expected_output)
# Note: if example_names is None, then a different code-path gets used.
self._test(
{
"serialized": ops.convert_to_tensor(serialized),
"features": test_features,
}, expected_output)
def testSingleExampleWithAllFeatureTypes(self):
original = example(
features=features({
# FixLen features
"c": float_feature([3, 4]),
"d": float_feature([0.0, 1.0]),
# Sparse features
"val": bytes_feature([b"a", b"b"]), # for sp
"idx": int64_feature([0, 3]), # for sp
"st_a": float_feature([3.0, 4.0]),
# Ragged features
"rt_1d": float_feature([3.0, 4.0]),
"rt_values": float_feature([5, 6, 7]), # for rt_2d
"rt_splits": int64_feature([0, 1, 1, 3]), # for rt_2d
"rt_lengths": int64_feature([1, 0, 2]), # for rt_2d
"rt_starts": int64_feature([0, 1, 1]), # for rt_2d
"rt_limits": int64_feature([1, 1, 3]), # for rt_2d
"rt_rowids": int64_feature([0, 2, 2]), # for rt_2d
"rt_splits2": int64_feature([0, 2, 3]), # for rt_3d
}))
serialized = original.SerializeToString()
a_default = [1, 2, 3]
b_default = np.random.rand(3, 3).astype(bytes)
test_features = {
"st_a":
parsing_ops.VarLenFeature(dtypes.float32),
"sp":
parsing_ops.SparseFeature(["idx"], "val", dtypes.string, [13]),
"a":
parsing_ops.FixedLenFeature((1, 3),
dtypes.int64,
default_value=a_default),
"b":
parsing_ops.FixedLenFeature((3, 3),
dtypes.string,
default_value=b_default),
# Feature "c" must be provided, since it has no default_value.
"c":
parsing_ops.FixedLenFeature(2, dtypes.float32),
"d":
parsing_ops.FixedLenSequenceFeature([],
dtypes.float32,
allow_missing=True),
"rt_1d":
parsing_ops.RaggedFeature(dtypes.float32),
"rt_2d_with_splits":
parsing_ops.RaggedFeature(
value_key="rt_values",
partitions=[parsing_ops.RaggedFeature.RowSplits("rt_splits")],
dtype=dtypes.float32),
"rt_2d_with_lengths":
parsing_ops.RaggedFeature(
value_key="rt_values",
partitions=[parsing_ops.RaggedFeature.RowLengths("rt_lengths")],
dtype=dtypes.float32),
"rt_2d_with_starts":
parsing_ops.RaggedFeature(
value_key="rt_values",
partitions=[parsing_ops.RaggedFeature.RowStarts("rt_starts")],
dtype=dtypes.float32),
"rt_2d_with_limits":
parsing_ops.RaggedFeature(
value_key="rt_values",
partitions=[parsing_ops.RaggedFeature.RowLimits("rt_limits")],
dtype=dtypes.float32),
"rt_2d_with_rowids":
parsing_ops.RaggedFeature(
value_key="rt_values",
partitions=[parsing_ops.RaggedFeature.ValueRowIds("rt_rowids")],
dtype=dtypes.float32),
"rt_2d_with_uniform_row_length":
parsing_ops.RaggedFeature(
value_key="rt_values",
partitions=[parsing_ops.RaggedFeature.UniformRowLength(1)],
dtype=dtypes.float32),
"rt_3d":
parsing_ops.RaggedFeature(
value_key="rt_values",
partitions=[
parsing_ops.RaggedFeature.RowSplits("rt_splits2"),
parsing_ops.RaggedFeature.RowSplits("rt_splits")
],
dtype=dtypes.float32),
"rt_3d_with_uniform_row_length":
parsing_ops.RaggedFeature(
value_key="rt_values",
partitions=[
parsing_ops.RaggedFeature.UniformRowLength(1),
parsing_ops.RaggedFeature.RowSplits("rt_splits")
],
dtype=dtypes.float32),
}
expected_st_a = (
np.array([[0], [1]], dtype=np.int64), # indices
np.array([3.0, 4.0], dtype=np.float32), # values
np.array([2], dtype=np.int64)) # shape: max_values = 2
expected_sp = ( # indices, values, shape
np.array([[0], [3]], dtype=np.int64), np.array(["a", "b"], dtype="|S"),
np.array([13], dtype=np.int64)) # max_values = 13
expected_rt_1d = constant_op.constant([3, 4], dtypes.float32)
expected_rt_2d = ragged_factory_ops.constant([[5], [], [6, 7]],
dtype=dtypes.float32)
expected_rt_2d_uniform = constant_op.constant([[5], [6], [7]],
dtype=dtypes.float32)
expected_rt_3d = ragged_factory_ops.constant([[[5], []], [[6, 7]]],
dtype=dtypes.float32)
expected_rt_3d_with_uniform = (
ragged_tensor.RaggedTensor.from_uniform_row_length(
expected_rt_2d, uniform_row_length=1))
expected_output = {
"st_a": expected_st_a,
"sp": expected_sp,
"a": [a_default],
"b": b_default,
"c": np.array([3, 4], dtype=np.float32),
"d": np.array([0.0, 1.0], dtype=np.float32),
"rt_1d": expected_rt_1d,
"rt_2d_with_splits": expected_rt_2d,
"rt_2d_with_lengths": expected_rt_2d,
"rt_2d_with_starts": expected_rt_2d,
"rt_2d_with_limits": expected_rt_2d,
"rt_2d_with_rowids": expected_rt_2d,
"rt_2d_with_uniform_row_length": expected_rt_2d_uniform,
"rt_3d": expected_rt_3d,
"rt_3d_with_uniform_row_length": expected_rt_3d_with_uniform,
}
self._test(
{
"example_names": ops.convert_to_tensor("in1"),
"serialized": ops.convert_to_tensor(serialized),
"features": test_features,
}, expected_output)
@test_util.run_all_in_graph_and_eager_modes
|
ParseSingleExampleTest
|
python
|
django__django
|
tests/generic_relations/models.py
|
{
"start": 3823,
"end": 3933
}
|
class ____(ConcreteRelatedModel):
class Meta:
proxy = True
# To test fix for #7551
|
ProxyRelatedModel
|
python
|
sympy__sympy
|
sympy/core/logic.py
|
{
"start": 8939,
"end": 9653
}
|
class ____(AndOr_Base):
op_x_notx = False
def _eval_propagate_not(self):
# !(a&b&c ...) == !a | !b | !c ...
return Or(*[Not(a) for a in self.args])
# (a|b|...) & c == (a&c) | (b&c) | ...
def expand(self):
# first locate Or
for i, arg in enumerate(self.args):
if isinstance(arg, Or):
arest = self.args[:i] + self.args[i + 1:]
orterms = [And(*(arest + (a,))) for a in arg.args]
for j in range(len(orterms)):
if isinstance(orterms[j], Logic):
orterms[j] = orterms[j].expand()
res = Or(*orterms)
return res
return self
|
And
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/nn_test.py
|
{
"start": 39335,
"end": 40069
}
|
class ____(test_lib.TestCase):
def test(self):
np.random.seed(1) # Make it reproducible.
x = np.random.randn(3, 4).astype(np.float32)
y = np.maximum(x, 0.0)
z = self.evaluate(nn_ops.relu(constant_op.constant(x)))
self.assertAllEqual(y, z)
@test_util.disable_xla(
"This test relies on undefined behavior that XLA does not replicate")
@test_util.run_deprecated_v1
def testNaNs(self):
# Test that relu(nan) = nan for various sizes.
for i in range(18):
x = np.zeros(i) + np.nan
# TODO(b/178335491): This is broken on GPU today.
with self.cached_session(use_gpu=False):
z = nn_ops.relu(constant_op.constant(x)).eval()
self.assertTrue(np.isnan(z).all())
|
ReluTest
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 97165,
"end": 97657
}
|
class ____(sgqlc.types.Enum):
"""Represents the individual results of a search.
Enumeration Choices:
* `DISCUSSION`: Returns matching discussions in repositories.
* `ISSUE`: Returns results matching issues in repositories.
* `REPOSITORY`: Returns results matching repositories.
* `USER`: Returns results matching users and organizations on
GitHub.
"""
__schema__ = github_schema
__choices__ = ("DISCUSSION", "ISSUE", "REPOSITORY", "USER")
|
SearchType
|
python
|
numpy__numpy
|
numpy/exceptions.py
|
{
"start": 5644,
"end": 7709
}
|
class ____(TypeError):
"""Multiple DTypes could not be converted to a common one.
This exception derives from ``TypeError`` and is raised whenever dtypes
cannot be converted to a single common one. This can be because they
are of a different category/class or incompatible instances of the same
one (see Examples).
Notes
-----
Many functions will use promotion to find the correct result and
implementation. For these functions the error will typically be chained
with a more specific error indicating that no implementation was found
for the input dtypes.
Typically promotion should be considered "invalid" between the dtypes of
two arrays when `arr1 == arr2` can safely return all ``False`` because the
dtypes are fundamentally different.
Examples
--------
Datetimes and complex numbers are incompatible classes and cannot be
promoted:
>>> import numpy as np
>>> np.result_type(np.dtype("M8[s]"), np.complex128) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
DTypePromotionError: The DType <class 'numpy.dtype[datetime64]'> could not
be promoted by <class 'numpy.dtype[complex128]'>. This means that no common
DType exists for the given inputs. For example they cannot be stored in a
single array unless the dtype is `object`. The full list of DTypes is:
(<class 'numpy.dtype[datetime64]'>, <class 'numpy.dtype[complex128]'>)
For example for structured dtypes, the structure can mismatch and the
same ``DTypePromotionError`` is given when two structured dtypes with
a mismatch in their number of fields is given:
>>> dtype1 = np.dtype([("field1", np.float64), ("field2", np.int64)])
>>> dtype2 = np.dtype([("field1", np.float64)])
>>> np.promote_types(dtype1, dtype2) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
DTypePromotionError: field names `('field1', 'field2')` and `('field1',)`
mismatch.
""" # noqa: E501
pass
|
DTypePromotionError
|
python
|
ansible__ansible
|
lib/ansible/module_utils/facts/system/selinux.py
|
{
"start": 1027,
"end": 3227
}
|
class ____(BaseFactCollector):
name = 'selinux'
_fact_ids = set() # type: t.Set[str]
def collect(self, module=None, collected_facts=None):
facts_dict = {}
selinux_facts = {}
# If selinux library is missing, only set the status and selinux_python_present since
# there is no way to tell if SELinux is enabled or disabled on the system
# without the library.
if not HAVE_SELINUX:
selinux_facts['status'] = 'Missing selinux Python library'
facts_dict['selinux'] = selinux_facts
facts_dict['selinux_python_present'] = False
return facts_dict
# Set a boolean for testing whether the Python library is present
facts_dict['selinux_python_present'] = True
if not selinux.is_selinux_enabled():
selinux_facts['status'] = 'disabled'
else:
selinux_facts['status'] = 'enabled'
try:
selinux_facts['policyvers'] = selinux.security_policyvers()
except (AttributeError, OSError):
selinux_facts['policyvers'] = 'unknown'
try:
(rc, configmode) = selinux.selinux_getenforcemode()
if rc == 0:
selinux_facts['config_mode'] = SELINUX_MODE_DICT.get(configmode, 'unknown')
else:
selinux_facts['config_mode'] = 'unknown'
except (AttributeError, OSError):
selinux_facts['config_mode'] = 'unknown'
try:
mode = selinux.security_getenforce()
selinux_facts['mode'] = SELINUX_MODE_DICT.get(mode, 'unknown')
except (AttributeError, OSError):
selinux_facts['mode'] = 'unknown'
try:
(rc, policytype) = selinux.selinux_getpolicytype()
if rc == 0:
selinux_facts['type'] = policytype
else:
selinux_facts['type'] = 'unknown'
except (AttributeError, OSError):
selinux_facts['type'] = 'unknown'
facts_dict['selinux'] = selinux_facts
return facts_dict
|
SelinuxFactCollector
|
python
|
oauthlib__oauthlib
|
tests/oauth2/rfc6749/test_server.py
|
{
"start": 8948,
"end": 15975
}
|
class ____(TestCase):
def setUp(self):
self.expires_in = 1800
def set_user(request):
request.user = mock.MagicMock()
request.client = mock.MagicMock()
request.client.client_id = 'mocked_client_id'
return True
self.mock_validator = mock.MagicMock()
self.mock_validator.get_code_challenge.return_value = None
self.mock_validator.authenticate_client.side_effect = set_user
self.addCleanup(setattr, self, 'mock_validator', mock.MagicMock())
self.private_pem = """
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA6TtDhWGwzEOWZP6m/zHoZnAPLABfetvoMPmxPGjFjtDuMRPv
EvI1sbixZBjBtdnc5rTtHUUQ25Am3JzwPRGo5laMGbj1pPyCPxlVi9LK82HQNX0B
YK7tZtVfDHElQA7F4v3j9d3rad4O9/n+lyGIQ0tT7yQcBm2A8FEaP0bZYCLMjwMN
WfaVLE8eXHyv+MfpNNLI9wttLxygKYM48I3NwsFuJgOa/KuodXaAmf8pJnx8t1Wn
nxvaYXFiUn/TxmhM/qhemPa6+0nqq+aWV5eT7xn4K/ghLgNs09v6Yge0pmPl9Oz+
+bjJ+aKRnAmwCOY8/5U5EilAiUOeBoO9+8OXtwIDAQABAoIBAGFTTbXXMkPK4HN8
oItVdDlrAanG7hECuz3UtFUVE3upS/xG6TjqweVLwRqYCh2ssDXFwjy4mXRGDzF4
e/e/6s9Txlrlh/w1MtTJ6ZzTdcViR9RKOczysjZ7S5KRlI3KnGFAuWPcG2SuOWjZ
dZfzcj1Crd/ZHajBAVFHRsCo/ATVNKbTRprFfb27xKpQ2BwH/GG781sLE3ZVNIhs
aRRaED4622kI1E/WXws2qQMqbFKzo0m1tPbLb3Z89WgZJ/tRQwuDype1Vfm7k6oX
xfbp3948qSe/yWKRlMoPkleji/WxPkSIalzWSAi9ziN/0Uzhe65FURgrfHL3XR1A
B8UR+aECgYEA7NPQZV4cAikk02Hv65JgISofqV49P8MbLXk8sdnI1n7Mj10TgzU3
lyQGDEX4hqvT0bTXe4KAOxQZx9wumu05ejfzhdtSsEm6ptGHyCdmYDQeV0C/pxDX
JNCK8XgMku2370XG0AnyBCT7NGlgtDcNCQufcesF2gEuoKiXg6Zjo7sCgYEA/Bzs
9fWGZZnSsMSBSW2OYbFuhF3Fne0HcxXQHipl0Rujc/9g0nccwqKGizn4fGOE7a8F
usQgJoeGcinL7E9OEP/uQ9VX1C9RNVjIxP1O5/Guw1zjxQQYetOvbPhN2QhD1Ye7
0TRKrW1BapcjwLpFQlVg1ZeTPOi5lv24W/wX9jUCgYEAkrMSX/hPuTbrTNVZ3L6r
NV/2hN+PaTPeXei/pBuXwOaCqDurnpcUfFcgN/IP5LwDVd+Dq0pHTFFDNv45EFbq
R77o5n3ZVsIVEMiyJ1XgoK8oLDw7e61+15smtjT69Piz+09pu+ytMcwGn4y3Dmsb
dALzHYnL8iLRU0ubrz0ec4kCgYAJiVKRTzNBPptQom49h85d9ac3jJCAE8o3WTjh
Gzt0uHXrWlqgO280EY/DTnMOyXjqwLcXxHlu26uDP/99tdY/IF8z46sJ1KxetzgI
84f7kBHLRAU9m5UNeFpnZdEUB5MBTbwWAsNcYgiabpMkpCcghjg+fBhOsoLqqjhC
CnwhjQKBgQDkv0QTdyBU84TE8J0XY3eLQwXbrvG2yD5A2ntN3PyxGEneX5WTJGMZ
xJxwaFYQiDS3b9E7b8Q5dg8qa5Y1+epdhx3cuQAWPm+AoHKshDfbRve4txBDQAqh
c6MxSWgsa+2Ld5SWSNbGtpPcmEM3Fl5ttMCNCKtNc0UE16oHwaPAIw==
-----END RSA PRIVATE KEY-----
"""
self.public_pem = """
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA6TtDhWGwzEOWZP6m/zHo
ZnAPLABfetvoMPmxPGjFjtDuMRPvEvI1sbixZBjBtdnc5rTtHUUQ25Am3JzwPRGo
5laMGbj1pPyCPxlVi9LK82HQNX0BYK7tZtVfDHElQA7F4v3j9d3rad4O9/n+lyGI
Q0tT7yQcBm2A8FEaP0bZYCLMjwMNWfaVLE8eXHyv+MfpNNLI9wttLxygKYM48I3N
wsFuJgOa/KuodXaAmf8pJnx8t1WnnxvaYXFiUn/TxmhM/qhemPa6+0nqq+aWV5eT
7xn4K/ghLgNs09v6Yge0pmPl9Oz++bjJ+aKRnAmwCOY8/5U5EilAiUOeBoO9+8OX
twIDAQAB
-----END PUBLIC KEY-----
"""
signed_token = tokens.signed_token_generator(self.private_pem,
user_id=123)
self.endpoint = Server(
self.mock_validator,
token_expires_in=self.expires_in,
token_generator=signed_token,
refresh_token_generator=tokens.random_token_generator
)
@mock.patch('oauthlib.common.generate_token', new=lambda: 'abc')
def test_authorization_grant(self):
body = 'client_id=me&redirect_uri=http%3A%2F%2Fback.to%2Fme&grant_type=authorization_code&code=abc&scope=all+of+them'
headers, body, status_code = self.endpoint.create_token_response(
'', body=body)
body = json.loads(body)
token = {
'token_type': 'Bearer',
'expires_in': self.expires_in,
'access_token': body['access_token'],
'refresh_token': 'abc',
'scope': 'all of them'
}
self.assertEqual(body, token)
body = 'client_id=me&redirect_uri=http%3A%2F%2Fback.to%2Fme&grant_type=authorization_code&code=abc'
headers, body, status_code = self.endpoint.create_token_response(
'', body=body)
body = json.loads(body)
token = {
'token_type': 'Bearer',
'expires_in': self.expires_in,
'access_token': body['access_token'],
'refresh_token': 'abc'
}
self.assertEqual(body, token)
# try with additional custom variables
body = 'client_id=me&redirect_uri=http%3A%2F%2Fback.to%2Fme&grant_type=authorization_code&code=abc&state=foobar'
headers, body, status_code = self.endpoint.create_token_response(
'', body=body)
body = json.loads(body)
token = {
'token_type': 'Bearer',
'expires_in': self.expires_in,
'access_token': body['access_token'],
'refresh_token': 'abc'
}
self.assertEqual(body, token)
@mock.patch('oauthlib.common.generate_token', new=lambda: 'abc')
def test_password_grant(self):
body = 'grant_type=password&username=a&password=hello&scope=all+of+them'
headers, body, status_code = self.endpoint.create_token_response(
'', body=body)
body = json.loads(body)
token = {
'token_type': 'Bearer',
'expires_in': self.expires_in,
'access_token': body['access_token'],
'refresh_token': 'abc',
'scope': 'all of them',
}
self.assertEqual(body, token)
@mock.patch('oauthlib.common.generate_token', new=lambda: 'abc')
def test_scopes_and_user_id_stored_in_access_token(self):
body = 'grant_type=password&username=a&password=hello&scope=all+of+them'
headers, body, status_code = self.endpoint.create_token_response(
'', body=body)
access_token = json.loads(body)['access_token']
claims = common.verify_signed_token(self.public_pem, access_token)
self.assertEqual(claims['scope'], 'all of them')
self.assertEqual(claims['user_id'], 123)
@mock.patch('oauthlib.common.generate_token', new=lambda: 'abc')
def test_client_grant(self):
body = 'grant_type=client_credentials&scope=all+of+them'
headers, body, status_code = self.endpoint.create_token_response(
'', body=body)
body = json.loads(body)
token = {
'token_type': 'Bearer',
'expires_in': self.expires_in,
'access_token': body['access_token'],
'scope': 'all of them',
}
self.assertEqual(body, token)
def test_missing_type(self):
_, body, _ = self.endpoint.create_token_response('', body='client_id=me&redirect_uri=http%3A%2F%2Fback.to%2Fme&code=abc')
token = {'error': 'unsupported_grant_type'}
self.assertEqual(json.loads(body), token)
def test_invalid_type(self):
body = 'client_id=me&redirect_uri=http%3A%2F%2Fback.to%2Fme&grant_type=invalid&code=abc'
_, body, _ = self.endpoint.create_token_response('', body=body)
token = {'error': 'unsupported_grant_type'}
self.assertEqual(json.loads(body), token)
|
SignedTokenEndpointTest
|
python
|
FactoryBoy__factory_boy
|
tests/test_django.py
|
{
"start": 21721,
"end": 22942
}
|
class ____(django_test.TestCase):
def test_undeclared_fields(self):
class WithDefaultValueFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.WithDefaultValue
class Params:
with_bar = factory.Trait(
foo='bar',
)
o = WithDefaultValueFactory()
self.assertEqual('', o.foo)
def test_pointing_with_traits_using_same_name(self):
class PointedFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.PointedModel
class Params:
with_bar = factory.Trait(
foo='bar',
)
class PointerFactory(factory.django.DjangoModelFactory):
class Meta:
model = models.PointerModel
pointed = factory.SubFactory(PointedFactory)
class Params:
with_bar = factory.Trait(
bar='bar',
pointed__with_bar=True,
)
o = PointerFactory(with_bar=True)
self.assertEqual('bar', o.bar)
self.assertEqual('bar', o.pointed.foo)
|
DjangoParamsTestCase
|
python
|
tiangolo__fastapi
|
tests/test_response_model_as_return_annotation.py
|
{
"start": 397,
"end": 442
}
|
class ____(User):
password_hash: str
|
DBUser
|
python
|
openai__openai-python
|
src/openai/types/responses/tool_choice_mcp_param.py
|
{
"start": 246,
"end": 540
}
|
class ____(TypedDict, total=False):
server_label: Required[str]
"""The label of the MCP server to use."""
type: Required[Literal["mcp"]]
"""For MCP tools, the type is always `mcp`."""
name: Optional[str]
"""The name of the tool to call on the server."""
|
ToolChoiceMcpParam
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_froms.py
|
{
"start": 114010,
"end": 116742
}
|
class ____(QueryTest):
run_setup_mappers = None
def test_double_same_mappers_explicit_alias(self):
"""test aliasing of joins with a custom join condition"""
(
addresses,
items,
order_items,
orders,
Item,
User,
Address,
Order,
users,
) = (
self.tables.addresses,
self.tables.items,
self.tables.order_items,
self.tables.orders,
self.classes.Item,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.users,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
"items": relationship(
Item,
secondary=order_items,
lazy="select",
order_by=items.c.id,
)
},
)
self.mapper_registry.map_imperatively(Item, items)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(Address, lazy="select"),
open_orders=relationship(
Order,
primaryjoin=and_(
orders.c.isopen == 1, users.c.id == orders.c.user_id
),
lazy="select",
viewonly=True,
),
closed_orders=relationship(
Order,
primaryjoin=and_(
orders.c.isopen == 0, users.c.id == orders.c.user_id
),
lazy="select",
viewonly=True,
),
),
)
q = fixture_session().query(User)
oo = aliased(Order)
co = aliased(Order)
oi = aliased(Item)
ci = aliased(Item)
# converted from aliased=True. This is kind of the worst case
# kind of query when we don't have aliased=True. two different
# styles are illustrated here, but the important point is that
# the filter() is not doing any trickery, you need to pass it the
# aliased entity explicitly.
eq_(
q.join(oo, User.open_orders)
.join(oi, oo.items)
.filter(oi.id == 4)
.join(User.closed_orders.of_type(co))
.join(co.items.of_type(ci))
.filter(ci.id == 3)
.all(),
[User(id=7)],
)
|
CustomJoinTest
|
python
|
RaRe-Technologies__gensim
|
gensim/similarities/termsim.py
|
{
"start": 3073,
"end": 15423
}
|
class ____(TermSimilarityIndex):
"""
Computes cosine similarities between word embeddings and retrieves most
similar terms for a given term.
Notes
-----
By fitting the word embeddings to a vocabulary that you will be using, you
can eliminate all out-of-vocabulary (OOV) words that you would otherwise
receive from the `most_similar` method. In subword models such as fastText,
this procedure will also infer word-vectors for words from your vocabulary
that previously had no word-vector.
>>> from gensim.test.utils import common_texts, datapath
>>> from gensim.corpora import Dictionary
>>> from gensim.models import FastText
>>> from gensim.models.word2vec import LineSentence
>>> from gensim.similarities import WordEmbeddingSimilarityIndex
>>>
>>> model = FastText(common_texts, vector_size=20, min_count=1) # train word-vectors on a corpus
>>> different_corpus = LineSentence(datapath('lee_background.cor'))
>>> dictionary = Dictionary(different_corpus) # construct a vocabulary on a different corpus
>>> words = [word for word, count in dictionary.most_common()]
>>> word_vectors = model.wv.vectors_for_all(words) # remove OOV word-vectors and infer word-vectors for new words
>>> assert len(dictionary) == len(word_vectors) # all words from our vocabulary received their word-vectors
>>> termsim_index = WordEmbeddingSimilarityIndex(word_vectors)
Parameters
----------
keyedvectors : :class:`~gensim.models.keyedvectors.KeyedVectors`
The word embeddings.
threshold : float, optional
Only embeddings more similar than `threshold` are considered when retrieving word embeddings
closest to a given word embedding.
exponent : float, optional
Take the word embedding similarities larger than `threshold` to the power of `exponent`.
kwargs : dict or None
A dict with keyword arguments that will be passed to the
:meth:`~gensim.models.keyedvectors.KeyedVectors.most_similar` method
when retrieving the word embeddings closest to a given word embedding.
See Also
--------
:class:`~gensim.similarities.levenshtein.LevenshteinSimilarityIndex`
Retrieve most similar terms for a given term using the Levenshtein distance.
:class:`~gensim.similarities.termsim.SparseTermSimilarityMatrix`
Build a term similarity matrix and compute the Soft Cosine Measure.
"""
def __init__(self, keyedvectors, threshold=0.0, exponent=2.0, kwargs=None):
self.keyedvectors = keyedvectors
self.threshold = threshold
self.exponent = exponent
self.kwargs = kwargs or {}
super(WordEmbeddingSimilarityIndex, self).__init__()
def most_similar(self, t1, topn=10):
if t1 not in self.keyedvectors:
logger.debug('an out-of-dictionary term "%s"', t1)
else:
most_similar = self.keyedvectors.most_similar(positive=[t1], topn=topn, **self.kwargs)
for t2, similarity in most_similar:
if similarity > self.threshold:
yield (t2, similarity**self.exponent)
def _shortest_uint_dtype(max_value):
"""Get the shortest unsingned integer data-type required for representing values up to a given
maximum value.
Returns the shortest unsingned integer data-type required for representing values up to a given
maximum value.
Parameters
----------
max_value : int
The maximum value we wish to represent.
Returns
-------
data-type
The shortest unsigned integer data-type required for representing values up to a given
maximum value.
"""
if max_value < 2**8:
return np.uint8
elif max_value < 2**16:
return np.uint16
elif max_value < 2**32:
return np.uint32
return np.uint64
def _create_source(index, dictionary, tfidf, symmetric, dominant, nonzero_limit, dtype):
"""Build a sparse term similarity matrix using a term similarity index.
Returns
-------
matrix : :class:`scipy.sparse.coo_matrix`
The sparse term similarity matrix.
"""
assert isinstance(index, TermSimilarityIndex)
assert dictionary is not None
matrix_order = len(dictionary)
if matrix_order == 0:
raise ValueError('Dictionary provided to SparseTermSimilarityMatrix must not be empty')
logger.info("constructing a sparse term similarity matrix using %s", index)
if nonzero_limit is None:
nonzero_limit = matrix_order
def tfidf_sort_key(term_index):
if isinstance(term_index, tuple):
term_index, *_ = term_index
term_idf = tfidf.idfs[term_index]
return (-term_idf, term_index)
if tfidf is None:
columns = sorted(dictionary.keys())
logger.info("iterating over %i columns in dictionary order", len(columns))
else:
assert max(tfidf.idfs) == matrix_order - 1
columns = sorted(tfidf.idfs.keys(), key=tfidf_sort_key)
logger.info("iterating over %i columns in tf-idf order", len(columns))
nonzero_counter_dtype = _shortest_uint_dtype(nonzero_limit)
column_nonzero = np.array([0] * matrix_order, dtype=nonzero_counter_dtype)
if dominant:
column_sum = np.zeros(matrix_order, dtype=dtype)
if symmetric:
assigned_cells = set()
row_buffer = array('Q')
column_buffer = array('Q')
if dtype is np.float16 or dtype is np.float32:
data_buffer = array('f')
elif dtype is np.float64:
data_buffer = array('d')
else:
raise ValueError('Dtype %s is unsupported, use numpy.float16, float32, or float64.' % dtype)
def cell_full(t1_index, t2_index, similarity):
if dominant and column_sum[t1_index] + abs(similarity) >= 1.0:
return True # after adding the similarity, the matrix would cease to be strongly diagonally dominant
assert column_nonzero[t1_index] <= nonzero_limit
if column_nonzero[t1_index] == nonzero_limit:
return True # after adding the similarity, the column would contain more than nonzero_limit elements
if symmetric and (t1_index, t2_index) in assigned_cells:
return True # a similarity has already been assigned to this cell
return False
def populate_buffers(t1_index, t2_index, similarity):
column_buffer.append(t1_index)
row_buffer.append(t2_index)
data_buffer.append(similarity)
column_nonzero[t1_index] += 1
if symmetric:
assigned_cells.add((t1_index, t2_index))
if dominant:
column_sum[t1_index] += abs(similarity)
try:
from tqdm import tqdm as progress_bar
except ImportError:
def progress_bar(iterable):
return iterable
for column_number, t1_index in enumerate(progress_bar(columns)):
column_buffer.append(column_number)
row_buffer.append(column_number)
data_buffer.append(1.0)
if nonzero_limit <= 0:
continue
t1 = dictionary[t1_index]
num_nonzero = column_nonzero[t1_index]
num_rows = nonzero_limit - num_nonzero
most_similar = [
(dictionary.token2id[term], similarity)
for term, similarity in index.most_similar(t1, topn=num_rows)
if term in dictionary.token2id
] if num_rows > 0 else []
if tfidf is None:
rows = sorted(most_similar)
else:
rows = sorted(most_similar, key=tfidf_sort_key)
for t2_index, similarity in rows:
if cell_full(t1_index, t2_index, similarity):
continue
if not symmetric:
populate_buffers(t1_index, t2_index, similarity)
elif not cell_full(t2_index, t1_index, similarity):
populate_buffers(t1_index, t2_index, similarity)
populate_buffers(t2_index, t1_index, similarity)
data_buffer = np.frombuffer(data_buffer, dtype=dtype)
row_buffer = np.frombuffer(row_buffer, dtype=np.uint64)
column_buffer = np.frombuffer(column_buffer, dtype=np.uint64)
matrix = sparse.coo_matrix((data_buffer, (row_buffer, column_buffer)), shape=(matrix_order, matrix_order))
logger.info(
"constructed a sparse term similarity matrix with %0.06f%% density",
100.0 * matrix.getnnz() / matrix_order**2,
)
return matrix
def _normalize_dense_vector(vector, matrix, normalization):
"""Normalize a dense vector after a change of basis.
Parameters
----------
vector : 1xN ndarray
A dense vector.
matrix : NxN ndarray
A change-of-basis matrix.
normalization : {True, False, 'maintain'}
Whether the vector will be L2-normalized (True; corresponds to the soft
cosine measure), maintain its L2-norm during the change of basis
('maintain'; corresponds to query expansion with partial membership),
or kept as-is (False; corresponds to query expansion).
Returns
-------
vector : ndarray
The normalized dense vector.
"""
if not normalization:
return vector
vector_norm = vector.T.dot(matrix).dot(vector)[0, 0]
assert vector_norm >= 0.0, NON_NEGATIVE_NORM_ASSERTION_MESSAGE
if normalization == 'maintain' and vector_norm > 0.0:
vector_norm /= vector.T.dot(vector)
vector_norm = sqrt(vector_norm)
normalized_vector = vector
if vector_norm > 0.0:
normalized_vector /= vector_norm
return normalized_vector
def _normalize_dense_corpus(corpus, matrix, normalization):
"""Normalize a dense corpus after a change of basis.
Parameters
----------
corpus : MxN ndarray
A dense corpus.
matrix : NxN ndarray
A change-of-basis matrix.
normalization : {True, False, 'maintain'}
Whether the vector will be L2-normalized (True; corresponds to the soft
cosine measure), maintain its L2-norm during the change of basis
('maintain'; corresponds to query expansion with partial membership),
or kept as-is (False; corresponds to query expansion).
Returns
-------
normalized_corpus : ndarray
The normalized dense corpus.
"""
if not normalization:
return corpus
# use the following equality: np.diag(A.T.dot(B).dot(A)) == A.T.dot(B).multiply(A.T).sum(axis=1).T
corpus_norm = np.multiply(corpus.T.dot(matrix), corpus.T).sum(axis=1).T
assert corpus_norm.min() >= 0.0, NON_NEGATIVE_NORM_ASSERTION_MESSAGE
if normalization == 'maintain':
corpus_norm /= np.multiply(corpus.T, corpus.T).sum(axis=1).T
corpus_norm = np.sqrt(corpus_norm)
normalized_corpus = np.multiply(corpus, 1.0 / corpus_norm)
normalized_corpus = np.nan_to_num(normalized_corpus) # account for division by zero
return normalized_corpus
def _normalize_sparse_corpus(corpus, matrix, normalization):
"""Normalize a sparse corpus after a change of basis.
Parameters
----------
corpus : MxN :class:`scipy.sparse.csc_matrix`
A sparse corpus.
matrix : NxN :class:`scipy.sparse.csc_matrix`
A change-of-basis matrix.
normalization : {True, False, 'maintain'}
Whether the vector will be L2-normalized (True; corresponds to the soft
cosine measure), maintain its L2-norm during the change of basis
('maintain'; corresponds to query expansion with partial membership),
or kept as-is (False; corresponds to query expansion).
Returns
-------
normalized_corpus : :class:`scipy.sparse.csc_matrix`
The normalized sparse corpus.
"""
if not normalization:
return corpus
# use the following equality: np.diag(A.T.dot(B).dot(A)) == A.T.dot(B).multiply(A.T).sum(axis=1).T
corpus_norm = corpus.T.dot(matrix).multiply(corpus.T).sum(axis=1).T
assert corpus_norm.min() >= 0.0, NON_NEGATIVE_NORM_ASSERTION_MESSAGE
if normalization == 'maintain':
corpus_norm /= corpus.T.multiply(corpus.T).sum(axis=1).T
corpus_norm = np.sqrt(corpus_norm)
normalized_corpus = corpus.multiply(sparse.csr_matrix(1.0 / corpus_norm))
normalized_corpus[normalized_corpus == np.inf] = 0 # account for division by zero
return normalized_corpus
|
WordEmbeddingSimilarityIndex
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/basic.py
|
{
"start": 12933,
"end": 14156
}
|
class ____(RegexLexer):
"""
For CBM BASIC V2 sources.
.. versionadded:: 1.6
"""
name = 'CBM BASIC V2'
aliases = ['cbmbas']
filenames = ['*.bas']
flags = re.IGNORECASE
tokens = {
'root': [
(r'rem.*\n', Comment.Single),
(r'\s+', Text),
(r'new|run|end|for|to|next|step|go(to|sub)?|on|return|stop|cont'
r'|if|then|input#?|read|wait|load|save|verify|poke|sys|print#?'
r'|list|clr|cmd|open|close|get#?', Keyword.Reserved),
(r'data|restore|dim|let|def|fn', Keyword.Declaration),
(r'tab|spc|sgn|int|abs|usr|fre|pos|sqr|rnd|log|exp|cos|sin|tan|atn'
r'|peek|len|val|asc|(str|chr|left|right|mid)\$', Name.Builtin),
(r'[-+*/^<>=]', Operator),
(r'not|and|or', Operator.Word),
(r'"[^"\n]*.', String),
(r'\d+|[-+]?\d*\.\d*(e[-+]?\d+)?', Number.Float),
(r'[(),:;]', Punctuation),
(r'\w+[$%]?', Name),
]
}
def analyse_text(self, text):
# if it starts with a line number, it shouldn't be a "modern" Basic
# like VB.net
if re.match(r'\d+', text):
return 0.2
|
CbmBasicV2Lexer
|
python
|
neetcode-gh__leetcode
|
python/0904-fruit-into-baskets.py
|
{
"start": 20,
"end": 577
}
|
class ____:
def totalFruit(self, fruits: List[int]) -> int:
count = collections.defaultdict(int)
l, total, res = 0, 0, 0
for r in range(len(fruits)):
count[fruits[r]] += 1
total += 1
while len(count) > 2:
f = fruits[l]
count[f] -= 1
total -= 1
l += 1
if not count[f]:
count.pop(f)
res = max(res, total)
return res
|
Solution
|
python
|
walkccc__LeetCode
|
solutions/261. Graph Valid Tree/261-2.py
|
{
"start": 557,
"end": 793
}
|
class ____:
def validTree(self, n: int, edges: list[list[int]]) -> bool:
if n == 0 or len(edges) != n - 1:
return False
uf = UnionFind(n)
for u, v in edges:
uf.unionByRank(u, v)
return uf.count == 1
|
Solution
|
python
|
apache__airflow
|
dev/breeze/src/airflow_breeze/utils/packages.py
|
{
"start": 3069,
"end": 55988
}
|
class ____(NamedTuple):
"""Store details about python packages"""
package: str
version_required: str
@classmethod
def from_requirement(cls, requirement_string: str) -> PipRequirements:
from packaging.requirements import Requirement
req = Requirement(requirement_string)
package = req.name
if req.extras:
# Sort extras by name
package += f"[{','.join(sorted(req.extras))}]"
version_required = ""
if req.specifier:
# String representation of `packaging.specifiers.SpecifierSet` sorted by the operator
# which might not looking good, e.g. '>=5.3.0,<6,!=5.3.3,!=5.3.2' transform into the
# '!=5.3.3,!=5.3.2,<6,>=5.3.0'. Instead of that we sort by version and resulting string would be
# '>=5.3.0,!=5.3.2,!=5.3.3,<6'
version_required = ",".join(map(str, sorted(req.specifier, key=lambda spec: spec.version)))
if req.marker:
version_required += f"; {req.marker}"
return cls(package=package, version_required=version_required.strip())
@clearable_cache
def provider_yaml_schema() -> dict[str, Any]:
with open(PROVIDER_DATA_SCHEMA_PATH) as schema_file:
return json.load(schema_file)
PROVIDER_METADATA: dict[str, dict[str, Any]] = {}
def refresh_provider_metadata_from_yaml_file(provider_yaml_path: Path):
import yaml
with open(provider_yaml_path) as yaml_file:
provider_yaml_content = yaml.safe_load(yaml_file)
provider_id = get_short_package_name(provider_yaml_content["package-name"])
PROVIDER_METADATA[provider_id] = provider_yaml_content
toml_content = load_pyproject_toml(provider_yaml_path.parent / "pyproject.toml")
dependencies = toml_content["project"].get("dependencies")
if dependencies:
PROVIDER_METADATA[provider_id]["dependencies"] = dependencies
optional_dependencies = toml_content["project"].get("optional-dependencies")
if optional_dependencies:
PROVIDER_METADATA[provider_id]["optional-dependencies"] = optional_dependencies
dependency_groups = toml_content.get("dependency-groups")
if dependency_groups and dependency_groups.get("dev"):
devel_dependencies = [
dep for dep in dependency_groups.get("dev") if not dep.startswith("apache-airflow")
]
PROVIDER_METADATA[provider_id]["devel-dependencies"] = devel_dependencies
def clear_cache_for_provider_metadata(provider_yaml_path: Path):
get_provider_distributions_metadata.cache_clear()
refresh_provider_metadata_from_yaml_file(provider_yaml_path)
@clearable_cache
def get_all_provider_yaml_paths() -> list[Path]:
"""Returns list of provider.yaml files"""
return sorted(list(AIRFLOW_PROVIDERS_ROOT_PATH.glob("**/provider.yaml")))
def get_provider_id_from_path(file_path: Path) -> str | None:
"""
Get the provider id from the path of the file it belongs to.
"""
for parent in file_path.parents:
# This works fine for both new and old providers structure - because we moved provider.yaml to
# the top-level of the provider and this code finding "providers" will find the "providers" package
# in old structure and "providers" directory in new structure - in both cases we can determine
# the provider id from the relative folders
if (parent / "provider.yaml").exists():
for providers_root_candidate in parent.parents:
if providers_root_candidate.name == "providers":
return parent.relative_to(providers_root_candidate).as_posix().replace("/", ".")
return None
return None
@clearable_cache
def get_provider_distributions_metadata() -> dict[str, dict[str, Any]]:
"""
Load all data from providers files
:return: A list containing the contents of all provider.yaml files.
"""
if PROVIDER_METADATA:
return PROVIDER_METADATA
for provider_yaml_path in get_all_provider_yaml_paths():
refresh_provider_metadata_from_yaml_file(provider_yaml_path)
return PROVIDER_METADATA
def validate_provider_info_with_runtime_schema(provider_info: dict[str, Any]) -> None:
"""Validates provider info against the runtime schema.
This way we check if the provider info in the packages is future-compatible.
The Runtime Schema should only change when there is a major version change.
:param provider_info: provider info to validate
"""
import jsonschema
schema = json.loads(PROVIDER_RUNTIME_DATA_SCHEMA_PATH.read_text())
try:
jsonschema.validate(provider_info, schema=schema)
except jsonschema.ValidationError as ex:
get_console().print(
"[red]Error when validating schema. The schema must be compatible with "
"[bold]'airflow/provider_info.schema.json'[/bold].\n"
f"Original exception [bold]{type(ex).__name__}: {ex}[/]"
)
raise SystemExit(1)
def filter_provider_info_data(provider_info: dict[str, Any]) -> dict[str, Any]:
json_schema_dict = json.loads(PROVIDER_RUNTIME_DATA_SCHEMA_PATH.read_text())
runtime_properties = json_schema_dict["properties"].keys()
return_dict = {
property: provider_info[property]
for property in provider_info.keys()
if property in runtime_properties
}
return return_dict
def get_provider_info_dict(provider_id: str) -> dict[str, Any]:
"""Retrieves provider info from the provider yaml file.
:param provider_id: package id to retrieve provider.yaml from
:return: provider_info dictionary
"""
provider_yaml_dict = get_provider_distributions_metadata().get(provider_id)
if provider_yaml_dict:
provider_yaml_dict = filter_provider_info_data(provider_yaml_dict)
validate_provider_info_with_runtime_schema(provider_yaml_dict)
return provider_yaml_dict or {}
@lru_cache
def get_suspended_provider_ids() -> list[str]:
return get_available_distributions(include_suspended=True, include_regular=False)
@lru_cache
def get_suspended_provider_folders() -> list[str]:
return [provider_id.replace(".", "/") for provider_id in get_suspended_provider_ids()]
@lru_cache
def get_excluded_provider_ids(python_version: str) -> list[str]:
metadata = get_provider_distributions_metadata()
return [
provider_id
for provider_id, provider_metadata in metadata.items()
if python_version in provider_metadata.get("excluded-python-versions", [])
]
@lru_cache
def get_excluded_provider_folders(python_version: str) -> list[str]:
return [provider_id.replace(".", "/") for provider_id in get_excluded_provider_ids(python_version)]
@lru_cache
def get_removed_provider_ids() -> list[str]:
return get_available_distributions(include_removed=True, include_regular=False)
@lru_cache
def get_not_ready_provider_ids() -> list[str]:
return get_available_distributions(include_not_ready=True, include_regular=False)
def get_provider_requirements(provider_id: str) -> list[str]:
package_metadata = get_provider_distributions_metadata().get(provider_id)
return package_metadata["dependencies"] if package_metadata else []
def get_provider_optional_dependencies(provider_id: str) -> dict[str, list[str]]:
package_metadata = get_provider_distributions_metadata().get(provider_id)
return package_metadata.get("optional-dependencies", {}) if package_metadata else {}
@lru_cache
def get_available_distributions(
include_non_provider_doc_packages: bool = False,
include_all_providers: bool = False,
include_suspended: bool = False,
include_removed: bool = False,
include_not_ready: bool = False,
include_regular: bool = True,
) -> list[str]:
"""
Return provider ids for all packages that are available currently (not suspended).
:param include_suspended: whether the suspended packages should be included
:param include_removed: whether the removed packages should be included
:param include_not_ready: whether the not-ready packages should be included
:param include_regular: whether the regular packages should be included
:param include_non_provider_doc_packages: whether the non-provider doc packages should be included
(packages like apache-airflow, helm-chart, docker-stack)
:param include_all_providers: whether "all-providers" should be included ni the list.
"""
provider_dependencies = json.loads(PROVIDER_DEPENDENCIES_JSON_PATH.read_text())
valid_states = set()
if include_not_ready:
valid_states.add("not-ready")
if include_regular:
valid_states.update({"ready", "pre-release"})
if include_suspended:
valid_states.add("suspended")
if include_removed:
valid_states.add("removed")
available_packages: list[str] = [
provider_id
for provider_id, provider_dependencies in provider_dependencies.items()
if provider_dependencies["state"] in valid_states
]
if include_non_provider_doc_packages:
available_packages.extend(REGULAR_DOC_PACKAGES)
if include_all_providers:
available_packages.append("all-providers")
return sorted(set(available_packages))
def expand_all_provider_distributions(
    short_doc_packages: tuple[str, ...],
    include_removed: bool = False,
    include_not_ready: bool = False,
) -> tuple[str, ...]:
    """In case there are "all-providers" in the list, expand the list with all providers."""
    if "all-providers" not in short_doc_packages:
        return short_doc_packages
    expanded = {package for package in short_doc_packages if package != "all-providers"}
    expanded.update(
        get_available_distributions(include_removed=include_removed, include_not_ready=include_not_ready)
    )
    return tuple(expanded)
def get_long_package_names(short_form_providers: Iterable[str]) -> tuple[str, ...]:
    """Map each short provider form to its long documentation package name."""
    return tuple(get_long_package_name(short_form) for short_form in short_form_providers)
def get_long_package_name(short_form_provider: str) -> str:
    """Return the long package name for a short provider form (doc packages pass through unchanged)."""
    if short_form_provider in REGULAR_DOC_PACKAGES:
        return short_form_provider
    return LONG_PROVIDERS_PREFIX + short_form_provider.replace(".", "-")
def get_short_package_names(long_form_providers: Iterable[str]) -> tuple[str, ...]:
    """Map each long package name to its short provider form."""
    return tuple(get_short_package_name(long_name) for long_name in long_form_providers)
def get_short_package_name(long_form_provider: str) -> str:
    """Return the short provider form for a long package name.

    Regular documentation packages are returned unchanged; anything else must start with
    the long providers prefix, otherwise a ValueError is raised.
    """
    if long_form_provider in REGULAR_DOC_PACKAGES:
        return long_form_provider
    if not long_form_provider.startswith(LONG_PROVIDERS_PREFIX):
        raise ValueError(
            f"Invalid provider name: {long_form_provider}. Should start with {LONG_PROVIDERS_PREFIX}"
        )
    return long_form_provider.removeprefix(LONG_PROVIDERS_PREFIX).replace("-", ".")
def find_matching_long_package_names(
    short_packages: tuple[str, ...],
    filters: tuple[str, ...] | None = None,
) -> tuple[str, ...]:
    """Finds matching long package names based on short package name and package filters specified.

    The sequence of specified packages / filters is kept (filters first, packages next). In case there
    are filters that do not match any of the packages error is raised.

    :param short_packages: short forms of package names
    :param filters: package filters specified
    """
    available_doc_packages = list(
        get_long_package_names(get_available_distributions(include_non_provider_doc_packages=True))
    )
    if not filters and not short_packages:
        # Nothing requested explicitly - return all available packages. (The previous
        # `available_doc_packages.extend(filters or ())` here was dead code: filters is falsy.)
        return tuple(set(available_doc_packages))
    processed_package_filters = list(filters or ())
    processed_package_filters.extend(get_long_package_names(short_packages))
    # Removed providers still have historical docs, so they remain matchable by filters.
    removed_packages: list[str] = [
        f"apache-airflow-providers-{provider.replace('.', '-')}" for provider in get_removed_provider_ids()
    ]
    all_packages_including_removed: list[str] = available_doc_packages + removed_packages
    invalid_filters = [
        f
        for f in processed_package_filters
        if not any(fnmatch.fnmatch(p, f) for p in all_packages_including_removed)
    ]
    if invalid_filters:
        raise SystemExit(
            f"Some filters did not find any package: {invalid_filters}, Please check if they are correct."
        )
    return tuple(
        p
        for p in all_packages_including_removed
        if any(fnmatch.fnmatch(p, f) for f in processed_package_filters)
    )
def get_provider_root_path(provider_id: str) -> Path:
    """Return the root source path of the provider (dotted id maps to nested directories)."""
    return AIRFLOW_PROVIDERS_ROOT_PATH.joinpath(*provider_id.split("."))
def get_possible_old_provider_paths(provider_id: str) -> list[Path]:
    """Return historical locations of the provider sources (used to get historical commits)."""
    id_parts = provider_id.split(".")
    paths = [
        AIRFLOW_ORIGINAL_PROVIDERS_DIR.joinpath(*id_parts),
        PREVIOUS_AIRFLOW_PROVIDERS_NS_PACKAGE_PATH.joinpath(*id_parts),
        DOCS_ROOT / f"apache-airflow-providers-{provider_id.replace('.', '-')}",
    ]
    if provider_id == "edge3":
        # edge3 history also lives under the older "edge" and "edgeexecutor" locations.
        paths.extend((get_provider_root_path("edge"), get_provider_root_path("edgeexecutor")))
    return paths
def get_documentation_package_path(provider_id: str) -> Path:
    """Return the path of the "docs" directory inside the provider sources."""
    provider_dir = AIRFLOW_PROVIDERS_ROOT_PATH.joinpath(*provider_id.split("."))
    return provider_dir / "docs"
def get_pip_package_name(provider_id: str) -> str:
    """
    Returns PIP package name for the package id.

    :param provider_id: id of the package
    :return: the name of pip package
    """
    return f"apache-airflow-providers-{provider_id.replace('.', '-')}"
def get_dist_package_name_prefix(provider_id: str) -> str:
    """
    Returns Wheel package name prefix for the package id.

    :param provider_id: id of the package
    :return: the name of wheel package prefix
    """
    return f"apache_airflow_providers_{provider_id.replace('.', '_')}"
def floor_version_suffix(version_suffix: str) -> str:
    """Floor a version suffix to the lowest value of its kind (e.g. ``rc3`` -> ``rc1``).

    The local version part cannot be used in ``>=`` comparisons, so it is dropped
    from the floored result.
    """
    from packaging.version import Version

    base_version = "1.0.0"
    parsed = Version(base_version + version_suffix)
    floored_suffix = ""
    if parsed.pre:
        floored_suffix += parsed.pre[0] + "1"
    if parsed.dev is not None:
        floored_suffix += ".dev0"
    if parsed.post is not None:
        floored_suffix += ".post0"
    return str(Version(base_version + floored_suffix))[len(base_version) :]
def apply_version_suffix(install_clause: str, version_suffix: str) -> str:
    """Apply a floored version suffix to an ``apache-airflow...>=X`` install clause.

    Applies version suffix to the apache-airflow and provider package dependencies to make
    sure that pre-release versions have correct limits - this addresses the issue with how
    pip handles pre-release versions when packages are pre-release and refer to each other - we
    need to make sure that all our >= references for all apache-airflow packages in pre-release
    versions of providers contain the same suffix as the provider itself.

    For example `apache-airflow-providers-fab==2.0.0.dev0` should refer to
    `apache-airflow>=2.9.0.dev0` and not `apache-airflow>=2.9.0` because both packages are
    released together and >= 2.9.0 is not correct reference for 2.9.0.dev0 version of Airflow.
    This assumes a local release, one where the suffix starts with a plus sign, uses the last
    version of the dependency, so it is not necessary to add the suffix to the dependency.
    """
    # Resolution is based on PyPI versions, so the local version part is ignored.
    pypi_version_suffix = remove_local_version_suffix(version_suffix)
    if (
        not pypi_version_suffix
        or not install_clause.startswith("apache-airflow")
        or ">=" not in install_clause
    ):
        return install_clause
    prefix, version = install_clause.split(">=")
    # If version has an upper limit (e.g. ">=2.10.0,<3.0"), cut it off not to fail parsing.
    version = version.split(",", 1)[0]
    from packaging.version import Version

    target_version = Version(Version(version).base_version + floor_version_suffix(pypi_version_suffix))
    return f"{prefix}>={target_version}"
def get_provider_yaml(provider_id: str) -> Path:
    """Return the path to the provider.yaml file of the given provider."""
    return AIRFLOW_PROVIDERS_ROOT_PATH.joinpath(*provider_id.split(".")) / "provider.yaml"
def load_pyproject_toml(pyproject_toml_file_path: Path) -> dict[str, Any]:
    """Parse a pyproject.toml file and return its content as a dict.

    On a parse error the offending file is printed to the console with syntax
    highlighting and line numbers, then the process exits with an error code.
    """
    try:
        import tomllib
    except ImportError:
        # tomllib is stdlib only on Python 3.11+; fall back to the tomli backport.
        import tomli as tomllib
    toml_content = pyproject_toml_file_path.read_text()
    try:
        return tomllib.loads(toml_content)
    except tomllib.TOMLDecodeError as e:
        # Build the highlighted listing only when we actually have to display it.
        syntax = Syntax(toml_content, "toml", theme="ansi_dark", line_numbers=True)
        get_console().print(syntax)
        get_console().print(f"[red]Error when loading {pyproject_toml_file_path}: {e}:")
        sys.exit(1)
def get_provider_details(provider_id: str) -> ProviderPackageDetails:
    """Assemble the ProviderPackageDetails for the given provider id.

    :raises RuntimeError: when the provider has no provider.yaml defined.
    """
    provider_info = get_provider_distributions_metadata().get(provider_id)
    if not provider_info:
        raise RuntimeError(f"The provider {provider_id} has no provider.yaml defined.")
    plugins: list[PluginInfo] = []
    for plugin in provider_info.get("plugins", []):
        module_name, plugin_class = plugin["plugin-class"].rsplit(".", maxsplit=1)
        plugins.append(
            PluginInfo(name=plugin["name"], package_name=module_name, class_name=plugin_class)
        )
    provider_yaml_path = get_provider_yaml(provider_id)
    provider_root = provider_yaml_path.parent
    pyproject_toml = load_pyproject_toml(provider_root / "pyproject.toml")
    package_path = (provider_root / "src" / "airflow" / "providers").joinpath(*provider_id.split("."))
    return ProviderPackageDetails(
        provider_id=provider_id,
        provider_yaml_path=provider_yaml_path,
        source_date_epoch=provider_info["source-date-epoch"],
        full_package_name=f"airflow.providers.{provider_id}",
        pypi_package_name=f"apache-airflow-providers-{provider_id.replace('.', '-')}",
        root_provider_path=provider_root,
        base_provider_package_path=package_path,
        possible_old_provider_paths=get_possible_old_provider_paths(provider_id),
        documentation_provider_distribution_path=get_documentation_package_path(provider_id),
        changelog_path=provider_root / "docs" / "changelog.rst",
        provider_description=provider_info["description"],
        dependencies=pyproject_toml["project"]["dependencies"],
        versions=provider_info["versions"],
        excluded_python_versions=provider_info.get("excluded-python-versions", []),
        plugins=plugins,
        removed=provider_info["state"] == "removed",
        extra_project_metadata=provider_info.get("extra-project-metadata", ""),
    )
def get_min_airflow_version(provider_id: str) -> str:
    """Return the minimum Airflow version required by the provider.

    Falls back to the global MIN_AIRFLOW_VERSION unless the provider's
    apache-airflow dependency pins a higher lower bound.
    """
    from packaging.version import Version as PackagingVersion

    provider_details = get_provider_details(provider_id=provider_id)
    min_airflow_version = MIN_AIRFLOW_VERSION
    for dependency in provider_details.dependencies:
        if not dependency.startswith("apache-airflow>="):
            continue
        # If version has an upper limit (e.g. ">=2.10.0,<3.0"), cut it off not to fail parsing.
        lower_bound = dependency.split(">=")[1].split(",")[0]
        if PackagingVersion(lower_bound) > PackagingVersion(MIN_AIRFLOW_VERSION):
            min_airflow_version = lower_bound
    return min_airflow_version
def get_python_requires(provider_id: str) -> str:
    """Build the requires-python specifier for the provider, honoring excluded versions."""
    provider_details = get_provider_details(provider_id=provider_id)
    exclusions = [f"!={p}" for p in provider_details.excluded_python_versions]
    return ", ".join(["~=3.10", *exclusions])
def convert_cross_package_dependencies_to_table(
    cross_package_dependencies: list[str],
    markdown: bool = True,
) -> str:
    """
    Converts cross-package dependencies to a Markdown table

    :param cross_package_dependencies: list of cross-package dependencies
    :param markdown: if True, Markdown format is used else rst
    :return: formatted table
    """
    from tabulate import tabulate

    prefix = "apache-airflow-providers-"
    base_url = "https://airflow.apache.org/docs/"
    rows = []
    for dependency in cross_package_dependencies:
        dashed_id = dependency.replace(".", "-")
        pip_package_name = f"{prefix}{dashed_id}"
        if markdown:
            link = f"[{pip_package_name}]({base_url}{dashed_id})"
            extra = f"`{dependency}`"
        else:
            link = f"`{pip_package_name} <{base_url}{prefix}{dashed_id}>`_"
            extra = f"``{dependency}``"
        rows.append((link, extra))
    return tabulate(rows, headers=["Dependent package", "Extra"], tablefmt="pipe" if markdown else "rst")
def convert_optional_dependencies_to_table(
    optional_dependencies: dict[str, list[str]],
    markdown: bool = True,
) -> str:
    """
    Converts optional dependencies to a Markdown/RST table

    :param optional_dependencies: dict of optional dependencies
    :param markdown: if True, Markdown format is used else rst
    :return: formatted table
    """
    import html

    from tabulate import tabulate

    def _fmt(text: str) -> str:
        # Markdown uses single backticks, RST uses double.
        return f"`{text}`" if markdown else f"``{text}``"

    rows = []
    for extra_name, dependencies in optional_dependencies.items():
        deps_cell = ", ".join(_fmt(html.unescape(dep)) for dep in dependencies)
        rows.append((_fmt(extra_name), deps_cell))
    return tabulate(rows, headers=["Extra", "Dependencies"], tablefmt="pipe" if markdown else "rst")
def get_cross_provider_dependent_packages(provider_id: str) -> list[str]:
    """Return ids of providers this provider depends on (empty for removed providers)."""
    return (
        []
        if provider_id in get_removed_provider_ids()
        else PROVIDER_DEPENDENCIES[provider_id]["cross-providers-deps"]
    )
def get_license_files(provider_id: str) -> str:
    """Return the repr of the list of license file patterns shipped with the provider.

    The ``fab`` provider bundles additional third-party licenses.
    """
    # repr() of a list of plain strings already uses single quotes, so the previous
    # .replace('"', "'") post-processing was a no-op and has been removed.
    if provider_id == "fab":
        return str(["LICENSE", "NOTICE", "3rd-party-licenses/LICENSES-*"])
    return str(["LICENSE", "NOTICE"])
def get_provider_jinja_context(
    provider_id: str,
    current_release_version: str,
    version_suffix: str,
):
    """Build the Jinja context used to render provider build files and documentation.

    :param provider_id: id of the provider
    :param current_release_version: version of the release the context is built for
    :param version_suffix: version suffix (e.g. rc1) of the release, may be empty
    """
    provider_details = get_provider_details(provider_id=provider_id)
    release_version_no_leading_zeros = strip_leading_zeros_from_version(current_release_version)
    changelog = provider_details.changelog_path.read_text()
    supported_python_versions = [
        p for p in ALLOWED_PYTHON_MAJOR_MINOR_VERSIONS if p not in provider_details.excluded_python_versions
    ]
    # Compute values used more than once in the context exactly once (the previous code
    # called get_cross_provider_dependent_packages, get_provider_requirements and
    # get_provider_optional_dependencies twice each).
    cross_providers_dependencies = get_cross_provider_dependent_packages(provider_id=provider_id)
    pip_requirements = get_provider_requirements(provider_details.provider_id)
    optional_dependencies = get_provider_optional_dependencies(provider_id)
    requires_python_version: str = f">={DEFAULT_PYTHON_MAJOR_MINOR_VERSION}"
    # Most providers require the same python versions, but some may have exclusions
    for excluded_python_version in provider_details.excluded_python_versions:
        requires_python_version += f",!={excluded_python_version}"
    context: dict[str, Any] = {
        "PROVIDER_ID": provider_details.provider_id,
        "PACKAGE_PIP_NAME": get_pip_package_name(provider_details.provider_id),
        "PACKAGE_DIST_PREFIX": get_dist_package_name_prefix(provider_details.provider_id),
        "FULL_PACKAGE_NAME": provider_details.full_package_name,
        "RELEASE": current_release_version,
        "RELEASE_NO_LEADING_ZEROS": release_version_no_leading_zeros,
        "VERSION_SUFFIX": version_suffix,
        "PIP_REQUIREMENTS": pip_requirements,
        "PROVIDER_DESCRIPTION": provider_details.provider_description,
        "CHANGELOG_RELATIVE_PATH": os.path.relpath(
            provider_details.root_provider_path,
            provider_details.documentation_provider_distribution_path,
        ),
        "LICENSE_FILES": get_license_files(provider_details.provider_id),
        "CHANGELOG": changelog,
        "SUPPORTED_PYTHON_VERSIONS": supported_python_versions,
        "PLUGINS": provider_details.plugins,
        "MIN_AIRFLOW_VERSION": get_min_airflow_version(provider_id),
        "PROVIDER_REMOVED": provider_details.removed,
        "PROVIDER_INFO": get_provider_info_dict(provider_id),
        "CROSS_PROVIDERS_DEPENDENCIES": cross_providers_dependencies,
        "CROSS_PROVIDERS_DEPENDENCIES_TABLE_RST": convert_cross_package_dependencies_to_table(
            cross_providers_dependencies, markdown=False
        ),
        "PIP_REQUIREMENTS_TABLE_RST": convert_pip_requirements_to_table(pip_requirements, markdown=False),
        "REQUIRES_PYTHON": requires_python_version,
        "EXTRA_PROJECT_METADATA": provider_details.extra_project_metadata,
        "OPTIONAL_DEPENDENCIES": optional_dependencies,
        "OPTIONAL_DEPENDENCIES_TABLE_RST": convert_optional_dependencies_to_table(
            optional_dependencies, markdown=False
        ),
    }
    return context
def render_template(
    template_name: str,
    context: dict[str, Any],
    extension: str,
    autoescape: bool = True,
    lstrip_blocks: bool = False,
    trim_blocks: bool = False,
    keep_trailing_newline: bool = False,
) -> str:
    """
    Renders template based on its name. Reads the template from <name>_TEMPLATE.md.jinja2 in current dir.

    :param template_name: name of the template to use
    :param context: Jinja2 context
    :param extension: Target file extension
    :param autoescape: Whether to autoescape HTML
    :param lstrip_blocks: Whether to strip leading blocks
    :param trim_blocks: Whether to trim blocks
    :param keep_trailing_newline: Whether to keep the newline in rendered output
    :return: rendered template
    """
    import jinja2

    environment = jinja2.Environment(
        loader=jinja2.FileSystemLoader(
            searchpath=BREEZE_SOURCES_PATH / "airflow_breeze" / "templates"
        ),
        undefined=jinja2.StrictUndefined,
        autoescape=autoescape,
        lstrip_blocks=lstrip_blocks,
        trim_blocks=trim_blocks,
        keep_trailing_newline=keep_trailing_newline,
    )
    return environment.get_template(f"{template_name}_TEMPLATE{extension}.jinja2").render(context)
def make_sure_remote_apache_exists_and_fetch(github_repository: str = "apache/airflow"):
    """Make sure that apache remote exist in git.

    We need to take a log from the apache repository main branch - not locally because we might
    not have the latest version. Also, the local repo might be shallow, so we need to
    un-shallow it to see all the history.

    This will:
    * check if the remote exists and add if it does not
    * check if the local repo is shallow, mark it to un-shallow in this case
    * fetch from the remote including all tags and overriding local tags in case
      they are set differently

    :param github_repository: GitHub repository to add as the remote when it is missing
    """
    try:
        run_command(["git", "remote", "get-url", HTTPS_REMOTE], text=True, capture_output=True)
    except subprocess.CalledProcessError as ex:
        # `git remote get-url` fails with 128 (or 2 on some git versions) when the remote is missing.
        if ex.returncode == 128 or ex.returncode == 2:
            run_command(
                [
                    "git",
                    "remote",
                    "add",
                    HTTPS_REMOTE,
                    f"https://github.com/{github_repository}.git",
                ],
                check=True,
            )
        else:
            get_console().print(
                f"[error]Error {ex}[/]\n[error]When checking if {HTTPS_REMOTE} is set.[/]\n\n"
            )
            sys.exit(1)
    get_console().print("[info]Fetching full history and tags from remote.")
    get_console().print("[info]This might override your local tags!")
    result = run_command(
        ["git", "rev-parse", "--is-shallow-repository"],
        check=True,
        capture_output=True,
        text=True,
    )
    is_shallow_repo = result.stdout.strip() == "true"
    fetch_command = ["git", "fetch", "--tags", "--force", HTTPS_REMOTE]
    if is_shallow_repo:
        fetch_command.append("--unshallow")
    try:
        run_command(fetch_command)
    except subprocess.CalledProcessError as e:
        # Fixed: the "Please refresh" line previously ended with a stray `"` character
        # that leaked into the console output.
        get_console().print(
            f"[error]Error {e}[/]\n"
            f"[error]When fetching tags from remote. Your tags might not be refreshed.[/]\n\n"
            f"[warning]Please refresh the tags manually via:[/]\n\n"
            f"{' '.join(fetch_command)}\n\n"
        )
        sys.exit(1)
def convert_pip_requirements_to_table(requirements: Iterable[str], markdown: bool = True) -> str:
    """
    Converts PIP requirement list to a Markdown table.

    :param requirements: requirements list
    :param markdown: if True, Markdown format is used else rst
    :return: formatted table
    """
    from tabulate import tabulate

    def _fmt(text: str) -> str:
        # Markdown uses single backticks, RST uses double.
        return f"`{text}`" if markdown else f"``{text}``"

    rows = []
    for dependency in requirements:
        req = PipRequirements.from_requirement(dependency)
        version_cell = _fmt(req.version_required) if req.version_required else ""
        rows.append((_fmt(req.package), version_cell))
    return tabulate(
        rows, headers=["PIP package", "Version required"], tablefmt="pipe" if markdown else "rst"
    )
def tag_exists_for_provider(provider_id: str, current_tag: str) -> bool:
    """Return true if the tag exists in the provider repository."""
    provider_details = get_provider_details(provider_id)
    rev_parse_result = run_command(
        ["git", "rev-parse", current_tag],
        cwd=provider_details.root_provider_path,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
        check=False,
    )
    return rev_parse_result.returncode == 0
def get_latest_provider_tag(provider_id: str, suffix: str) -> str:
    """Returns latest tag for the provider."""
    latest_version = get_provider_details(provider_id).versions[0]
    return get_version_tag(latest_version, provider_id, suffix)
def regenerate_pyproject_toml(
    context: dict[str, Any], provider_details: ProviderPackageDetails, version_suffix: str | None
):
    """Regenerate the provider's pyproject.toml from the template, preserving manual sections.

    Extracts required/optional dependencies and the additional devel dependency group from
    the current file as raw text (to keep their comments), optionally applies the version
    suffix to apache-airflow dependencies, adds missing cross-provider extras and renders
    the pyproject template back to disk.
    """
    get_pyproject_toml_path = provider_details.root_provider_path / "pyproject.toml"
    # we want to preserve comments in dependencies - both required and additional,
    # so we should not really parse the toml file but extract dependencies "as is" in text form and pass
    # them to context. While this is not "generic toml" perfect, for provider pyproject.toml files it is
    # good enough, because we fully control the pyproject.toml content for providers as they are generated
    # from our templates (Except the dependencies section that is manually updated)
    pyproject_toml_content = get_pyproject_toml_path.read_text()
    required_dependencies: list[str] = []
    optional_dependencies: list[str] = []
    dependency_groups: list[str] = []
    # Line-based state machine: these flags track which pyproject section we are inside.
    in_required_dependencies = False
    in_optional_dependencies = False
    in_additional_devel_dependency_groups = False
    for line in pyproject_toml_content.splitlines():
        if line == "dependencies = [":
            in_required_dependencies = True
            continue
        if in_required_dependencies and line == "]":
            in_required_dependencies = False
            continue
        if line == (
            " # Additional devel dependencies (do not remove this "
            "line and add extra development dependencies)"
        ):
            in_additional_devel_dependency_groups = True
            continue
        if in_additional_devel_dependency_groups and line == "]":
            in_additional_devel_dependency_groups = False
            continue
        if line == "[project.optional-dependencies]":
            in_optional_dependencies = True
            continue
        if in_optional_dependencies and line == "":
            in_optional_dependencies = False
            continue
        if in_optional_dependencies and line.startswith("["):
            # Another TOML table started - the optional-dependencies section ended.
            in_optional_dependencies = False
        if in_required_dependencies:
            required_dependencies.append(line)
        if in_optional_dependencies:
            optional_dependencies.append(line)
        if in_additional_devel_dependency_groups:
            dependency_groups.append(line)
    # For additional providers we want to load the dependencies and see if cross-provider-dependencies are
    # present and if not, add them to the optional dependencies
    if version_suffix:
        # Apply the version suffix to apache-airflow* specifiers in both dependency kinds.
        new_required_dependencies = []
        for dependency in required_dependencies:
            modified_dependency = modify_dependency_with_suffix(dependency, version_suffix)
            new_required_dependencies.append(modified_dependency)
        required_dependencies = new_required_dependencies
        new_optional_dependencies = []
        for dependency in optional_dependencies:
            modified_dependency = modify_dependency_with_suffix(dependency, version_suffix)
            new_optional_dependencies.append(modified_dependency)
        optional_dependencies = new_optional_dependencies
    context["INSTALL_REQUIREMENTS"] = "\n".join(required_dependencies)
    # Suffixed (pre-release) packages document against the staging site.
    context["AIRFLOW_DOC_URL"] = (
        "https://airflow.staged.apache.org" if version_suffix else "https://airflow.apache.org"
    )
    cross_provider_ids = set(PROVIDER_DEPENDENCIES.get(provider_details.provider_id)["cross-providers-deps"])
    cross_provider_dependencies = []
    # Add cross-provider dependencies to the optional dependencies if they are missing
    for provider_id in sorted(cross_provider_ids):
        cross_provider_dependencies.append(f' "{get_pip_package_name(provider_id)}",')
        if f'"{provider_id}" = [' not in optional_dependencies and get_pip_package_name(
            provider_id
        ) not in "\n".join(required_dependencies):
            optional_dependencies.append(f'"{provider_id}" = [')
            optional_dependencies.append(f' "{get_pip_package_name(provider_id)}"')
            optional_dependencies.append("]")
    context["EXTRAS_REQUIREMENTS"] = "\n".join(optional_dependencies)
    formatted_dependency_groups = "\n".join(dependency_groups)
    if formatted_dependency_groups:
        formatted_dependency_groups = "\n" + formatted_dependency_groups
    if cross_provider_dependencies:
        formatted_cross_provider_dependencies = "\n" + "\n".join(cross_provider_dependencies)
    else:  # If there are no cross-provider dependencies, we need to remove the line
        formatted_cross_provider_dependencies = ""
    context["CROSS_PROVIDER_DEPENDENCIES"] = formatted_cross_provider_dependencies
    context["DEPENDENCY_GROUPS"] = formatted_dependency_groups
    pyproject_toml_content = render_template(
        template_name="pyproject",
        context=context,
        extension=".toml",
        autoescape=False,
        lstrip_blocks=True,
        trim_blocks=True,
        keep_trailing_newline=True,
    )
    get_pyproject_toml_path.write_text(pyproject_toml_content)
    get_console().print(
        f"[info]Generated {get_pyproject_toml_path} for the {provider_details.provider_id} provider\n"
    )
# Matches a quoted apache-airflow* dependency line with a ">=" lower bound, capturing the text
# before the specifier, the specifier itself, and the trailing part (closing quote or marker).
AIRFLOW_PACKAGE_MATCHER = re.compile(r"(^.*\")(apache-airflow.*>=[\d.]*)((\".*)$|;.*$)")


def modify_dependency_with_suffix(dependency: str, version_suffix: str) -> str:
    """Apply the version suffix to an apache-airflow dependency line (skips .post suffixes)."""
    match = AIRFLOW_PACKAGE_MATCHER.match(dependency)
    if not match or version_suffix.startswith(".post"):
        return dependency
    before, specifier, after = match.group(1), match.group(2), match.group(3)
    return before + apply_version_suffix(specifier, version_suffix) + after
def get_provider_distribution_jinja_context(provider_id: str, version_suffix: str) -> dict[str, Any]:
    """Build the Jinja context for the latest released version of the given provider."""
    latest_version = get_provider_details(provider_id).versions[0]
    return get_provider_jinja_context(
        provider_id=provider_id,
        current_release_version=latest_version,
        version_suffix=version_suffix,
    )
def _prepare_get_provider_info_py_file(context: dict[str, Any], provider_id: str, target_path: Path):
    """Render get_provider_info.py from its template and write it (black-formatted) into the package."""
    from airflow_breeze.utils.black_utils import black_format

    rendered = render_template(
        template_name="get_provider_info",
        context=context,
        extension=".py",
        autoescape=False,
        keep_trailing_newline=True,
    )
    target_provider_specific_path = (target_path / "airflow" / "providers").joinpath(*provider_id.split("."))
    (target_provider_specific_path / "get_provider_info.py").write_text(black_format(rendered))
    get_console().print(f"[info]Generated get_provider_info.py in {target_provider_specific_path}[/]")
# ASF license header in RST comment form - prepended to every generated README.rst file.
LICENCE_RST = """
.. Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
.. http://www.apache.org/licenses/LICENSE-2.0
.. Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
def _prepare_pyproject_toml_file(context: dict[str, Any], target_path: Path):
    """Render pyproject.toml from its template and write it into the target directory."""
    rendered = render_template(
        template_name="pyproject",
        context=context,
        extension=".toml",
        autoescape=False,
        lstrip_blocks=True,
        trim_blocks=True,
        keep_trailing_newline=True,
    )
    (target_path / "pyproject.toml").write_text(rendered)
    get_console().print(f"[info]Generated pyproject.toml in {target_path}[/]")
def _prepare_readme_file(context: dict[str, Any], target_path: Path):
    """Render README.rst (prefixed with the ASF license header) into the target directory."""
    rendered = render_template(template_name="PROVIDER_README", context=context, extension=".rst")
    (target_path / "README.rst").write_text(LICENCE_RST + rendered)
    get_console().print(f"[info]Generated README.rst in {target_path}[/]")
def generate_build_files(provider_id: str, version_suffix: str, target_provider_root_sources_path: Path):
    """Generate all build files (provider info, pyproject.toml, README) for the provider."""
    get_console().print(f"\n[info]Generate build files for {provider_id}\n")
    context = get_provider_distribution_jinja_context(
        provider_id=provider_id, version_suffix=version_suffix
    )
    _prepare_get_provider_info_py_file(context, provider_id, target_provider_root_sources_path)
    _prepare_pyproject_toml_file(context, target_provider_root_sources_path)
    _prepare_readme_file(context, target_provider_root_sources_path)
    get_console().print(f"\n[info]Generated package build files for {provider_id}[/]\n")
@contextmanager
def apply_version_suffix_to_provider_pyproject_toml(
    provider_id: str, version_suffix: str
) -> Generator[Path, None, None]:
    """Apply version suffix to pyproject.toml file of provider.

    The pyproject.toml of the provider is regenerated with the suffix applied for the
    duration of the context and its original content is restored on exit. When no suffix
    is given the file is yielded untouched.
    """
    provider_details = get_provider_details(provider_id)
    pyproject_toml_path = provider_details.root_provider_path / "pyproject.toml"
    if not version_suffix:
        yield pyproject_toml_path
        return
    saved_content = pyproject_toml_path.read_text()
    get_console().print(f"\n[info]Applying version suffix {version_suffix} to {pyproject_toml_path}")
    jinja_context = get_provider_distribution_jinja_context(
        provider_id=provider_id, version_suffix=version_suffix
    )
    regenerate_pyproject_toml(jinja_context, provider_details, version_suffix)
    _prepare_pyproject_toml_file(jinja_context, provider_details.root_provider_path)
    try:
        yield pyproject_toml_path
    finally:
        # Always restore the original content, even when the context body raises.
        get_console().print(f"\n[info]Restoring original pyproject.toml file {pyproject_toml_path}")
        pyproject_toml_path.write_text(saved_content)
def update_version_suffix_in_non_provider_pyproject_toml(version_suffix: str, pyproject_toml_path: Path):
    """Apply the version suffix in-place to a non-provider pyproject.toml file.

    Updates the `version = ` line, switches documentation links to the staging site and
    applies (floored) suffixes to apache-airflow-* dependency specifiers. No-op when the
    suffix is empty.
    """
    if not version_suffix:
        return
    get_console().print(f"[info]Updating version suffix to {version_suffix} for {pyproject_toml_path}.\n")
    lines = pyproject_toml_path.read_text().splitlines()
    updated_lines = []
    for line in lines:
        # Split off a trailing comment so the suffix is appended to the value, not the comment.
        base_line, comment = line.split(" #", 1) if " #" in line else (line, "")
        if comment:
            comment = " #" + comment
        if base_line.startswith("version = "):
            get_console().print(f"[info]Updating version suffix to {version_suffix} for {line}.")
            base_line = base_line.rstrip('"') + f'{version_suffix}"'
        if "https://airflow.apache.org/" in base_line and version_suffix:
            get_console().print(f"[info]Updating documentation link to staging for {line}.")
            base_line = base_line.replace("https://airflow.apache.org/", "https://airflow.staged.apache.org/")
        # do not modify references for .post prefixes
        if not version_suffix.startswith(".post"):
            if base_line.strip().startswith('"apache-airflow-') and ">=" in base_line:
                # Lower bounds get the floored suffix so ">=" comparisons work for pre-releases.
                floored_version_suffix = floor_version_suffix(version_suffix)
                get_console().print(
                    f"[info]Updating version suffix to {floored_version_suffix} for {base_line}."
                )
                if ";" in base_line:
                    split_on_semicolon = base_line.split(";")
                    # If there is a semicolon, we need to remove it before adding the version suffix
                    base_line = split_on_semicolon[0] + f"{floored_version_suffix};" + split_on_semicolon[1]
                else:
                    base_line = base_line.rstrip('",') + f'{floored_version_suffix}",'
            # Exact (==) pins on core/task-sdk get the suffix verbatim, not floored.
            if base_line.strip().startswith('"apache-airflow-core') and "==" in base_line:
                get_console().print(f"[info]Updating version suffix to {version_suffix} for {base_line}.")
                base_line = base_line.rstrip('",') + f'{version_suffix}",'
            if base_line.strip().startswith('"apache-airflow-task-sdk') and "==" in base_line:
                get_console().print(f"[info]Updating version suffix to {version_suffix} for {base_line}.")
                base_line = base_line.rstrip('",') + f'{version_suffix}",'
        updated_lines.append(f"{base_line}{comment}")
    new_content = "\n".join(updated_lines) + "\n"
    get_console().print(f"[info]Writing updated content to {pyproject_toml_path}.\n")
    pyproject_toml_path.write_text(new_content)
def set_package_version(version: str, init_file_path: Path, extra_text: str) -> None:
    """Rewrite the ``__version__`` assignment in the given __init__.py to the passed version."""
    get_console().print(f"\n[warning]Setting {extra_text} {version} version in {init_file_path}\n")
    updated_content = re.sub(
        r'__version__ = "[^"]+"',
        f'__version__ = "{version}"',
        init_file_path.read_text(),
    )
    init_file_path.write_text(updated_content)
@contextmanager
def apply_version_suffix_to_non_provider_pyproject_tomls(
    version_suffix: str, init_file_path: Path, pyproject_toml_paths: list[Path]
) -> Generator[list[Path], None, None]:
    """Temporarily apply the version suffix to __init__.py and the given pyproject.toml files.

    The __version__ in init_file_path and the versions in all pyproject.toml files are
    updated for the duration of the context and restored to their original content on exit.

    :raises RuntimeError: when __version__ is missing or already carries a non-beta suffix.
    """
    from packaging.version import Version

    original_version_search = re.search('__version__ = "([^"]+)"', init_file_path.read_text())
    # Search beta version
    beta_version_search = re.search('__version__ = "([^"]+)b[0-9]+"', init_file_path.read_text())
    if not original_version_search:
        raise RuntimeError(f"Could not find __version__ in {init_file_path}")
    original_distribution_version = original_version_search.group(1)
    packaging_version = Version(original_distribution_version)
    # Forgiving check for beta versions
    if not beta_version_search and packaging_version.base_version != str(packaging_version):
        raise RuntimeError(
            f"The package version in {init_file_path} should be `simple version` "
            f"(no suffixes) and it is `{original_distribution_version}`."
        )
    # Snapshot current file contents so they can be restored in the finally block.
    original_contents = []
    for pyproject_toml_path in pyproject_toml_paths:
        original_contents.append(pyproject_toml_path.read_text())
    update_version_in__init_py = False
    base_package_version = original_distribution_version
    if version_suffix:
        # Strip any existing suffix from the version before appending the new one.
        base_package_version = str(Version(original_distribution_version).base_version)
        update_version_in__init_py = True
    if update_version_in__init_py:
        set_package_version(
            f"{base_package_version}{version_suffix}",
            init_file_path=init_file_path,
            extra_text="temporarily",
        )
    for pyproject_toml_path in pyproject_toml_paths:
        update_version_suffix_in_non_provider_pyproject_toml(
            version_suffix=version_suffix,
            pyproject_toml_path=pyproject_toml_path,
        )
    try:
        yield pyproject_toml_paths
    finally:
        # Restore the original version and file contents even when the body raises.
        if update_version_in__init_py:
            set_package_version(
                original_distribution_version, init_file_path=init_file_path, extra_text="back"
            )
        for pyproject_toml_path, original_content in zip(pyproject_toml_paths, original_contents):
            get_console().print(f"[info]Restoring original content of {pyproject_toml_path}.\n")
            pyproject_toml_path.write_text(original_content)
def _get_provider_version_from_package_name(provider_package_name: str) -> str | None:
    """
    Look up the current version of a provider from its own pyproject.toml.

    Args:
        provider_package_name: Full distribution name
            (e.g., "apache-airflow-providers-common-compat")

    Returns:
        The version string if found, None otherwise
    """
    # Map the distribution name onto the provider's source directory:
    # apache-airflow-providers-common-compat -> common/compat
    relative_provider_dir = provider_package_name.replace("apache-airflow-providers-", "").replace(
        "-", "/"
    )
    pyproject_path = AIRFLOW_PROVIDERS_ROOT_PATH / relative_provider_dir / "pyproject.toml"
    if not pyproject_path.exists():
        get_console().print(f"[warning]Provider pyproject.toml not found: {pyproject_path}")
        return None
    project_table = load_pyproject_toml(pyproject_path).get("project", {})
    version = project_table.get("version")
    if version:
        return version
    get_console().print(
        f"[warning]Could not find version for {provider_package_name} in {pyproject_path}"
    )
    return None
def _update_dependency_line_with_new_version(
    line: str,
    provider_package_name: str,
    current_min_version: str,
    new_version: str,
    pyproject_file: Path,
    updates_made: dict[str, dict[str, Any]],
) -> tuple[str, bool]:
    """
    Update a dependency line with a new version and track the change.

    Returns:
        Tuple of (updated_line, was_modified)
    """
    # Nothing to do when the constraint already points at the target version.
    if new_version == current_min_version:
        get_console().print(
            f"[dim]Skipping {provider_package_name} in {pyproject_file.relative_to(AIRFLOW_PROVIDERS_ROOT_PATH)}: "
            f"already at version {new_version}"
        )
        return line, False
    # Swap the >= constraint and drop the "# use next version" marker comment.
    updated_line = line.replace(
        f'"{provider_package_name}>={current_min_version}"',
        f'"{provider_package_name}>={new_version}"',
    ).replace(" # use next version", "")
    # Record the change, keyed by the provider directory this pyproject belongs to.
    provider_key = str(pyproject_file.parent.relative_to(AIRFLOW_PROVIDERS_ROOT_PATH))
    updates_made.setdefault(provider_key, {})[provider_package_name] = {
        "old_version": current_min_version,
        "new_version": new_version,
        "file": str(pyproject_file),
    }
    get_console().print(
        f"[info]Updating {provider_package_name} in {pyproject_file.relative_to(AIRFLOW_PROVIDERS_ROOT_PATH)}: "
        f"{current_min_version} -> {new_version} (comment removed)"
    )
    return updated_line, True
def _process_line_with_next_version_comment(
    line: str, pyproject_file: Path, updates_made: dict[str, dict[str, Any]]
) -> tuple[str, bool]:
    """
    Process a line that contains the "# use next version" comment.

    Returns:
        Tuple of (processed_line, was_modified)
    """
    # Expected shape: "apache-airflow-providers-xxx>=version", # use next version
    parsed = re.search(r'"(apache-airflow-providers-[^">=<]+)>=([^",]+)"', line)
    if parsed is None:
        # Marker comment present but the constraint could not be parsed — leave as-is.
        return line, False
    package_name, minimum_version = parsed.groups()
    # Resolve the current version published in that provider's pyproject.toml.
    latest_version = _get_provider_version_from_package_name(package_name)
    if not latest_version:
        return line, False
    return _update_dependency_line_with_new_version(
        line, package_name, minimum_version, latest_version, pyproject_file, updates_made
    )
def update_providers_with_next_version_comment() -> dict[str, dict[str, Any]]:
    """
    Scan all provider pyproject.toml files for "# use next version" comments and update the version
    of the referenced provider to the current version from that provider's pyproject.toml.

    Returns a dictionary with information about updated providers.
    """
    updates_made: dict[str, dict[str, Any]] = {}
    for pyproject_file in AIRFLOW_PROVIDERS_ROOT_PATH.glob("**/pyproject.toml"):
        rewritten_lines: list[str] = []
        any_change = False
        for line in pyproject_file.read_text().split("\n"):
            # Only lines carrying the marker comment are candidates; the
            # "dependencies = [" declaration line itself is excluded.
            if "# use next version" in line and "dependencies = [" not in line:
                new_line, changed = _process_line_with_next_version_comment(
                    line, pyproject_file, updates_made
                )
                rewritten_lines.append(new_line)
                any_change = any_change or changed
            else:
                rewritten_lines.append(line)
        # Only rewrite the file when at least one line actually changed.
        if any_change:
            pyproject_file.write_text("\n".join(rewritten_lines))
            get_console().print(
                f"[success]Updated {pyproject_file.relative_to(AIRFLOW_PROVIDERS_ROOT_PATH)}\n"
            )
    return updates_made
|
PipRequirements
|
python
|
pydata__xarray
|
xarray/computation/rolling.py
|
{
"start": 1530,
"end": 9114
}
|
class ____(Generic[T_Xarray]):
"""A object that implements the moving window pattern.
See Also
--------
xarray.Dataset.groupby
xarray.DataArray.groupby
xarray.Dataset.rolling
xarray.DataArray.rolling
"""
__slots__ = ("center", "dim", "min_periods", "obj", "window")
_attributes = ("window", "min_periods", "center", "dim")
dim: list[Hashable]
window: list[int]
center: list[bool]
obj: T_Xarray
min_periods: int
def __init__(
self,
obj: T_Xarray,
windows: Mapping[Any, int],
min_periods: int | None = None,
center: bool | Mapping[Any, bool] = False,
) -> None:
"""
Moving window object.
Parameters
----------
obj : Dataset or DataArray
Object to window.
windows : mapping of hashable to int
A mapping from the name of the dimension to create the rolling
window along (e.g. `time`) to the size of the moving window.
min_periods : int or None, default: None
Minimum number of observations in window required to have a value
(otherwise result is NA). The default, None, is equivalent to
setting min_periods equal to the size of the window.
center : bool or dict-like Hashable to bool, default: False
Set the labels at the center of the window. If dict-like, set this
property per rolling dimension.
Returns
-------
rolling : type of input argument
"""
self.dim = []
self.window = []
for d, w in windows.items():
self.dim.append(d)
if w <= 0:
raise ValueError("window must be > 0")
self.window.append(w)
self.center = self._mapping_to_list(center, default=False)
self.obj = obj
missing_dims = tuple(dim for dim in self.dim if dim not in self.obj.dims)
if missing_dims:
# NOTE: we raise KeyError here but ValueError in Coarsen.
raise KeyError(
f"Window dimensions {missing_dims} not found in {self.obj.__class__.__name__} "
f"dimensions {tuple(self.obj.dims)}"
)
# attributes
if min_periods is not None and min_periods <= 0:
raise ValueError("min_periods must be greater than zero or None")
self.min_periods = (
math.prod(self.window) if min_periods is None else min_periods
)
def __repr__(self) -> str:
"""provide a nice str repr of our rolling object"""
attrs = ",".join(
f"{k}->{w}{'(center)' if c else ''}"
for k, w, c in zip(self.dim, self.window, self.center, strict=True)
)
return f"{self.__class__.__name__} [{attrs}]"
def __len__(self) -> int:
return math.prod(self.obj.sizes[d] for d in self.dim)
@property
def ndim(self) -> int:
return len(self.dim)
def _reduce_method( # type: ignore[misc]
name: str,
fillna: Any,
rolling_agg_func: Callable | None = None,
automatic_rechunk: bool = False,
) -> Callable[..., T_Xarray]:
"""Constructs reduction methods built on a numpy reduction function (e.g. sum),
a numbagg reduction function (e.g. move_sum), a bottleneck reduction function
(e.g. move_sum), or a Rolling reduction (_mean).
The logic here for which function to run is quite diffuse, across this method &
_array_reduce. Arguably we could refactor this. But one constraint is that we
need context of xarray options, of the functions each library offers, of
the array (e.g. dtype).
Set automatic_rechunk=True when the reduction method makes a memory copy.
"""
if rolling_agg_func:
array_agg_func = None
else:
array_agg_func = getattr(duck_array_ops, name)
bottleneck_move_func = getattr(bottleneck, "move_" + name, None)
if module_available("numbagg"):
import numbagg
numbagg_move_func = getattr(numbagg, "move_" + name, None)
else:
numbagg_move_func = None
def method(self, keep_attrs=None, **kwargs):
keep_attrs = self._get_keep_attrs(keep_attrs)
return self._array_reduce(
array_agg_func=array_agg_func,
bottleneck_move_func=bottleneck_move_func,
numbagg_move_func=numbagg_move_func,
rolling_agg_func=rolling_agg_func,
keep_attrs=keep_attrs,
fillna=fillna,
sliding_window_view_kwargs=dict(automatic_rechunk=automatic_rechunk),
**kwargs,
)
method.__name__ = name
method.__doc__ = _ROLLING_REDUCE_DOCSTRING_TEMPLATE.format(name=name)
return method
def _mean(self, keep_attrs, **kwargs):
result = self.sum(keep_attrs=False, **kwargs)
# use dtype of result for casting of count
# this allows for GH #7062 and GH #8864, fixes GH #10340
result /= duck_array_ops.astype(
self.count(keep_attrs=False), dtype=result.dtype, copy=False
)
if keep_attrs:
result.attrs = self.obj.attrs
return result
_mean.__doc__ = _ROLLING_REDUCE_DOCSTRING_TEMPLATE.format(name="mean")
# automatic_rechunk is set to True for reductions that make a copy.
# std, var could be optimized after which we can set it to False
# See #4325
argmax = _reduce_method("argmax", dtypes.NINF, automatic_rechunk=True)
argmin = _reduce_method("argmin", dtypes.INF, automatic_rechunk=True)
max = _reduce_method("max", dtypes.NINF)
min = _reduce_method("min", dtypes.INF)
prod = _reduce_method("prod", 1)
sum = _reduce_method("sum", 0)
mean = _reduce_method("mean", None, _mean)
std = _reduce_method("std", None, automatic_rechunk=True)
var = _reduce_method("var", None, automatic_rechunk=True)
median = _reduce_method("median", None, automatic_rechunk=True)
def _counts(self, keep_attrs: bool | None) -> T_Xarray:
raise NotImplementedError()
def count(self, keep_attrs: bool | None = None) -> T_Xarray:
keep_attrs = self._get_keep_attrs(keep_attrs)
rolling_count = self._counts(keep_attrs=keep_attrs)
enough_periods = rolling_count >= self.min_periods
return rolling_count.where(enough_periods)
count.__doc__ = _ROLLING_REDUCE_DOCSTRING_TEMPLATE.format(name="count")
def _mapping_to_list(
self,
arg: _T | Mapping[Any, _T],
default: _T | None = None,
allow_default: bool = True,
allow_allsame: bool = True,
) -> list[_T]:
if utils.is_dict_like(arg):
if allow_default:
return [arg.get(d, default) for d in self.dim]
for d in self.dim:
if d not in arg:
raise KeyError(f"Argument has no dimension key {d}.")
return [arg[d] for d in self.dim]
if allow_allsame: # for single argument
return [arg] * self.ndim # type: ignore[list-item] # no check for negatives
if self.ndim == 1:
return [arg] # type: ignore[list-item] # no check for negatives
raise ValueError(f"Mapping argument is necessary for {self.ndim}d-rolling.")
def _get_keep_attrs(self, keep_attrs):
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=True)
return keep_attrs
|
Rolling
|
python
|
ray-project__ray
|
python/ray/_private/thirdparty/pyamdsmi/pyamdsmi.py
|
{
"start": 4379,
"end": 4817
}
|
class ____(c_int):
RSMI_MEM_TYPE_FIRST = 0
RSMI_MEM_TYPE_VRAM = RSMI_MEM_TYPE_FIRST
RSMI_MEM_TYPE_VIS_VRAM = 1
RSMI_MEM_TYPE_GTT = 2
RSMI_MEM_TYPE_LAST = RSMI_MEM_TYPE_GTT
# memory_type_l includes names for with rsmi_memory_type_t
# Usage example to get corresponding names:
# memory_type_l[rsmi_memory_type_t.RSMI_MEM_TYPE_VRAM] will return string 'vram'
memory_type_l = ['VRAM', 'VIS_VRAM', 'GTT']
|
rsmi_memory_type_t
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-recharge/unit_tests/integration/request_builder.py
|
{
"start": 404,
"end": 2036
}
|
class ____:
@classmethod
def get_endpoint(cls, endpoint: str) -> RequestBuilder:
return cls(endpoint=endpoint)
def __init__(self, endpoint: str) -> None:
self._endpoint: str = endpoint
self._query_params: MutableMapping[str, Any] = {}
self._headers: MutableMapping[str, str] = {"X-Recharge-Version": "2021-11"}
def with_limit(self, limit: int) -> RequestBuilder:
self._query_params["limit"] = limit
return self
def with_updated_at_min(self, value: str) -> RequestBuilder:
self._query_params["updated_at_min"] = dt.datetime.strptime(value, DATE_TIME_FORMAT).strftime(DATE_TIME_FORMAT)
self._query_params["sort_by"] = "updated_at-asc"
return self
def with_next_page_token(self, next_page_token: str) -> RequestBuilder:
self._query_params["cursor"] = next_page_token
return self
def with_access_token(self, access_token: str) -> RequestBuilder:
self._headers["X-Recharge-Access-Token"] = access_token
return self
def with_old_api_version(self, api_version: str) -> RequestBuilder:
self._headers["X-Recharge-Version"] = api_version
return self
def with_created_min(self, value: str) -> RequestBuilder:
self._query_params["created_at_min"] = dt.datetime.strptime(value, DATE_TIME_FORMAT).strftime(DATE_TIME_FORMAT)
return self
def build(self) -> HttpRequest:
return HttpRequest(
url=f"https://api.rechargeapps.com/{self._endpoint}",
query_params=self._query_params,
headers=self._headers,
)
|
RequestBuilder
|
python
|
kamyu104__LeetCode-Solutions
|
Python/lexicographically-minimum-string-after-removing-stars.py
|
{
"start": 67,
"end": 589
}
|
class ____(object):
def clearStars(self, s):
"""
:type s: str
:rtype: str
"""
result = list(s)
lookup = [[] for _ in range(26)]
for i, x in enumerate(s):
if x != '*':
lookup[ord(x)-ord('a')].append(i)
continue
for stk in lookup:
if not stk:
continue
result[stk.pop()] = '*'
break
return "".join(x for x in result if x != '*')
|
Solution
|
python
|
pdm-project__pdm
|
src/pdm/pytest.py
|
{
"start": 7170,
"end": 8006
}
|
class ____:
"""A mock Distribution"""
def __init__(
self,
key: str,
version: str,
editable: bool = False,
metadata: Metadata | None = None,
):
self.version = version
self.link_file = "editable" if editable else None
self.dependencies: list[str] = []
self._metadata = {"Name": key, "Version": version}
if metadata:
self._metadata.update(metadata)
self.name = key
@property
def metadata(self) -> Metadata:
return Metadata(self._metadata)
def as_req(self) -> Requirement:
return parse_requirement(f"{self.name}=={self.version}")
@property
def requires(self) -> list[str]:
return self.dependencies
def read_text(self, path: Path | str) -> None:
return None
|
Distribution
|
python
|
scipy__scipy
|
scipy/fft/_pocketfft/tests/test_real_transforms.py
|
{
"start": 7398,
"end": 13323
}
|
class ____:
def test_definition(self, rdt, type, fftwdata_size,
reference_data, ref_lock):
with ref_lock:
x, yr, dt = fftw_dct_ref(type, fftwdata_size, rdt, reference_data)
y = dct(x, type=type)
assert_equal(y.dtype, dt)
dec = dec_map[(dct, rdt, type)]
assert_allclose(y, yr, rtol=0., atol=np.max(yr)*10**(-dec))
@pytest.mark.parametrize('size', [7, 8, 9, 16, 32, 64])
def test_axis(self, rdt, type, size):
nt = 2
dec = dec_map[(dct, rdt, type)]
x = np.random.randn(nt, size)
y = dct(x, type=type)
for j in range(nt):
assert_array_almost_equal(y[j], dct(x[j], type=type),
decimal=dec)
x = x.T
y = dct(x, axis=0, type=type)
for j in range(nt):
assert_array_almost_equal(y[:,j], dct(x[:,j], type=type),
decimal=dec)
@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int])
def test_dct1_definition_ortho(rdt, mdata_x):
# Test orthornomal mode.
dec = dec_map[(dct, rdt, 1)]
x = np.array(mdata_x, dtype=rdt)
dt = np.result_type(np.float32, rdt)
y = dct(x, norm='ortho', type=1)
y2 = naive_dct1(x, norm='ortho')
assert_equal(y.dtype, dt)
assert_allclose(y, y2, rtol=0., atol=np.max(y2)*10**(-dec))
@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int])
def test_dct2_definition_matlab(mdata_xy, rdt):
# Test correspondence with matlab (orthornomal mode).
dt = np.result_type(np.float32, rdt)
x = np.array(mdata_xy[0], dtype=dt)
yr = mdata_xy[1]
y = dct(x, norm="ortho", type=2)
dec = dec_map[(dct, rdt, 2)]
assert_equal(y.dtype, dt)
assert_array_almost_equal(y, yr, decimal=dec)
@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int])
def test_dct3_definition_ortho(mdata_x, rdt):
# Test orthornomal mode.
x = np.array(mdata_x, dtype=rdt)
dt = np.result_type(np.float32, rdt)
y = dct(x, norm='ortho', type=2)
xi = dct(y, norm="ortho", type=3)
dec = dec_map[(dct, rdt, 3)]
assert_equal(xi.dtype, dt)
assert_array_almost_equal(xi, x, decimal=dec)
@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int])
def test_dct4_definition_ortho(mdata_x, rdt):
# Test orthornomal mode.
x = np.array(mdata_x, dtype=rdt)
dt = np.result_type(np.float32, rdt)
y = dct(x, norm='ortho', type=4)
y2 = naive_dct4(x, norm='ortho')
dec = dec_map[(dct, rdt, 4)]
assert_equal(y.dtype, dt)
assert_allclose(y, y2, rtol=0., atol=np.max(y2)*10**(-dec))
@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int])
@pytest.mark.parametrize('type', [1, 2, 3, 4])
def test_idct_definition(fftwdata_size, rdt, type, reference_data, ref_lock):
with ref_lock:
xr, yr, dt = fftw_dct_ref(type, fftwdata_size, rdt, reference_data)
x = idct(yr, type=type)
dec = dec_map[(idct, rdt, type)]
assert_equal(x.dtype, dt)
assert_allclose(x, xr, rtol=0., atol=np.max(xr)*10**(-dec))
@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int])
@pytest.mark.parametrize('type', [1, 2, 3, 4])
def test_definition(fftwdata_size, rdt, type, reference_data, ref_lock):
with ref_lock:
xr, yr, dt = fftw_dst_ref(type, fftwdata_size, rdt, reference_data)
y = dst(xr, type=type)
dec = dec_map[(dst, rdt, type)]
assert_equal(y.dtype, dt)
assert_allclose(y, yr, rtol=0., atol=np.max(yr)*10**(-dec))
@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int])
def test_dst1_definition_ortho(rdt, mdata_x):
# Test orthornomal mode.
dec = dec_map[(dst, rdt, 1)]
x = np.array(mdata_x, dtype=rdt)
dt = np.result_type(np.float32, rdt)
y = dst(x, norm='ortho', type=1)
y2 = naive_dst1(x, norm='ortho')
assert_equal(y.dtype, dt)
assert_allclose(y, y2, rtol=0., atol=np.max(y2)*10**(-dec))
@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int])
def test_dst4_definition_ortho(rdt, mdata_x):
# Test orthornomal mode.
dec = dec_map[(dst, rdt, 4)]
x = np.array(mdata_x, dtype=rdt)
dt = np.result_type(np.float32, rdt)
y = dst(x, norm='ortho', type=4)
y2 = naive_dst4(x, norm='ortho')
assert_equal(y.dtype, dt)
assert_array_almost_equal(y, y2, decimal=dec)
@pytest.mark.parametrize('rdt', [np.longdouble, np.float64, np.float32, int])
@pytest.mark.parametrize('type', [1, 2, 3, 4])
def test_idst_definition(fftwdata_size, rdt, type, reference_data, ref_lock):
with ref_lock:
xr, yr, dt = fftw_dst_ref(type, fftwdata_size, rdt, reference_data)
x = idst(yr, type=type)
dec = dec_map[(idst, rdt, type)]
assert_equal(x.dtype, dt)
assert_allclose(x, xr, rtol=0., atol=np.max(xr)*10**(-dec))
@pytest.mark.parametrize('routine', [dct, dst, idct, idst])
@pytest.mark.parametrize('dtype', [np.float32, np.float64, np.longdouble])
@pytest.mark.parametrize('shape, axis', [
((16,), -1), ((16, 2), 0), ((2, 16), 1)
])
@pytest.mark.parametrize('type', [1, 2, 3, 4])
@pytest.mark.parametrize('overwrite_x', [True, False])
@pytest.mark.parametrize('norm', [None, 'ortho'])
def test_overwrite(routine, dtype, shape, axis, type, norm, overwrite_x):
# Check input overwrite behavior
np.random.seed(1234)
if np.issubdtype(dtype, np.complexfloating):
x = np.random.randn(*shape) + 1j*np.random.randn(*shape)
else:
x = np.random.randn(*shape)
x = x.astype(dtype)
x2 = x.copy()
routine(x2, type, None, axis, norm, overwrite_x=overwrite_x)
sig = (f"{routine.__name__}({x.dtype}{x.shape!r}, {None!r}, axis={axis!r}, "
f"overwrite_x={overwrite_x!r})")
if not overwrite_x:
assert_equal(x2, x, err_msg=f"spurious overwrite in {sig}")
|
TestDCT
|
python
|
kamyu104__LeetCode-Solutions
|
Python/minimize-deviation-in-array.py
|
{
"start": 68,
"end": 642
}
|
class ____(object):
def minimumDeviation(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
max_heap = [-num*2 if num%2 else -num for num in nums]
heapq.heapify(max_heap)
min_elem = -max(max_heap)
result = float("inf")
while len(max_heap) == len(nums):
num = -heapq.heappop(max_heap)
result = min(result, num-min_elem)
if not num%2:
min_elem = min(min_elem, num//2)
heapq.heappush(max_heap, -num//2)
return result
|
Solution
|
python
|
astropy__astropy
|
astropy/extern/_strptime.py
|
{
"start": 1314,
"end": 8071
}
|
class ____(object):
"""Stores and handles locale-specific information related to time.
ATTRIBUTES:
f_weekday -- full weekday names (7-item list)
a_weekday -- abbreviated weekday names (7-item list)
f_month -- full month names (13-item list; dummy value in [0], which
is added by code)
a_month -- abbreviated month names (13-item list, dummy value in
[0], which is added by code)
am_pm -- AM/PM representation (2-item list)
LC_date_time -- format string for date/time representation (string)
LC_date -- format string for date representation (string)
LC_time -- format string for time representation (string)
timezone -- daylight- and non-daylight-savings timezone representation
(2-item list of sets)
lang -- Language used by instance (2-item tuple)
"""
def __init__(self):
"""Set all attributes.
Order of methods called matters for dependency reasons.
The locale language is set at the offset and then checked again before
exiting. This is to make sure that the attributes were not set with a
mix of information from more than one locale. This would most likely
happen when using threads where one thread calls a locale-dependent
function while another thread changes the locale while the function in
the other thread is still running. Proper coding would call for
locks to prevent changing the locale while locale-dependent code is
running. The check here is done in case someone does not think about
doing this.
Only other possible issue is if someone changed the timezone and did
not call tz.tzset . That is an issue for the programmer, though,
since changing the timezone is worthless without that call.
"""
self.lang = _getlang()
self.__calc_weekday()
self.__calc_month()
self.__calc_am_pm()
self.__calc_timezone()
self.__calc_date_time()
if _getlang() != self.lang:
raise ValueError("locale changed during initialization")
if time.tzname != self.tzname or time.daylight != self.daylight:
raise ValueError("timezone changed during initialization")
def __pad(self, seq, front):
# Add '' to seq to either the front (is True), else the back.
seq = list(seq)
if front:
seq.insert(0, '')
else:
seq.append('')
return seq
def __calc_weekday(self):
# Set self.a_weekday and self.f_weekday using the calendar
# module.
a_weekday = [calendar.day_abbr[i].lower() for i in range(7)]
f_weekday = [calendar.day_name[i].lower() for i in range(7)]
self.a_weekday = a_weekday
self.f_weekday = f_weekday
def __calc_month(self):
# Set self.f_month and self.a_month using the calendar module.
a_month = [calendar.month_abbr[i].lower() for i in range(13)]
f_month = [calendar.month_name[i].lower() for i in range(13)]
self.a_month = a_month
self.f_month = f_month
def __calc_am_pm(self):
# Set self.am_pm by using time.strftime().
# The magic date (1999,3,17,hour,44,55,2,76,0) is not really that
# magical; just happened to have used it everywhere else where a
# static date was needed.
am_pm = []
for hour in (1, 22):
time_tuple = time.struct_time((1999,3,17,hour,44,55,2,76,0))
am_pm.append(time.strftime("%p", time_tuple).lower())
self.am_pm = am_pm
def __calc_date_time(self):
# Set self.date_time, self.date, & self.time by using
# time.strftime().
# Use (1999,3,17,22,44,55,2,76,0) for magic date because the amount of
# overloaded numbers is minimized. The order in which searches for
# values within the format string is very important; it eliminates
# possible ambiguity for what something represents.
time_tuple = time.struct_time((1999,3,17,22,44,55,2,76,0))
date_time = [None, None, None]
date_time[0] = time.strftime("%c", time_tuple).lower()
date_time[1] = time.strftime("%x", time_tuple).lower()
date_time[2] = time.strftime("%X", time_tuple).lower()
replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'),
(self.f_month[3], '%B'), (self.a_weekday[2], '%a'),
(self.a_month[3], '%b'), (self.am_pm[1], '%p'),
('1999', '%Y'), ('99', '%y'), ('22', '%H'),
('44', '%M'), ('55', '%S'), ('76', '%j'),
('17', '%d'), ('03', '%m'), ('3', '%m'),
# '3' needed for when no leading zero.
('2', '%w'), ('10', '%I')]
replacement_pairs.extend([(tz, "%Z") for tz_values in self.timezone
for tz in tz_values])
for offset,directive in ((0,'%c'), (1,'%x'), (2,'%X')):
current_format = date_time[offset]
for old, new in replacement_pairs:
# Must deal with possible lack of locale info
# manifesting itself as the empty string (e.g., Swedish's
# lack of AM/PM info) or a platform returning a tuple of empty
# strings (e.g., MacOS 9 having timezone as ('','')).
if old:
current_format = current_format.replace(old, new)
# If %W is used, then Sunday, 2005-01-03 will fall on week 0 since
# 2005-01-03 occurs before the first Monday of the year. Otherwise
# %U is used.
time_tuple = time.struct_time((1999,1,3,1,1,1,6,3,0))
if '00' in time.strftime(directive, time_tuple):
U_W = '%W'
else:
U_W = '%U'
date_time[offset] = current_format.replace('11', U_W)
self.LC_date_time = date_time[0]
self.LC_date = date_time[1]
self.LC_time = date_time[2]
def __calc_timezone(self):
# Set self.timezone by using time.tzname.
# Do not worry about possibility of time.tzname[0] == time.tzname[1]
# and time.daylight; handle that in strptime.
try:
time.tzset()
except AttributeError:
pass
self.tzname = time.tzname
self.daylight = time.daylight
no_saving = frozenset({"utc", "gmt", self.tzname[0].lower()})
if self.daylight:
has_saving = frozenset({self.tzname[1].lower()})
else:
has_saving = frozenset()
self.timezone = (no_saving, has_saving)
|
LocaleTime
|
python
|
google__jax
|
jax/_src/core.py
|
{
"start": 66265,
"end": 66321
}
|
class ____:
pass
@dataclass(frozen=True)
|
QuasiDynamicData
|
python
|
Pylons__pyramid
|
tests/test_config/test_assets.py
|
{
"start": 34658,
"end": 35303
}
|
class ____(unittest.TestCase):
def _getTargetClass(self):
from pyramid.config.assets import FileOverride
return FileOverride
def _makeOne(self, path, source):
klass = self._getTargetClass()
return klass(path, source)
def test_it_match(self):
source = DummyAssetSource()
o = self._makeOne('foo.pt', source)
result = o('foo.pt')
self.assertEqual(result, (source, ''))
def test_it_no_match(self):
source = DummyAssetSource()
o = self._makeOne('foo.pt', source)
result = o('notfound.pt')
self.assertEqual(result, None)
|
TestFileOverride
|
python
|
coleifer__peewee
|
tests/fields.py
|
{
"start": 25475,
"end": 25528
}
|
class ____(TestModel):
data = BlobField()
|
BlobModel
|
python
|
kamyu104__LeetCode-Solutions
|
Python/parallel-courses-ii.py
|
{
"start": 76,
"end": 1246
}
|
class ____(object):
def minNumberOfSemesters(self, n, dependencies, k):
"""
:type n: int
:type dependencies: List[List[int]]
:type k: int
:rtype: int
"""
reqs = [0]*n
for u, v in dependencies:
reqs[v-1] |= 1 << (u-1)
dp = [n]*(1<<n)
dp[0] = 0
for mask in xrange(1<<n):
candidates = []
for v in xrange(n):
if (mask&(1<<v)) == 0 and (mask&reqs[v]) == reqs[v]:
candidates.append(v)
for choice in itertools.combinations(candidates, min(len(candidates), k)):
new_mask = mask
for v in choice:
new_mask |= 1<<v
dp[new_mask] = min(dp[new_mask], dp[mask]+1)
return dp[-1]
# Time: O(nlogn + e), e is the number of edges in graph
# Space: O(n + e)
import collections
import heapq
# wrong greedy solution
# since the priority of courses are hard to decide especially for those courses with zero indegrees are of the same outdegrees and depths
# e.x.
# 9
# [[1,4],[1,5],[3,5],[3,6],[2,6],[2,7],[8,4],[8,5],[9,6],[9,7]]
# 3
|
Solution
|
python
|
getsentry__sentry
|
src/sentry/api/endpoints/artifact_bundles.py
|
{
"start": 1357,
"end": 1926
}
|
class ____:
@classmethod
def derive_order_by(cls, sort_by: str) -> str | None:
is_desc = sort_by.startswith("-")
sort_by = sort_by.strip("-")
order_by = ORDER_BY_FIELDS_MAPPING.get(sort_by)
if order_by is not None:
return f"-{order_by}" if is_desc else order_by
raise InvalidSortByParameter
@classmethod
def is_valid_uuid(cls, value):
try:
uuid.UUID(str(value))
return True
except ValueError:
return False
@region_silo_endpoint
|
ArtifactBundlesMixin
|
python
|
scipy__scipy
|
scipy/special/tests/test_cdflib.py
|
{
"start": 607,
"end": 1144
}
|
class ____:
"""Generate a set of probabilities on [0, 1]."""
def __init__(self):
# Include the endpoints for compatibility with Arg et. al.
self.a = 0
self.b = 1
def values(self, n):
"""Return an array containing approximately n numbers."""
m = max(1, n//3)
v1 = np.logspace(-30, np.log10(0.3), m)
v2 = np.linspace(0.3, 0.7, m + 1, endpoint=False)[1:]
v3 = 1 - np.logspace(np.log10(0.3), -15, m)
v = np.r_[v1, v2, v3]
return np.unique(v)
|
ProbArg
|
python
|
astropy__astropy
|
astropy/wcs/wcsapi/tests/test_high_level_api.py
|
{
"start": 770,
"end": 1986
}
|
class ____(DoubleLowLevelWCS, HighLevelWCSMixin):
"""
This example WCS has two of the world coordinates that use the same class,
which triggers a different path in the high level WCS code.
"""
@property
def pixel_n_dim(self):
return 2
@property
def world_n_dim(self):
return 2
@property
def world_axis_physical_types(self):
return ["pos.eq.ra", "pos.eq.dec"]
@property
def world_axis_units(self):
return ["deg", "deg"]
@property
def world_axis_object_components(self):
return [("test1", 0, "value"), ("test2", 0, "value")]
@property
def world_axis_object_classes(self):
return {
"test1": (Quantity, (), {"unit": "deg"}),
"test2": (Quantity, (), {"unit": "deg"}),
}
def test_simple_duplicate():
# Make sure that things work properly when the low-level WCS uses the same
# class for two of the coordinates.
wcs = SimpleDuplicateWCS()
q1, q2 = wcs.pixel_to_world(1, 2)
assert isinstance(q1, Quantity)
assert isinstance(q2, Quantity)
x, y = wcs.world_to_pixel(q1, q2)
assert_allclose(x, 1)
assert_allclose(y, 2)
|
SimpleDuplicateWCS
|
python
|
pola-rs__polars
|
py-polars/src/polars/io/partition.py
|
{
"start": 16524,
"end": 17067
}
|
class _SinkDirectoryInner:
    """
    Holds parsed directory sink options.

    For internal use.
    """

    # Root directory under which partitioned output is written.
    base_path: str
    # Optional callback mapping a keyed partition context to an output
    # path or open stream; None selects the default naming scheme.
    file_path_provider: (
        Callable[[KeyedPartitionContext], Path | str | IO[bytes] | IO[str]] | None
    )
    # Expressions to partition by; None when no partitioning was requested.
    partition_by: list[PyExpr] | None
    # Whether partition keys are known to be sorted.
    partition_keys_sorted: bool | None
    # Whether the partition key columns are kept in the written files.
    include_keys: bool | None
    # Optional sort expressions applied within each partition.
    per_partition_sort_by: list[PyExpr] | None
    # Optional sort expressions applied within each written file.
    per_file_sort_by: list[PyExpr] | None
    # Optional cap on rows per output file.
    max_rows_per_file: int | None
    # Optional callback invoked with each finished partition frame.
    finish_callback: Callable[[PyDataFrame], None] | None
@dataclass
|
_SinkDirectoryInner
|
python
|
redis__redis-py
|
redis/maint_notifications.py
|
{
"start": 3267,
"end": 5794
}
|
class NodeMovingNotification(MaintenanceNotification):
    """
    This notification is received when a node is replaced with a new node
    during cluster rebalancing or maintenance operations.
    """

    def __init__(
        self,
        id: int,
        new_node_host: Optional[str],
        new_node_port: Optional[int],
        ttl: int,
    ):
        """
        Initialize a new NodeMovingNotification.

        Args:
            id (int): Unique identifier for this notification
            new_node_host (str): Hostname or IP address of the new replacement node
            new_node_port (int): Port number of the new replacement node
            ttl (int): Time-to-live in seconds for this notification
        """
        super().__init__(id, ttl)
        self.new_node_host = new_node_host
        self.new_node_port = new_node_port

    def __repr__(self) -> str:
        # Snapshot the expiry once so the rendered values are consistent.
        expiry_time = self.expire_at
        remaining = max(0, expiry_time - time.monotonic())
        return (
            f"{self.__class__.__name__}("
            f"id={self.id}, "
            f"new_node_host='{self.new_node_host}', "
            f"new_node_port={self.new_node_port}, "
            f"ttl={self.ttl}, "
            f"creation_time={self.creation_time}, "
            f"expires_at={expiry_time}, "
            f"remaining={remaining:.1f}s, "
            f"expired={self.is_expired()}"
            f")"
        )

    def __eq__(self, other) -> bool:
        """
        Two NodeMovingNotification notifications are considered equal if they have the same
        id, new_node_host, and new_node_port.
        """
        if not isinstance(other, NodeMovingNotification):
            return False
        return (
            self.id == other.id
            and self.new_node_host == other.new_node_host
            and self.new_node_port == other.new_node_port
        )

    def __hash__(self) -> int:
        """
        Return a hash value for the notification to allow
        instances to be used in sets and as dictionary keys.

        Returns:
            int: Hash value based on notification type class name, id,
            new_node_host and new_node_port
        """
        # NOTE(review): a falsy port hashes as None while a non-numeric port
        # string hashes as 0, so objects equal per __eq__ (e.g. port "80" vs
        # 80) can hash differently -- confirm ports are always int or None.
        try:
            node_port = int(self.new_node_port) if self.new_node_port else None
        except ValueError:
            node_port = 0
        return hash(
            (
                self.__class__.__name__,
                int(self.id),
                str(self.new_node_host),
                node_port,
            )
        )
|
NodeMovingNotification
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/definitions/decorators/op_decorator.py
|
{
"start": 10770,
"end": 13431
}
|
class DecoratedOpFunction(NamedTuple):
    """Wrapper around the decorated op function to provide commonly used util methods."""

    decorated_fn: Callable[..., Any]

    @property
    def name(self):
        # The op name is the wrapped function's name.
        return self.decorated_fn.__name__

    @lru_cache(maxsize=1)
    def has_context_arg(self) -> bool:
        return is_context_provided(self._get_function_params())

    def get_context_arg(self) -> Parameter:
        if self.has_context_arg():
            return self._get_function_params()[0]
        check.failed("Requested context arg on function that does not have one")

    @lru_cache(maxsize=1)
    def _get_function_params(self) -> Sequence[Parameter]:
        # Cached: the wrapped function's signature never changes, so
        # inspect it only once per instance.
        return get_function_params(self.decorated_fn)

    def has_config_arg(self) -> bool:
        # Consistency/perf: reuse the cached parameter list instead of
        # re-running signature inspection on every call.
        return any(param.name == "config" for param in self._get_function_params())

    def validate_malformed_config(self) -> None:
        from dagster._config.pythonic_config.config import Config
        from dagster._config.pythonic_config.type_check_utils import safe_is_subclass

        positional_inputs = self.positional_inputs()
        for param in self._get_function_params():
            if safe_is_subclass(param.annotation, Config) and param.name in positional_inputs:
                config_argument_warning(param.name, self.name)

    def get_config_arg(self) -> Parameter:
        for param in self._get_function_params():
            if param.name == "config":
                return param
        check.failed("Requested config arg on function that does not have one")

    def get_resource_args(self) -> Sequence[Parameter]:
        return get_resource_args(self.decorated_fn)

    def positional_inputs(self) -> Sequence[str]:
        # Positional inputs are the params left after removing the leading
        # context param, the "config" param, and any resource params.
        params = self._get_function_params()
        input_args = params[1:] if self.has_context_arg() else params
        resource_arg_names = [arg.name for arg in self.get_resource_args()]
        input_args_filtered = [
            input_arg
            for input_arg in input_args
            if input_arg.name != "config" and input_arg.name not in resource_arg_names
        ]
        return positional_arg_name_list(input_args_filtered)

    def has_var_kwargs(self) -> bool:
        params = self._get_function_params()
        # var keyword arg has to be the last argument
        return len(params) > 0 and param_is_var_keyword(params[-1])

    def get_output_annotation(self) -> Any:
        from dagster._core.definitions.inference import infer_output_props

        return infer_output_props(self.decorated_fn).annotation
|
DecoratedOpFunction
|
python
|
spack__spack
|
lib/spack/spack/util/web.py
|
{
"start": 2435,
"end": 5668
}
|
class SpackHTTPSHandler(HTTPSHandler):
    """A custom HTTPS handler that shows more detailed error messages on connection failure."""

    def https_open(self, req):
        try:
            return super().https_open(req)
        except HTTPError:
            # HTTPError is itself a response object; let it propagate untouched.
            raise
        except URLError as e:
            # Wrap connection-level failures with request details.
            raise DetailedURLError(req, e.reason) from e
def custom_ssl_certs() -> Optional[Tuple[bool, str]]:
    """Returns a tuple (is_file, path) if custom SSL certificates are configured and valid."""
    ssl_certs = spack.config.get("config:ssl_certs")
    if not ssl_certs:
        return None

    path = spack.util.path.substitute_path_variables(ssl_certs)
    if not os.path.isabs(path):
        tty.debug(f"certs: relative path not allowed: {path}")
        return None

    try:
        mode = os.stat(path).st_mode
    except OSError as e:
        tty.debug(f"certs: error checking path {path}: {e}")
        return None

    # Only a regular file or a directory is acceptable as a cert source.
    file_type = stat.S_IFMT(mode)
    if file_type not in (stat.S_IFREG, stat.S_IFDIR):
        tty.debug(f"certs: not a file or directory: {path}")
        return None

    return (file_type == stat.S_IFREG, path)
def ssl_create_default_context():
    """Create the default SSL context for urllib with custom certificates if configured."""
    certs = custom_ssl_certs()
    if certs is None:
        # No custom certs configured: fall back to the system defaults.
        return ssl.create_default_context()

    is_file, path = certs
    if is_file:
        tty.debug(f"urllib: certs: using cafile {path}")
        return ssl.create_default_context(cafile=path)

    tty.debug(f"urllib: certs: using capath {path}")
    return ssl.create_default_context(capath=path)
def set_curl_env_for_ssl_certs(curl: Executable) -> None:
    """Configure curl to use custom certs from a file at runtime.

    See: https://curl.se/docs/sslcerts.html item 4
    """
    certs = custom_ssl_certs()
    if certs is None:
        return

    is_file, path = certs
    if not is_file:
        # CURL_CA_BUNDLE only accepts a file, not a directory.
        tty.debug(f"curl: {path} is not a file: default certs will be used.")
        return

    tty.debug(f"curl: using CURL_CA_BUNDLE={path}")
    curl.add_default_env("CURL_CA_BUNDLE", path)
def _urlopen():
    """Build a dispatching open() that honors config:verify_ssl at call time."""
    s3_handler = UrllibS3Handler()
    gcs_handler = GCSHandler()
    error_handler = SpackHTTPDefaultErrorHandler()

    # Build both openers up front: one that verifies SSL certificates...
    verified = build_opener(
        s3_handler,
        gcs_handler,
        SpackHTTPSHandler(context=ssl_create_default_context()),
        error_handler,
    )
    # ...and one that does not.
    unverified = build_opener(
        s3_handler,
        gcs_handler,
        SpackHTTPSHandler(context=ssl._create_unverified_context()),
        error_handler,
    )

    def dispatch_open(fullurl, data=None, timeout=None):
        # Pick the opener per call so config:verify_ssl changes take effect.
        opener = verified if spack.config.get("config:verify_ssl", True) else unverified
        timeout = timeout or spack.config.get("config:connect_timeout", 10)
        return opener.open(fullurl, data, timeout)

    return dispatch_open
#: Dispatches to the correct OpenerDirector.open, based on Spack configuration.
#: Lazily constructed on first use via lang.Singleton.
urlopen = lang.Singleton(_urlopen)

#: User-Agent used in Request objects
SPACK_USER_AGENT = "Spackbot/{0}".format(spack.spack_version)

# NOTE: HTMLParseError is deprecated and never raised.
|
SpackHTTPSHandler
|
python
|
Lightning-AI__lightning
|
tests/tests_pytorch/test_cli.py
|
{
"start": 37946,
"end": 42639
}
|
class TestModelSaveHparams(BoringModel):
    """Boring model whose optimizer, scheduler and activation are injectable
    dependencies, used to test hyperparameter save/restore through the CLI."""

    def __init__(
        self,
        optimizer: OptimizerCallable = torch.optim.Adam,
        scheduler: LRSchedulerCallable = torch.optim.lr_scheduler.ConstantLR,
        activation: torch.nn.Module = lazy_instance(torch.nn.LeakyReLU, negative_slope=0.05),
    ):
        super().__init__()
        # Record constructor arguments so they round-trip through checkpoints.
        self.save_hyperparameters()
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.activation = activation

    def configure_optimizers(self):
        # The injected callables are applied here, not in __init__.
        optimizer = self.optimizer(self.parameters())
        scheduler = self.scheduler(optimizer)
        return {"optimizer": optimizer, "lr_scheduler": scheduler}
def test_lightning_cli_load_from_checkpoint_dependency_injection(cleandir):
    """Fit once through the CLI, then verify the injected dependencies
    (optimizer, scheduler, activation) are recorded in hparams.yaml and in the
    checkpoint, and are re-instantiated by load_from_checkpoint."""
    with mock.patch("sys.argv", ["any.py", "--trainer.max_epochs=1"]):
        cli = LightningCLI(TestModelSaveHparams, run=False, auto_configure_optimizers=False)
        cli.trainer.fit(cli.model)

    # hparams.yaml is written during fit by save_hyperparameters().
    hparams_path = Path(cli.trainer.log_dir) / "hparams.yaml"
    assert hparams_path.is_file()
    hparams = yaml.safe_load(hparams_path.read_text())

    expected_keys = ["_instantiator", "activation", "optimizer", "scheduler"]
    expected_instantiator = "lightning.pytorch.cli.instantiate_module"
    expected_activation = "torch.nn.LeakyReLU"
    expected_optimizer = "torch.optim.Adam"
    expected_scheduler = "torch.optim.lr_scheduler.ConstantLR"

    assert sorted(hparams.keys()) == expected_keys
    assert hparams["_instantiator"] == expected_instantiator
    assert hparams["activation"]["class_path"] == expected_activation
    # The callables may be stored either as a plain string or as a
    # {"class_path": ...} dict -- accept both forms.
    assert hparams["optimizer"] == expected_optimizer or hparams["optimizer"]["class_path"] == expected_optimizer
    assert hparams["scheduler"] == expected_scheduler or hparams["scheduler"]["class_path"] == expected_scheduler

    # The same hyperparameters must also be stored in the checkpoint itself.
    checkpoint_path = next(Path(cli.trainer.log_dir, "checkpoints").glob("*.ckpt"), None)
    assert checkpoint_path.is_file()
    hparams = torch.load(checkpoint_path, weights_only=True)["hyper_parameters"]

    assert sorted(hparams.keys()) == expected_keys
    assert hparams["_instantiator"] == expected_instantiator
    assert hparams["activation"]["class_path"] == expected_activation
    assert hparams["optimizer"] == expected_optimizer or hparams["optimizer"]["class_path"] == expected_optimizer
    assert hparams["scheduler"] == expected_scheduler or hparams["scheduler"]["class_path"] == expected_scheduler

    # Loading must re-instantiate the injected dependencies.
    model = TestModelSaveHparams.load_from_checkpoint(checkpoint_path)
    assert isinstance(model, TestModelSaveHparams)
    assert isinstance(model.activation, torch.nn.LeakyReLU)
    assert model.activation.negative_slope == 0.05
    optimizer, lr_scheduler = model.configure_optimizers().values()
    assert isinstance(optimizer, torch.optim.Adam)
    assert isinstance(lr_scheduler, torch.optim.lr_scheduler.ConstantLR)
def test_lightning_cli_load_from_checkpoint_dependency_injection_subclass_mode(cleandir):
    """Fit via the CLI in subclass mode and verify the checkpoint additionally
    records _class_path, so the generic LightningModule.load_from_checkpoint
    can rebuild the concrete model with its injected dependencies."""
    with mock.patch("sys.argv", ["any.py", "--trainer.max_epochs=1", "--model=TestModelSaveHparams"]):
        cli = LightningCLI(TestModelSaveHparams, run=False, auto_configure_optimizers=False, subclass_mode_model=True)
        cli.trainer.fit(cli.model)

    # Subclass mode adds _class_path on top of the usual recorded keys.
    expected_keys = ["_class_path", "_instantiator", "activation", "optimizer", "scheduler"]
    expected_instantiator = "lightning.pytorch.cli.instantiate_module"
    expected_class_path = f"{__name__}.TestModelSaveHparams"
    expected_activation = "torch.nn.LeakyReLU"
    expected_optimizer = "torch.optim.Adam"
    expected_scheduler = "torch.optim.lr_scheduler.ConstantLR"

    checkpoint_path = next(Path(cli.trainer.log_dir, "checkpoints").glob("*.ckpt"), None)
    assert checkpoint_path.is_file()
    hparams = torch.load(checkpoint_path, weights_only=True)["hyper_parameters"]

    assert sorted(hparams.keys()) == expected_keys
    assert hparams["_instantiator"] == expected_instantiator
    assert hparams["_class_path"] == expected_class_path
    assert hparams["activation"]["class_path"] == expected_activation
    # The callables may be stored either as a plain string or as a
    # {"class_path": ...} dict -- accept both forms.
    assert hparams["optimizer"] == expected_optimizer or hparams["optimizer"]["class_path"] == expected_optimizer
    assert hparams["scheduler"] == expected_scheduler or hparams["scheduler"]["class_path"] == expected_scheduler

    # Note: loading through the generic base class, not the concrete subclass.
    model = LightningModule.load_from_checkpoint(checkpoint_path)
    assert isinstance(model, TestModelSaveHparams)
    assert isinstance(model.activation, torch.nn.LeakyReLU)
    assert model.activation.negative_slope == 0.05
    optimizer, lr_scheduler = model.configure_optimizers().values()
    assert isinstance(optimizer, torch.optim.Adam)
    assert isinstance(lr_scheduler, torch.optim.lr_scheduler.ConstantLR)
|
TestModelSaveHparams
|
python
|
ray-project__ray
|
python/ray/experimental/channel/torch_tensor_accelerator_channel.py
|
{
"start": 15266,
"end": 34455
}
|
class _TorchTensorAcceleratorChannel(ChannelInterface):
    def __init__(
        self,
        writer: ray.actor.ActorHandle,
        reader_and_node_list: List[Tuple["ray.actor.ActorHandle", str]],
        typ: "TorchTensorType",
        _meta_channel: Optional["Channel"] = None,
    ):
        """
        A helper channel for TorchTensorAcceleratorChannel that is used to transfer
        lists of torch.Tensors via accelerator. This class can only transfer
        torch.Tensors and cannot transfer other CPU data, such as Exception
        objects or tensors nested inside of a dictionary.

        Args:
            writer: The actor that may write to the channel. None signifies the driver.
            reader_and_node_list: A list of tuples, where each tuple contains a reader
                actor handle and the node ID where the actor is located.
            typ: Type information about the values passed through the channel.
            _meta_channel: A channel used to send metadata for the tensors,
                i.e. shape and dtype. If not provided, and if the typ does not
                specify a static shape and dtype, then a metadata channel based
                on shared memory will be created.
        """
        import torch

        self.torch: ModuleType = torch

        self._writer = writer
        self._writer_rank: Optional[int] = None
        self._reader_and_node_list = reader_and_node_list
        self._reader_ranks: Optional[List[int]] = None
        self._writer_registered: bool = False
        self._reader_registered: bool = False

        ctx = ChannelContext.get_current()
        assert isinstance(
            typ.communicator_id, str
        ), f"accelerator group ID ({typ.communicator_id}) must be a str."
        self._typ = typ
        self._static_shape = typ.static_shape

        assert self._typ.communicator_id is not None, "No accelerator group specified."
        self._accelerator_group_id: str = self._typ.communicator_id
        # If the communicators does not contain the group_id, it means the current
        # process is the driver, and there's no need to fetch the comm_group.
        if self._typ.communicator_id in ctx.communicators:
            self._accelerator_group: "Communicator" = ctx.communicators[
                self._typ.communicator_id
            ]
            assert (
                self._accelerator_group is not None
            ), "ChannelContext.accelerator_group is not initialized."

            self._writer_rank = self._accelerator_group.get_rank(self._writer)
            self._reader_ranks = [
                self._accelerator_group.get_rank(reader)
                for reader, _ in self._reader_and_node_list
            ]

            if (
                self._writer_rank is not None
                and self._writer_rank == self._accelerator_group.get_self_rank()
            ):
                self._writer_registered = True

            if (
                self._reader_ranks
                and self._accelerator_group.get_self_rank() in self._reader_ranks
            ):
                self._reader_registered = True

        # If the channel type specifies that the tensor shape is static, then the
        # receiver can allocate buffers without needing to coordinate with the
        # sender. We set the metadata on the first send-recv op. Thereafter,
        # the sender must ensure that sent tensors match this metadata, and the
        # receiver will allocate tensors with this shape.
        self._static_tensor_metadata: Optional[List[_TorchTensorMetadata]] = None

        self._meta_channel: Optional[Channel] = _meta_channel
        if self._meta_channel is None and self._writer_registered:
            # We are the writer. Therefore, we also need to allocate a metadata
            # channel that will be used to send the shape and dtype of the
            # tensor to the receiver(s).
            metadata_type = SharedMemoryType()
            self._meta_channel = metadata_type.create_channel(
                self._writer,
                self._reader_and_node_list,
                None,
            )

    def ensure_registered_as_writer(self):
        # Writing requires membership in the accelerator group and a non-CPU device.
        assert (
            self._accelerator_group is not None
        ), "Actor is not part of an accelerator group"
        assert self._writer_registered
        ctx = ChannelContext.get_current()
        assert ctx.torch_device.type != "cpu"

    def ensure_registered_as_reader(self) -> bool:
        assert (
            self._accelerator_group is not None
        ), "Actor is not part of an accelerator group"
        assert self._reader_registered
        ctx = ChannelContext.get_current()
        assert ctx.torch_device.type != "cpu"

    def __reduce__(self):
        # Serialize by constructor arguments; group state is re-derived on unpickle.
        return (
            self.__class__,
            (
                self._writer,
                self._reader_and_node_list,
                self._typ,
                self._meta_channel,
            ),
        )

    def _get_send_tensors_metadata(
        self, tensors: List["torch.Tensor"]
    ) -> Optional[List[_TorchTensorMetadata]]:
        """
        Helper method to get the metadata that should be sent to the reader so
        that they can allocate the proper-sized buffer(s). Throws error if
        static_shape=True was set and the given tensors do not match the
        inferred shapes.

        Returns: The metadata to send to the reader. None means that we should
            not send any metadata message to the reader.
        """
        ctx = ChannelContext.get_current()

        # TODO(swang): Currently any exceptions thrown during this method are
        # fatal for the DAG because there is no way for the receiver to receive
        # the exception. This can be improved by sending the exception through
        # the CPU-based non-tensor-data channel, if one exists. The tensor
        # channel can send empty data alongside the exception to avoid hanging.

        # Get the shape and dtype of each tensor to send.
        metadata_list = []
        for tensor in tensors:
            # Basic type checking.
            if not isinstance(tensor, self.torch.Tensor):
                raise ValueError("Task must return torch.Tensors")

            if tensor.device != ctx.torch_device:
                raise ValueError(
                    f"torch.Tensor must be on the default device: {ctx.torch_device}"
                )

            metadata = _TorchTensorMetadata(tensor.shape, tensor.dtype)
            metadata_list.append(metadata)

        if self._static_tensor_metadata is not None:
            if metadata_list != self._static_tensor_metadata:
                metadata_str = [
                    f"(shape={m.shape}, dtype={m.dtype})" for m in metadata_list
                ]
                expected_str = [
                    f"(shape={m.shape}, dtype={m.dtype})"
                    for m in self._static_tensor_metadata
                ]
                raise ValueError(
                    "Expected torch.Tensors with shapes and dtypes: "
                    "[" + ", ".join(expected_str) + "], "
                    "found: [" + ", ".join(metadata_str) + "]. "
                    "DAG will shut down."
                )
            # The receiver has already determined the shape and dtype of the
            # tensors from a previous send, so no need to send the metadata
            # again.
            return None

        if self._static_shape:
            # The shape and dtype is static. This is the first send op and
            # afterwards, a ValueError will be thrown if the sent tensors do
            # not match this metadata.
            self._static_tensor_metadata = metadata_list
        return metadata_list

    def write(
        self,
        tensors: List["torch.Tensor"],
        timeout: Optional[float] = None,
    ):
        """
        Write a list of tensors via accelerator:
        1) Send the tensor metadata, i.e. the shape and dtypes of all tensors
           via the shared-memory metadata channel.
        2) Send the tensor data via accelerator.

        If static_shape=True was set, then we only perform step (1) on the
        first message. The reader is expected to reuse the sent metadata for
        subsequent messages.
        """
        self.ensure_registered_as_writer()
        import torch

        for tensor in tensors:
            assert isinstance(
                tensor, torch.Tensor
            ), f"{tensor} must be instance of torch.Tensor"

        # Send the tensors metadata so that the receiver knows what buffers to
        # allocate.
        metadata = self._get_send_tensors_metadata(tensors)
        if metadata is not None:
            self._meta_channel.write(metadata)

        # NOTE(swang): We must send the metadata *before* launching the accelerator
        # send. We are using blocking accelerator ops, so the following calls will
        # block until the kernel has been enqueued. Also, peers must launch the
        # kernel together before either can proceed. Therefore, we send the
        # metadata first so that the receiver can read the metadata and then
        # launch the same accelerator op.
        for tensor in tensors:
            # TODO: If there are multiple readers, can replace with a
            # broadcast.
            for rank in self._reader_ranks:
                self._accelerator_group.send(tensor, rank)

    def _get_recv_tensors_metadata(
        self, timeout: Optional[float] = None
    ) -> List[_TorchTensorMetadata]:
        """
        Get the shape(s) and dtype(s) of the tensors to receive from the
        metadata channel. If static_shape=True was set, then we reuse the first
        metadata received.
        """
        if self._static_tensor_metadata is not None:
            return self._static_tensor_metadata

        meta = self._meta_channel.read(timeout)
        if self._static_shape:
            self._static_tensor_metadata = meta
        return meta

    def read(
        self,
        timeout: Optional[float] = None,
    ) -> Union["torch.Tensor", List["torch.Tensor"]]:
        """
        Receive a list of tensors.

        (1) Receive the tensor metadata via the shared-memory metadata channel.
        (2) Allocate buffers on our default device according to the received
            tensor metadata.
        (3) Receive the tensor data via accelerator.

        If static_data=True was set, then we only perform step (1) on the first
        message. Subsequent messages reuse the same metadata.

        NOTE: Currently `timeout` only applies to receiving the CPU-based
        tensor metadata. The GPU recv may exceed the timeout without throwing
        an error.
        """
        self.ensure_registered_as_reader()

        meta_list: List[_TorchTensorMetadata] = self._get_recv_tensors_metadata(timeout)

        bufs: List["torch.Tensor"] = []
        for meta in meta_list:
            buf = self._accelerator_group.recv(
                meta.shape, meta.dtype, self._writer_rank, _torch_tensor_allocator
            )
            bufs.append(buf)
        # TODO: Sync CUDA stream after receiving all tensors, instead of after
        # each tensor.
        return bufs

    def close(self) -> None:
        # Tear down the metadata channel and this process's communicator handle.
        self._meta_channel.close()

        self._accelerator_group.destroy()
        ctx = ChannelContext.get_current()
        if self._accelerator_group_id in ctx.communicators:
            del ctx.communicators[self._accelerator_group_id]
def _do_init_communicator(
    self,
    group_id,
    world_size,
    comm_id,
    rank,
    actor_handles,
    use_communication_streams,
    custom_communicator: Optional[Communicator] = None,
):
    # Runs on the actor via actor.__ray_call__.remote(...), so `self` is the
    # actor instance (see _init_communicator below).
    if not custom_communicator:
        assert (
            AcceleratorContext.get().accelerator_count > 0
        ), "Actors participating in Communication group must have at least one Accelerator assigned"

    ctx = ChannelContext.get_current()
    if custom_communicator is not None:
        # Custom communicator: initialize it with this actor's rank, then register.
        custom_communicator.initialize(rank)
        ctx.communicators[group_id] = custom_communicator
    else:
        # default to CommGroup
        ctx.communicators[group_id] = AcceleratorContext.get().create_communicator(
            world_size,
            comm_id,
            rank,
            actor_handles,
            AcceleratorContext.get().current_stream(),
            use_communication_streams,
        )
def _do_destroy_communicator(self, group_id):
    """Destroy this actor's communicator for ``group_id`` (no-op if absent)."""
    ctx = ChannelContext.get_current()
    if group_id in ctx.communicators:
        ctx.communicators[group_id].destroy()
    # Keep the communicator group in the map after destruction in case there is
    # still a task loop running.
def _do_check_has_accelerators(self) -> bool:
    """Return whether this actor has at least one accelerator assigned.

    The previous ``-> str`` annotation was wrong: the comparison yields a bool,
    and callers use the result as a truth value.
    """
    return AcceleratorContext.get().accelerator_count > 0
def do_register_accelerator_context(self, name: str, communicator: Type[Communicator]):
    """Register a custom accelerator context on this actor.

    Runs on the actor via ``actor.__ray_call__.remote(...)``; ``self`` is the
    actor instance.
    """
    register_accelerator_context(name, communicator)


def _do_get_unique_communication_id(self) -> str:
    """Generate a unique communicator id on this actor.

    NOTE(review): the previous ``-> bool`` annotation was wrong -- the result
    is used as ``comm_id`` for communicator creation. Assuming the id is a
    string; confirm against AcceleratorContext.generate_communicator_id.
    """
    return AcceleratorContext.get().generate_communicator_id()
def _get_ranks(
    actors: List[ray.actor.ActorHandle], custom_comm_group: Optional[Communicator]
) -> List[int]:
    """
    Get ranks for the communicator group to use. If custom_comm_group is specified,
    return the ranks of the actors in the custom communicator group, in the same
    order of the actors; otherwise, return list(range(len(actors))).

    Args:
        actors: A list of actors that participate in the communicator group.
        custom_comm_group: The custom communicator group to use.
    """
    if custom_comm_group is None:
        return list(range(len(actors)))

    # The original code asserted the world-size/actor-count match twice with
    # different messages; a single (more informative) check is sufficient.
    assert len(actors) == custom_comm_group.get_world_size(), (
        "The world size of the custom communicator group "
        f"({custom_comm_group.get_world_size()}) "
        "does not match the number of actors "
        f"({len(actors)})."
    )
    ranks = []
    for actor in actors:
        rank = custom_comm_group.get_rank(actor)
        assert rank not in ranks, "Duplicate rank in custom communicator group"
        ranks.append(rank)
    return ranks
def _init_communicator(
    actors: List[ray.actor.ActorHandle],
    custom_communicator: Optional[Communicator] = None,
    use_communication_streams: bool = False,
    accelerator_module_name: Optional[str] = None,
    accelerator_communicator_cls: Optional[Type[Communicator]] = None,
) -> str:
    """
    Initialize a communicator group with the given actors. If a custom communicator
    group is provided, then it will be used, otherwise a new communicator group
    will be created.

    Args:
        actors: A list of actors that participate in the communicator group.
        custom_communicator: A custom communicator group to initialize.
        use_communication_streams: Whether to use dedicated send and recv
            streams for communication. If True, communication and computation
            can be overlapped to improve performance.
        accelerator_module_name: Optional name of the accelerator module to use.
        accelerator_communicator_cls: Optional communicator class for the accelerator.

    Returns:
        The unique id of the initialized communicator group.
    """
    ctx = ChannelContext.get_current()

    is_cpu_communicator = custom_communicator and isinstance(
        custom_communicator, CPUCommunicator
    )

    # Register accelerator context for all actors if accelerator is not default
    if accelerator_module_name and accelerator_communicator_cls:
        if is_accelerator_context_registered():
            ray.get(
                [
                    actor.__ray_call__.remote(
                        do_register_accelerator_context,
                        accelerator_module_name,
                        accelerator_communicator_cls,
                    )
                    for actor in actors
                ]
            )

    has_accelerators = ray.get(
        [actor.__ray_call__.remote(_do_check_has_accelerators) for actor in actors]
    )
    for has_accelerator, actor in zip(has_accelerators, actors):
        if not has_accelerator and not is_cpu_communicator:
            raise ValueError(
                f"Actor {actor} returns a tensor with type hint "
                'TorchTensor(transport="accelerator") or '
                "TorchTensor(transport=accelerator_group_handle) "
                "but actor does not have an accelerator assigned by Ray."
            )

    actor_ids = {actor._ray_actor_id for actor in actors}
    assert len(actor_ids) == len(actors), "Actors must be unique"

    # Allocate a communicator ID on one of the actors that will participate in
    # the group. This is in case the driver is not on the same node as one of
    # the communicator actors.
    comm_id = ray.get(actors[0].__ray_call__.remote(_do_get_unique_communication_id))

    # Used to uniquely identify this communicator group.
    group_id = str(uuid.uuid4())

    if custom_communicator is not None:
        logger.info(
            f"Initializing custom communicator group {group_id} on actors: {actors}"
        )
    else:
        logger.info(f"Creating communicator group {group_id} on actors: {actors}")

    world_size = len(actors)
    ranks = _get_ranks(actors, custom_communicator)
    init_tasks = [
        actor.__ray_call__.remote(
            _do_init_communicator,
            group_id,
            world_size,
            comm_id,
            rank,
            actors,
            use_communication_streams,
            custom_communicator,
        )
        for rank, actor in zip(ranks, actors)
    ]
    try:
        ray.get(init_tasks, timeout=30)
    except ray.exceptions.GetTimeoutError:
        # Fixed: the two implicitly-concatenated literals were missing a
        # separating space ("...communicator groupcreation...").
        logger.warning(
            "Communicator group creation not done after 30s. Communicator group "
            "creation may be hung."
        )
        ray.get(init_tasks)

    logger.info("Communicator group initialized.")

    if custom_communicator is not None:
        ctx.communicator_handles[group_id] = CommunicatorHandle(
            actor_handles=custom_communicator.get_actor_handles(),
        )
    else:
        ctx.communicator_handles[group_id] = CommunicatorHandle(
            actor_handles=actors,
        )

    return group_id
def _destroy_communicator(group_id: str) -> None:
    """
    Destroy the communicator group with the given ID.
    """
    ctx = ChannelContext.get_current()
    if group_id not in ctx.communicator_handles:
        return

    group = ctx.communicator_handles[group_id]
    actors = group.get_actor_handles()
    destroy_tasks = [
        actor.__ray_call__.remote(
            _do_destroy_communicator,
            group_id,
        )
        for actor in actors
    ]

    _, unready = ray.wait(destroy_tasks, timeout=30, num_returns=len(destroy_tasks))
    if unready:
        # Fixed: missing space between the concatenated literals
        # ("...Communicatorgroup destruction...").
        logger.warning(
            "Communicator group destruction not done after 30s. Communicator "
            "group destruction may be hung."
        )

    del ctx.communicator_handles[group_id]
|
_TorchTensorAcceleratorChannel
|
python
|
pytorch__pytorch
|
test/distributed/algorithms/ddp_comm_hooks/test_ddp_hooks.py
|
{
"start": 1334,
"end": 1527
}
|
class TestDdpCommHook(nn.Module):
    """Tiny module for exercising DDP comm hooks; output depends on the rank."""

    def __init__(self) -> None:
        super().__init__()
        self.t0 = Task()

    def forward(self, x, rank):
        # Raise the input to (1 + rank) so each rank computes a different value.
        return self.t0(x ** (1 + rank))
|
TestDdpCommHook
|
python
|
tensorflow__tensorflow
|
tensorflow/python/training/input_test.py
|
{
"start": 2811,
"end": 3708
}
|
class LimitEpochsTest(test_lib.TestCase):
    def testNoLimit(self):
        # Without num_epochs, limit_epochs yields the value indefinitely.
        with ops.Graph().as_default(), self.cached_session():
            seven = constant_op.constant(7)
            seven_forever = inp.limit_epochs(seven)
            variables.local_variables_initializer().run()
            for _ in range(100):
                self.assertEqual(7, self.evaluate(seven_forever))

    def testLimit(self):
        # With num_epochs=2, the third evaluation raises OutOfRangeError.
        with ops.Graph().as_default(), self.cached_session():
            love_me = constant_op.constant("Love Me")
            love_me_two_times = inp.limit_epochs(love_me, num_epochs=2)
            self.evaluate(variables.global_variables_initializer())
            variables.local_variables_initializer().run()
            self.assertEqual(b"Love Me", self.evaluate(love_me_two_times))
            self.assertEqual(b"Love Me", self.evaluate(love_me_two_times))
            with self.assertRaises(errors_impl.OutOfRangeError):
                self.evaluate(love_me_two_times)
|
LimitEpochsTest
|
python
|
huggingface__transformers
|
src/transformers/models/groupvit/modeling_groupvit.py
|
{
"start": 5656,
"end": 6240
}
|
class GroupViTCrossAttentionLayer(nn.Module):
    """Cross-attention block: queries attend to `key` states, followed by an MLP,
    each with a residual connection, then a final post-norm."""

    def __init__(self, config: GroupViTVisionConfig):
        super().__init__()
        self.attn = GroupViTAttention(config)
        self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.mlp = GroupViTMLP(config)
        self.norm_post = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, query, key):
        x = query
        # Residual cross-attention; [0] selects the attention output tensor.
        x = x + self.attn(query, encoder_hidden_states=key)[0]
        # Residual MLP with pre-norm.
        x = x + self.mlp(self.norm2(x))
        x = self.norm_post(x)
        return x
|
GroupViTCrossAttentionLayer
|
python
|
Textualize__textual
|
src/textual/css/model.py
|
{
"start": 792,
"end": 2353
}
|
class CombinatorType(Enum):
    """Type of combinator."""

    SAME = 1
    """Selector is combined with previous selector"""

    DESCENDENT = 2
    """Selector is a descendant of the previous selector"""

    CHILD = 3
    """Selector is an immediate child of the previous selector"""
def _check_universal(name: str, node: DOMNode) -> bool:
    """Check node matches universal selector.

    Args:
        name: Selector name.
        node: A DOM node.

    Returns:
        `True` if the selector matches.
    """
    # Universal matches everything except Textual's internal system widgets.
    is_system_widget = node.has_class("-textual-system")
    return not is_system_widget
def _check_type(name: str, node: DOMNode) -> bool:
    """Check node matches a type selector.

    Args:
        name: Selector name.
        node: A DOM node.

    Returns:
        `True` if the selector matches.
    """
    type_names = node._css_type_names
    return name in type_names
def _check_class(name: str, node: DOMNode) -> bool:
    """Check node matches a class selector.

    Args:
        name: Selector name.
        node: A DOM node.

    Returns:
        `True` if the selector matches.
    """
    node_classes = node._classes
    return name in node_classes
def _check_id(name: str, node: DOMNode) -> bool:
    """Check node matches an ID selector.

    Args:
        name: Selector name.
        node: A DOM node.

    Returns:
        `True` if the selector matches.
    """
    return name == node.id
# Dispatch table: selector type -> predicate used to test a node.
# NESTED selectors reuse the universal check.
_CHECKS = {
    SelectorType.UNIVERSAL: _check_universal,
    SelectorType.TYPE: _check_type,
    SelectorType.CLASS: _check_class,
    SelectorType.ID: _check_id,
    SelectorType.NESTED: _check_universal,
}
@dataclass
|
CombinatorType
|
python
|
kamyu104__LeetCode-Solutions
|
Python/interleaving-string.py
|
{
"start": 838,
"end": 1667
}
|
class Solution2(object):
    # @return a boolean
    def isInterleave(self, s1, s2, s3):
        # DP over prefixes: match[i][j] is True iff s3[:i+j] is an
        # interleaving of s1[:i] and s2[:j]. (Python 2 code: uses xrange.)
        if len(s1) + len(s2) != len(s3):
            return False
        match = [[False for i in xrange(len(s2) + 1)] for j in xrange(len(s1) + 1)]
        match[0][0] = True
        # First column: s3 built from s1 alone.
        for i in xrange(1, len(s1) + 1):
            match[i][0] = match[i - 1][0] and s1[i - 1] == s3[i - 1]
        # First row: s3 built from s2 alone.
        for j in xrange(1, len(s2) + 1):
            match[0][j] = match[0][j - 1] and s2[j - 1] == s3[j - 1]
        # General case: the next s3 char comes from either s1 or s2.
        for i in xrange(1, len(s1) + 1):
            for j in xrange(1, len(s2) + 1):
                match[i][j] = (match[i - 1][j] and s1[i - 1] == s3[i + j - 1]) \
                              or (match[i][j - 1] and s2[j - 1] == s3[i + j - 1])
        return match[-1][-1]
# Time: O(m * n)
# Space: O(m * n)
# Recursive + Hash
|
Solution2
|
python
|
tensorflow__tensorflow
|
tensorflow/python/kernel_tests/linalg/linalg_ops_test.py
|
{
"start": 15938,
"end": 16068
}
|
class LUReconstructStatic(test.TestCase, _LUReconstruct):
    # Run the shared _LUReconstruct checks with statically-known shapes.
    use_static_shape = True
@test_util.run_all_in_graph_and_eager_modes
|
LUReconstructStatic
|
python
|
realpython__materials
|
python-contact-book/source_code_step_4/rpcontacts/model.py
|
{
"start": 160,
"end": 706
}
|
class ____:
def __init__(self):
self.model = self._createModel()
@staticmethod
def _createModel():
"""Create and set up the model."""
tableModel = QSqlTableModel()
tableModel.setTable("contacts")
tableModel.setEditStrategy(QSqlTableModel.OnFieldChange)
tableModel.select()
headers = ("ID", "Name", "Job", "Email")
for columnIndex, header in enumerate(headers):
tableModel.setHeaderData(columnIndex, Qt.Horizontal, header)
return tableModel
|
ContactsModel
|
python
|
huggingface__transformers
|
src/transformers/models/visual_bert/modeling_visual_bert.py
|
{
"start": 11985,
"end": 12645
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->VisualBert
|
VisualBertIntermediate
|
python
|
getsentry__sentry
|
src/sentry_plugins/github/webhooks/events/pull_request.py
|
{
"start": 322,
"end": 4200
}
|
class ____(Webhook):
# https://developer.github.com/v3/activity/events/types/#pullrequestevent
def __call__(self, event, organization):
# TODO(maxbittker) handle is_apps correctly (What does this comment mean?)
is_apps = "installation" in event
try:
repo = Repository.objects.get(
organization_id=organization.id,
provider="github_apps" if is_apps else "github",
external_id=str(event["repository"]["id"]),
)
except Repository.DoesNotExist:
raise Http404()
# We need to track GitHub's "full_name" which is the repository slug.
# This is needed to access the API since `external_id` isn't sufficient.
if repo.config.get("name") != event["repository"]["full_name"]:
repo.config["name"] = event["repository"]["full_name"]
repo.save()
pull_request = event["pull_request"]
number = pull_request["number"]
title = pull_request["title"]
body = pull_request["body"]
user = pull_request["user"]
# The value of the merge_commit_sha attribute changes depending on the
# state of the pull request. Before a pull request is merged, the
# merge_commit_sha attribute holds the SHA of the test merge commit.
# After a pull request is merged, the attribute changes depending on how
# the pull request was merged:
# - If the pull request was merged as a merge commit, the attribute
# represents the SHA of the merge commit.
# - If the pull request was merged via a squash, the attribute
# represents the SHA of the squashed commit on the base branch.
# - If the pull request was rebased, the attribute represents the commit
# that the base branch was updated to.
# https://developer.github.com/v3/pulls/#get-a-single-pull-request
merge_commit_sha = pull_request["merge_commit_sha"] if pull_request["merged"] else None
author_email = "{}@localhost".format(user["login"][:65])
try:
commit_author = CommitAuthor.objects.get(
external_id=get_external_id(user["login"]), organization_id=organization.id
)
author_email = commit_author.email
except CommitAuthor.DoesNotExist:
rpc_user = user_service.get_user_by_social_auth(
organization_id=organization.id,
provider="github",
uid=user["id"],
)
if rpc_user is not None:
author_email = rpc_user.email
try:
author = CommitAuthor.objects.get(
organization_id=organization.id, external_id=get_external_id(user["login"])
)
except CommitAuthor.DoesNotExist:
try:
author = CommitAuthor.objects.get(
organization_id=organization.id, email=author_email
)
except CommitAuthor.DoesNotExist:
author = CommitAuthor.objects.create(
organization_id=organization.id,
email=author_email,
external_id=get_external_id(user["login"]),
name=user["login"][:128],
)
author.preload_users()
try:
PullRequest.objects.update_or_create(
organization_id=organization.id,
repository_id=repo.id,
key=number,
defaults={
"organization_id": organization.id,
"title": title,
"author": author,
"message": body,
"merge_commit_sha": merge_commit_sha,
},
)
except IntegrityError:
pass
|
PullRequestEventWebhook
|
python
|
PyCQA__isort
|
tests/unit/test_exceptions.py
|
{
"start": 359,
"end": 645
}
|
class ____(TestISortError):
def setup_class(self):
self.instance: exceptions.ExistingSyntaxErrors = exceptions.ExistingSyntaxErrors(
"file_path"
)
def test_variables(self):
assert self.instance.file_path == "file_path"
|
TestExistingSyntaxErrors
|
python
|
pyca__cryptography
|
src/cryptography/hazmat/primitives/asymmetric/ec.py
|
{
"start": 8195,
"end": 8437
}
|
class ____(EllipticCurve):
name = "secp521r1"
key_size = 521
group_order = 0x1FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFA51868783BF2F966B7FCC0148F709A5D03BB5C9B8899C47AEBB6FB71E91386409 # noqa: E501
|
SECP521R1
|
python
|
django__django
|
django/contrib/auth/hashers.py
|
{
"start": 10669,
"end": 12813
}
|
class ____(BasePasswordHasher):
"""
Secure password hashing using the PBKDF2 algorithm (recommended)
Configured to use PBKDF2 + HMAC + SHA256.
The result is a 64 byte binary string. Iterations may be changed
safely but you must rename the algorithm if you change SHA256.
"""
algorithm = "pbkdf2_sha256"
iterations = 1_500_000
digest = hashlib.sha256
def encode(self, password, salt, iterations=None):
self._check_encode_args(password, salt)
iterations = iterations or self.iterations
password = force_str(password)
salt = force_str(salt)
hash = pbkdf2(password, salt, iterations, digest=self.digest)
hash = base64.b64encode(hash).decode("ascii").strip()
return "%s$%d$%s$%s" % (self.algorithm, iterations, salt, hash)
def decode(self, encoded):
algorithm, iterations, salt, hash = encoded.split("$", 3)
assert algorithm == self.algorithm
return {
"algorithm": algorithm,
"hash": hash,
"iterations": int(iterations),
"salt": salt,
}
def verify(self, password, encoded):
decoded = self.decode(encoded)
encoded_2 = self.encode(password, decoded["salt"], decoded["iterations"])
return constant_time_compare(encoded, encoded_2)
def safe_summary(self, encoded):
decoded = self.decode(encoded)
return {
_("algorithm"): decoded["algorithm"],
_("iterations"): decoded["iterations"],
_("salt"): mask_hash(decoded["salt"]),
_("hash"): mask_hash(decoded["hash"]),
}
def must_update(self, encoded):
decoded = self.decode(encoded)
update_salt = must_update_salt(decoded["salt"], self.salt_entropy)
return (decoded["iterations"] != self.iterations) or update_salt
def harden_runtime(self, password, encoded):
decoded = self.decode(encoded)
extra_iterations = self.iterations - decoded["iterations"]
if extra_iterations > 0:
self.encode(password, decoded["salt"], extra_iterations)
|
PBKDF2PasswordHasher
|
python
|
Textualize__textual
|
src/textual/await_complete.py
|
{
"start": 376,
"end": 2598
}
|
class ____:
"""An 'optionally-awaitable' object which runs one or more coroutines (or other awaitables) concurrently."""
def __init__(
self, *awaitables: Awaitable, pre_await: CallbackType | None = None
) -> None:
"""Create an AwaitComplete.
Args:
awaitables: One or more awaitables to run concurrently.
"""
self._awaitables = awaitables
self._future: Future[Any] = gather(*awaitables)
self._pre_await: CallbackType | None = pre_await
self._caller = get_caller_file_and_line()
def __rich_repr__(self) -> rich.repr.Result:
yield self._awaitables
yield "pre_await", self._pre_await, None
yield "caller", self._caller, None
def set_pre_await_callback(self, pre_await: CallbackType | None) -> None:
"""Set a callback to run prior to awaiting.
This is used by Textual, mainly to check for possible deadlocks.
You are unlikely to need to call this method in an app.
Args:
pre_await: A callback.
"""
self._pre_await = pre_await
def call_next(self, node: MessagePump) -> Self:
"""Await after the next message.
Args:
node: The node which created the object.
"""
node.call_next(self)
return self
async def __call__(self) -> Any:
return await self
def __await__(self) -> Generator[Any, None, Any]:
_rich_traceback_omit = True
if self._pre_await is not None:
self._pre_await()
return self._future.__await__()
@property
def is_done(self) -> bool:
"""`True` if the task has completed."""
return self._future.done()
@property
def exception(self) -> BaseException | None:
"""An exception if the awaitables failed."""
if self._future.done():
return self._future.exception()
return None
@classmethod
def nothing(cls):
"""Returns an already completed instance of AwaitComplete."""
instance = cls()
instance._future = Future()
instance._future.set_result(None) # Mark it as completed with no result
return instance
|
AwaitComplete
|
python
|
allegroai__clearml
|
clearml/backend_api/services/v2_20/models.py
|
{
"start": 4285,
"end": 18903
}
|
class ____(NonStrictDataModel):
"""
:param id: Model id
:type id: str
:param name: Model name
:type name: str
:param user: Associated user id
:type user: str
:param company: Company id
:type company: str
:param created: Model creation time
:type created: datetime.datetime
:param last_update: Model last update time
:type last_update: datetime.datetime
:param task: Task ID of task in which the model was created
:type task: str
:param parent: Parent model ID
:type parent: str
:param project: Associated project ID
:type project: str
:param comment: Model comment
:type comment: str
:param tags: User-defined tags
:type tags: Sequence[str]
:param system_tags: System tags. This field is reserved for system use, please
don't use it.
:type system_tags: Sequence[str]
:param framework: Framework on which the model is based. Should be identical to
the framework of the task which created the model
:type framework: str
:param design: Json object representing the model design. Should be identical
to the network design of the task which created the model
:type design: dict
:param labels: Json object representing the ids of the labels in the model. The
keys are the layers' names and the values are the ids.
:type labels: dict
:param uri: URI for the model, pointing to the destination storage.
:type uri: str
:param ready: Indication if the model is final and can be used by other tasks
:type ready: bool
:param ui_cache: UI cache for this model
:type ui_cache: dict
:param metadata: Model metadata
:type metadata: dict
:param stats: Model statistics
:type stats: dict
"""
_schema = {
"properties": {
"comment": {"description": "Model comment", "type": ["string", "null"]},
"company": {"description": "Company id", "type": ["string", "null"]},
"created": {
"description": "Model creation time",
"format": "date-time",
"type": ["string", "null"],
},
"design": {
"additionalProperties": True,
"description": "Json object representing the model design. Should be identical to the network design of the task which created the model",
"type": ["object", "null"],
},
"framework": {
"description": "Framework on which the model is based. Should be identical to the framework of the task which created the model",
"type": ["string", "null"],
},
"id": {"description": "Model id", "type": ["string", "null"]},
"labels": {
"additionalProperties": {"type": "integer"},
"description": "Json object representing the ids of the labels in the model. The keys are the layers' names and the values are the ids.",
"type": ["object", "null"],
},
"last_update": {
"description": "Model last update time",
"format": "date-time",
"type": ["string", "null"],
},
"metadata": {
"additionalProperties": {"$ref": "#/definitions/metadata_item"},
"description": "Model metadata",
"type": ["object", "null"],
},
"name": {"description": "Model name", "type": ["string", "null"]},
"parent": {"description": "Parent model ID", "type": ["string", "null"]},
"project": {
"description": "Associated project ID",
"type": ["string", "null"],
},
"ready": {
"description": "Indication if the model is final and can be used by other tasks",
"type": ["boolean", "null"],
},
"stats": {
"description": "Model statistics",
"properties": {
"labels_count": {
"description": "Number of the model labels",
"type": "integer",
}
},
"type": ["object", "null"],
},
"system_tags": {
"description": "System tags. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"task": {
"description": "Task ID of task in which the model was created",
"type": ["string", "null"],
},
"ui_cache": {
"additionalProperties": True,
"description": "UI cache for this model",
"type": ["object", "null"],
},
"uri": {
"description": "URI for the model, pointing to the destination storage.",
"type": ["string", "null"],
},
"user": {"description": "Associated user id", "type": ["string", "null"]},
},
"type": "object",
}
def __init__(
self,
id: Optional[str] = None,
name: Optional[str] = None,
user: Optional[str] = None,
company: Optional[str] = None,
created: Optional[str] = None,
last_update: Optional[str] = None,
task: Optional[str] = None,
parent: Optional[str] = None,
project: Optional[str] = None,
comment: Optional[str] = None,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
framework: Optional[str] = None,
design: Optional[dict] = None,
labels: Optional[dict] = None,
uri: Optional[str] = None,
ready: Optional[bool] = None,
ui_cache: Optional[dict] = None,
metadata: Optional[dict] = None,
stats: Optional[dict] = None,
**kwargs: Any
) -> None:
super(Model, self).__init__(**kwargs)
self.id = id
self.name = name
self.user = user
self.company = company
self.created = created
self.last_update = last_update
self.task = task
self.parent = parent
self.project = project
self.comment = comment
self.tags = tags
self.system_tags = system_tags
self.framework = framework
self.design = design
self.labels = labels
self.uri = uri
self.ready = ready
self.ui_cache = ui_cache
self.metadata = metadata
self.stats = stats
@schema_property("id")
def id(self) -> Optional[str]:
return self._property_id
@id.setter
def id(self, value: Optional[str]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
@schema_property("name")
def name(self) -> Optional[str]:
return self._property_name
@name.setter
def name(self, value: Optional[str]) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("user")
def user(self) -> Optional[str]:
return self._property_user
@user.setter
def user(self, value: Optional[str]) -> None:
if value is None:
self._property_user = None
return
self.assert_isinstance(value, "user", six.string_types)
self._property_user = value
@schema_property("company")
def company(self) -> Optional[str]:
return self._property_company
@company.setter
def company(self, value: Optional[str]) -> None:
if value is None:
self._property_company = None
return
self.assert_isinstance(value, "company", six.string_types)
self._property_company = value
@schema_property("created")
def created(self) -> Optional[str]:
return self._property_created
@created.setter
def created(self, value: Optional[str]) -> None:
if value is None:
self._property_created = None
return
self.assert_isinstance(value, "created", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_created = value
@schema_property("last_update")
def last_update(self) -> Optional[str]:
return self._property_last_update
@last_update.setter
def last_update(self, value: Optional[str]) -> None:
if value is None:
self._property_last_update = None
return
self.assert_isinstance(value, "last_update", six.string_types + (datetime,))
if not isinstance(value, datetime):
value = parse_datetime(value)
self._property_last_update = value
@schema_property("task")
def task(self) -> Optional[str]:
return self._property_task
@task.setter
def task(self, value: Optional[str]) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("parent")
def parent(self) -> Optional[str]:
return self._property_parent
@parent.setter
def parent(self, value: Optional[str]) -> None:
if value is None:
self._property_parent = None
return
self.assert_isinstance(value, "parent", six.string_types)
self._property_parent = value
@schema_property("project")
def project(self) -> Optional[str]:
return self._property_project
@project.setter
def project(self, value: Optional[str]) -> None:
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
@schema_property("comment")
def comment(self) -> Optional[str]:
return self._property_comment
@comment.setter
def comment(self, value: Optional[str]) -> None:
if value is None:
self._property_comment = None
return
self.assert_isinstance(value, "comment", six.string_types)
self._property_comment = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("framework")
def framework(self) -> Optional[str]:
return self._property_framework
@framework.setter
def framework(self, value: Optional[str]) -> None:
if value is None:
self._property_framework = None
return
self.assert_isinstance(value, "framework", six.string_types)
self._property_framework = value
@schema_property("design")
def design(self) -> Optional[dict]:
return self._property_design
@design.setter
def design(self, value: Optional[dict]) -> None:
if value is None:
self._property_design = None
return
self.assert_isinstance(value, "design", (dict,))
self._property_design = value
@schema_property("labels")
def labels(self) -> Optional[dict]:
return self._property_labels
@labels.setter
def labels(self, value: Optional[dict]) -> None:
if value is None:
self._property_labels = None
return
self.assert_isinstance(value, "labels", (dict,))
self._property_labels = value
@schema_property("uri")
def uri(self) -> Optional[str]:
return self._property_uri
@uri.setter
def uri(self, value: Optional[str]) -> None:
if value is None:
self._property_uri = None
return
self.assert_isinstance(value, "uri", six.string_types)
self._property_uri = value
@schema_property("ready")
def ready(self) -> Optional[bool]:
return self._property_ready
@ready.setter
def ready(self, value: Optional[bool]) -> None:
if value is None:
self._property_ready = None
return
self.assert_isinstance(value, "ready", (bool,))
self._property_ready = value
@schema_property("ui_cache")
def ui_cache(self) -> Optional[dict]:
return self._property_ui_cache
@ui_cache.setter
def ui_cache(self, value: Optional[dict]) -> None:
if value is None:
self._property_ui_cache = None
return
self.assert_isinstance(value, "ui_cache", (dict,))
self._property_ui_cache = value
@schema_property("metadata")
def metadata(self) -> Optional[dict]:
return self._property_metadata
@metadata.setter
def metadata(self, value: Optional[dict]) -> None:
if value is None:
self._property_metadata = None
return
self.assert_isinstance(value, "metadata", (dict,))
self._property_metadata = value
@schema_property("stats")
def stats(self) -> Optional[dict]:
return self._property_stats
@stats.setter
def stats(self, value: Optional[dict]) -> None:
if value is None:
self._property_stats = None
return
self.assert_isinstance(value, "stats", (dict,))
self._property_stats = value
|
Model
|
python
|
walkccc__LeetCode
|
solutions/2429. Minimize XOR/2429.py
|
{
"start": 0,
"end": 624
}
|
class ____:
def minimizeXor(self, num1: int, num2: int) -> int:
MAX_BIT = 30
bits = num2.bit_count()
# Can turn off all the bits in `num1`.
if num1.bit_count() == bits:
return num1
ans = 0
# Turn off the MSB if we have `bits` quota.
for i in reversed(range(MAX_BIT)):
if num1 >> i & 1:
ans |= 1 << i
bits -= 1
if bits == 0:
return ans
# Turn on the LSB if we still have `bits`.
for i in range(MAX_BIT):
if (num1 >> i & 1) == 0:
ans |= 1 << i
bits -= 1
if bits == 0:
return ans
return ans
|
Solution
|
python
|
pypa__pipenv
|
pipenv/patched/pip/_internal/network/lazy_wheel.py
|
{
"start": 749,
"end": 1588
}
|
class ____(Exception):
pass
def dist_from_wheel_url(name: str, url: str, session: PipSession) -> BaseDistribution:
"""Return a distribution object from the given wheel URL.
This uses HTTP range requests to only fetch the portion of the wheel
containing metadata, just enough for the object to be constructed.
If such requests are not supported, HTTPRangeRequestUnsupported
is raised.
"""
with LazyZipOverHTTP(url, session) as zf:
# For read-only ZIP files, ZipFile only needs methods read,
# seek, seekable and tell, not the whole IO protocol.
wheel = MemoryWheel(zf.name, zf) # type: ignore
# After context manager exit, wheel.name
# is an invalid file by intention.
return get_wheel_distribution(wheel, canonicalize_name(name))
|
HTTPRangeRequestUnsupported
|
python
|
fastapi__sqlmodel
|
docs_src/tutorial/offset_and_limit/tutorial002.py
|
{
"start": 100,
"end": 1628
}
|
class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
hero_4 = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32)
hero_5 = Hero(name="Black Lion", secret_name="Trevor Challa", age=35)
hero_6 = Hero(name="Dr. Weird", secret_name="Steve Weird", age=36)
hero_7 = Hero(name="Captain North America", secret_name="Esteban Rogelios", age=93)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
session.add(hero_4)
session.add(hero_5)
session.add(hero_6)
session.add(hero_7)
session.commit()
def select_heroes():
with Session(engine) as session:
statement = select(Hero).offset(3).limit(3)
results = session.exec(statement)
heroes = results.all()
print(heroes)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
|
Hero
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_node_config_status.py
|
{
"start": 383,
"end": 7722
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'active': 'V1NodeConfigSource',
'assigned': 'V1NodeConfigSource',
'error': 'str',
'last_known_good': 'V1NodeConfigSource'
}
attribute_map = {
'active': 'active',
'assigned': 'assigned',
'error': 'error',
'last_known_good': 'lastKnownGood'
}
def __init__(self, active=None, assigned=None, error=None, last_known_good=None, local_vars_configuration=None): # noqa: E501
"""V1NodeConfigStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._active = None
self._assigned = None
self._error = None
self._last_known_good = None
self.discriminator = None
if active is not None:
self.active = active
if assigned is not None:
self.assigned = assigned
if error is not None:
self.error = error
if last_known_good is not None:
self.last_known_good = last_known_good
@property
def active(self):
"""Gets the active of this V1NodeConfigStatus. # noqa: E501
:return: The active of this V1NodeConfigStatus. # noqa: E501
:rtype: V1NodeConfigSource
"""
return self._active
@active.setter
def active(self, active):
"""Sets the active of this V1NodeConfigStatus.
:param active: The active of this V1NodeConfigStatus. # noqa: E501
:type: V1NodeConfigSource
"""
self._active = active
@property
def assigned(self):
"""Gets the assigned of this V1NodeConfigStatus. # noqa: E501
:return: The assigned of this V1NodeConfigStatus. # noqa: E501
:rtype: V1NodeConfigSource
"""
return self._assigned
@assigned.setter
def assigned(self, assigned):
"""Sets the assigned of this V1NodeConfigStatus.
:param assigned: The assigned of this V1NodeConfigStatus. # noqa: E501
:type: V1NodeConfigSource
"""
self._assigned = assigned
@property
def error(self):
"""Gets the error of this V1NodeConfigStatus. # noqa: E501
Error describes any problems reconciling the Spec.ConfigSource to the Active config. Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting to load or validate the Assigned config, etc. Errors may occur at different points while syncing config. Earlier errors (e.g. download or checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error by fixing the config assigned in Spec.ConfigSource. You can find additional information for debugging by searching the error message in the Kubelet log. Error is a human-readable description of the error state; machines can check whether or not Error is empty, but should not rely on the stability of the Error text across Kubelet versions. # noqa: E501
:return: The error of this V1NodeConfigStatus. # noqa: E501
:rtype: str
"""
return self._error
@error.setter
def error(self, error):
"""Sets the error of this V1NodeConfigStatus.
Error describes any problems reconciling the Spec.ConfigSource to the Active config. Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting to load or validate the Assigned config, etc. Errors may occur at different points while syncing config. Earlier errors (e.g. download or checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error by fixing the config assigned in Spec.ConfigSource. You can find additional information for debugging by searching the error message in the Kubelet log. Error is a human-readable description of the error state; machines can check whether or not Error is empty, but should not rely on the stability of the Error text across Kubelet versions. # noqa: E501
:param error: The error of this V1NodeConfigStatus. # noqa: E501
:type: str
"""
self._error = error
@property
def last_known_good(self):
"""Gets the last_known_good of this V1NodeConfigStatus. # noqa: E501
:return: The last_known_good of this V1NodeConfigStatus. # noqa: E501
:rtype: V1NodeConfigSource
"""
return self._last_known_good
@last_known_good.setter
def last_known_good(self, last_known_good):
"""Sets the last_known_good of this V1NodeConfigStatus.
:param last_known_good: The last_known_good of this V1NodeConfigStatus. # noqa: E501
:type: V1NodeConfigSource
"""
self._last_known_good = last_known_good
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1NodeConfigStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1NodeConfigStatus):
return True
return self.to_dict() != other.to_dict()
|
V1NodeConfigStatus
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_vertex_ai.py
|
{
"start": 107961,
"end": 109012
}
|
class ____:
@mock.patch(VERTEX_AI_PATH.format("model_service.ModelServiceHook"))
def test_execute(self, mock_hook):
op = ExportModelOperator(
task_id=TASK_ID,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
region=GCP_LOCATION,
project_id=GCP_PROJECT,
model_id=TEST_MODEL_ID,
output_config=TEST_OUTPUT_CONFIG,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
op.execute(context={"ti": mock.MagicMock(), "task": mock.MagicMock()})
mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=IMPERSONATION_CHAIN)
mock_hook.return_value.export_model.assert_called_once_with(
region=GCP_LOCATION,
project_id=GCP_PROJECT,
model=TEST_MODEL_ID,
output_config=TEST_OUTPUT_CONFIG,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
|
TestVertexAIExportModelOperator
|
python
|
google__pytype
|
pytype/rewrite/vm_test.py
|
{
"start": 400,
"end": 1865
}
|
class ____(unittest.TestCase):
def test_run_module_frame(self):
block = [
opcodes.LOAD_CONST(0, 0, 0, 0, 0, 0, None),
opcodes.RETURN_VALUE(0, 0, 0, 0, 0),
]
code = test_utils.FakeOrderedCode([block], [None])
vm = vm_lib.VirtualMachine(context.Context(src=''), code.Seal(), {})
self.assertIsNone(vm._module_frame)
vm._run_module()
self.assertIsNotNone(vm._module_frame)
def test_globals(self):
vm = _make_vm("""
x = 42
def f():
global y
y = None
def g():
global z
z = x
g()
f()
""")
vm._run_module()
def get_const(val):
return cast(abstract.PythonConstant, val).constant
x = get_const(vm._module_frame.final_locals['x'])
y = get_const(vm._module_frame.final_locals['y'])
z = get_const(vm._module_frame.final_locals['z'])
self.assertEqual(x, 42)
self.assertIsNone(y)
self.assertEqual(z, 42)
def test_propagate_nonlocal(self):
vm = _make_vm("""
def f():
x = None
def g():
def h():
nonlocal x
x = 5
h()
g()
global y
y = x
f()
""")
vm._run_module()
with self.assertRaises(KeyError):
_ = vm._module_frame.final_locals['x']
y = cast(abstract.PythonConstant, vm._module_frame.final_locals['y'])
self.assertEqual(y.constant, 5)
if __name__ == '__main__':
unittest.main()
|
VmTest
|
python
|
getsentry__sentry
|
src/sentry/core/endpoints/project_team_details.py
|
{
"start": 1175,
"end": 4706
}
|
class ____(ProjectEndpoint):
publish_status = {
"DELETE": ApiPublishStatus.PUBLIC,
"POST": ApiPublishStatus.PUBLIC,
}
owner = ApiOwner.ENTERPRISE
permission_classes = (ProjectTeamsPermission,)
def convert_args(
self,
request: Request,
organization_id_or_slug: int | str,
project_id_or_slug: int | str,
team_id_or_slug: int | str,
*args,
**kwargs,
):
(args, kwargs) = super().convert_args(
request, organization_id_or_slug, project_id_or_slug, *args, **kwargs
)
project = kwargs["project"]
try:
team = Team.objects.get(
organization__slug__id_or_slug=project.organization.slug,
slug__id_or_slug=team_id_or_slug,
)
except Team.DoesNotExist:
raise ResourceDoesNotExist(detail="Team does not exist.")
kwargs["team"] = team
return (args, kwargs)
@extend_schema(
operation_id="Add a Team to a Project",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
GlobalParams.PROJECT_ID_OR_SLUG,
GlobalParams.TEAM_ID_OR_SLUG,
],
request=None,
responses={
201: ProjectWithTeamSerializer,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
examples=ProjectExamples.ADD_TEAM_TO_PROJECT,
)
def post(self, request: Request, project, team: Team) -> Response:
"""
Give a team access to a project.
"""
# A user with project:write can grant access to this project to other user/teams
project.add_team(team)
self.create_audit_entry(
request=self.request,
organization_id=project.organization_id,
target_object=project.id,
event=audit_log.get_event_id("PROJECT_TEAM_ADD"),
data={"team_slug": team.slug, "project_slug": project.slug},
)
return Response(
serialize(
project, request.user, ProjectWithTeamSerializer(collapse=["unusedFeatures"])
),
status=201,
)
@extend_schema(
operation_id="Delete a Team from a Project",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
GlobalParams.PROJECT_ID_OR_SLUG,
GlobalParams.TEAM_ID_OR_SLUG,
],
responses={
200: ProjectWithTeamSerializer,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
examples=ProjectExamples.DELETE_TEAM_FROM_PROJECT,
)
def delete(self, request: Request, project, team: Team) -> Response:
"""
Revoke a team's access to a project.
Note that Team Admins can only revoke access to teams they are admins of.
"""
if not request.access.has_team_scope(team, "project:write"):
return Response(
{"detail": ["You do not have permission to perform this action."]}, status=403
)
project.remove_team(team)
self.create_audit_entry(
request=self.request,
organization_id=project.organization_id,
target_object=project.id,
event=audit_log.get_event_id("PROJECT_TEAM_REMOVE"),
data={"team_slug": team.slug, "project_slug": project.slug},
)
return Response(serialize(project, request.user, ProjectWithTeamSerializer()), status=200)
|
ProjectTeamDetailsEndpoint
|
python
|
spack__spack
|
var/spack/test_repos/spack_repo/builtin_mock/packages/failing_empty_install/package.py
|
{
"start": 216,
"end": 566
}
|
class ____(Package):
"""This package installs nothing, install should fail."""
homepage = "http://www.example.com/trivial_install"
url = "http://www.unit-test-should-replace-this-url/trivial_install-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
def install(self, spec, prefix):
pass
|
FailingEmptyInstall
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 440584,
"end": 440810
}
|
class ____(VegaLiteSchema):
"""GeoJsonProperties schema wrapper."""
_schema = {"$ref": "#/definitions/GeoJsonProperties"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
|
GeoJsonProperties
|
python
|
sympy__sympy
|
sympy/physics/mechanics/pathway.py
|
{
"start": 17832,
"end": 26555
}
|
class ____(PathwayBase):
"""Pathway that wraps a geometry object.
Explanation
===========
A wrapping pathway interacts with a geometry object and forms a path that
wraps smoothly along its surface. The wrapping pathway along the geometry
object will be the geodesic that the geometry object defines based on the
two points. It will not interact with any other objects in the system, i.e.
a ``WrappingPathway`` will intersect other objects to ensure that the path
between its two ends (its attachments) is the shortest possible.
To explain the sign conventions used for pathway length, extension
velocity, and direction of applied forces, we can ignore the geometry with
which the wrapping pathway interacts. A wrapping pathway is made up of two
points that can move relative to each other, and a pair of equal and
opposite forces acting on the points. If the positive time-varying
Euclidean distance between the two points is defined, then the "extension
velocity" is the time derivative of this distance. The extension velocity
is positive when the two points are moving away from each other and
negative when moving closer to each other. The direction for the force
acting on either point is determined by constructing a unit vector directed
from the other point to this point. This establishes a sign convention such
that a positive force magnitude tends to push the points apart. The
following diagram shows the positive force sense and the distance between
the points::
P Q
o<--- F --->o
| |
|<--l(t)--->|
Examples
========
>>> from sympy.physics.mechanics import WrappingPathway
To construct a wrapping pathway, like other pathways, a pair of points must
be passed, followed by an instance of a wrapping geometry class as a
keyword argument. We'll use a cylinder with radius ``r`` and its axis
parallel to ``N.x`` passing through a point ``pO``.
>>> from sympy import symbols
>>> from sympy.physics.mechanics import Point, ReferenceFrame, WrappingCylinder
>>> r = symbols('r')
>>> N = ReferenceFrame('N')
>>> pA, pB, pO = Point('pA'), Point('pB'), Point('pO')
>>> cylinder = WrappingCylinder(r, pO, N.x)
>>> wrapping_pathway = WrappingPathway(pA, pB, cylinder)
>>> wrapping_pathway
WrappingPathway(pA, pB, geometry=WrappingCylinder(radius=r, point=pO,
axis=N.x))
Parameters
==========
attachment_1 : Point
First of the pair of ``Point`` objects between which the wrapping
pathway spans.
attachment_2 : Point
Second of the pair of ``Point`` objects between which the wrapping
pathway spans.
geometry : WrappingGeometryBase
Geometry about which the pathway wraps.
"""
def __init__(self, attachment_1, attachment_2, geometry):
"""Initializer for ``WrappingPathway``.
Parameters
==========
attachment_1 : Point
First of the pair of ``Point`` objects between which the wrapping
pathway spans.
attachment_2 : Point
Second of the pair of ``Point`` objects between which the wrapping
pathway spans.
geometry : WrappingGeometryBase
Geometry about which the pathway wraps.
The geometry about which the pathway wraps.
"""
super().__init__(attachment_1, attachment_2)
self.geometry = geometry
@property
def geometry(self):
"""Geometry around which the pathway wraps."""
return self._geometry
@geometry.setter
def geometry(self, geometry):
if hasattr(self, '_geometry'):
msg = (
f'Can\'t set attribute `geometry` to {repr(geometry)} as it '
f'is immutable.'
)
raise AttributeError(msg)
if not isinstance(geometry, WrappingGeometryBase):
msg = (
f'Value {repr(geometry)} passed to `geometry` was of type '
f'{type(geometry)}, must be {WrappingGeometryBase}.'
)
raise TypeError(msg)
self._geometry = geometry
@property
def length(self):
"""Exact analytical expression for the pathway's length."""
return self.geometry.geodesic_length(*self.attachments)
@property
def extension_velocity(self):
"""Exact analytical expression for the pathway's extension velocity."""
return self.length.diff(dynamicsymbols._t)
def to_loads(self, force):
"""Loads required by the equations of motion method classes.
Explanation
===========
``KanesMethod`` requires a list of ``Point``-``Vector`` tuples to be
passed to the ``loads`` parameters of its ``kanes_equations`` method
when constructing the equations of motion. This method acts as a
utility to produce the correctly-structred pairs of points and vectors
required so that these can be easily concatenated with other items in
the list of loads and passed to ``KanesMethod.kanes_equations``. These
loads are also in the correct form to also be passed to the other
equations of motion method classes, e.g. ``LagrangesMethod``.
Examples
========
The below example shows how to generate the loads produced in an
actuator that produces an expansile force ``F`` while wrapping around a
cylinder. First, create a cylinder with radius ``r`` and an axis
parallel to the ``N.z`` direction of the global frame ``N`` that also
passes through a point ``pO``.
>>> from sympy import symbols
>>> from sympy.physics.mechanics import (Point, ReferenceFrame,
... WrappingCylinder)
>>> N = ReferenceFrame('N')
>>> r = symbols('r', positive=True)
>>> pO = Point('pO')
>>> cylinder = WrappingCylinder(r, pO, N.z)
Create the pathway of the actuator using the ``WrappingPathway`` class,
defined to span between two points ``pA`` and ``pB``. Both points lie
on the surface of the cylinder and the location of ``pB`` is defined
relative to ``pA`` by the dynamics symbol ``q``.
>>> from sympy import cos, sin
>>> from sympy.physics.mechanics import WrappingPathway, dynamicsymbols
>>> q = dynamicsymbols('q')
>>> pA = Point('pA')
>>> pB = Point('pB')
>>> pA.set_pos(pO, r*N.x)
>>> pB.set_pos(pO, r*(cos(q)*N.x + sin(q)*N.y))
>>> pB.pos_from(pA)
(r*cos(q(t)) - r)*N.x + r*sin(q(t))*N.y
>>> pathway = WrappingPathway(pA, pB, cylinder)
Now create a symbol ``F`` to describe the magnitude of the (expansile)
force that will be produced along the pathway. The list of loads that
``KanesMethod`` requires can be produced by calling the pathway's
``to_loads`` method with ``F`` passed as the only argument.
>>> F = symbols('F')
>>> loads = pathway.to_loads(F)
>>> [load.__class__(load.location, load.vector.simplify()) for load in loads]
[(pA, F*N.y), (pB, F*sin(q(t))*N.x - F*cos(q(t))*N.y),
(pO, - F*sin(q(t))*N.x + F*(cos(q(t)) - 1)*N.y)]
Parameters
==========
force : Expr
Magnitude of the force acting along the length of the pathway. It
is assumed that this ``Expr`` represents an expansile force.
"""
pA, pB = self.attachments
pO = self.geometry.point
pA_force, pB_force = self.geometry.geodesic_end_vectors(pA, pB)
pO_force = -(pA_force + pB_force)
loads = [
Force(pA, force * pA_force),
Force(pB, force * pB_force),
Force(pO, force * pO_force),
]
return loads
def __repr__(self):
"""Representation of a ``WrappingPathway``."""
attachments = ', '.join(str(a) for a in self.attachments)
return (
f'{self.__class__.__name__}({attachments}, '
f'geometry={self.geometry})'
)
def _point_pair_relative_position(point_1, point_2):
"""The relative position between a pair of points."""
return point_2.pos_from(point_1)
def _point_pair_length(point_1, point_2):
"""The length of the direct linear path between two points."""
return _point_pair_relative_position(point_1, point_2).magnitude()
def _point_pair_extension_velocity(point_1, point_2):
"""The extension velocity of the direct linear path between two points."""
return _point_pair_length(point_1, point_2).diff(dynamicsymbols._t)
|
WrappingPathway
|
python
|
anthropics__anthropic-sdk-python
|
src/anthropic/lib/bedrock/_beta_messages.py
|
{
"start": 2749,
"end": 2966
}
|
class ____:
def __init__(self, messages: Messages) -> None:
self._messages = messages
self.create = to_streamed_response_wrapper(
messages.create,
)
|
MessagesWithStreamingResponse
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/triggers/redshift_cluster.py
|
{
"start": 2648,
"end": 4060
}
|
class ____(AwsBaseWaiterTrigger):
"""
Trigger for RedshiftPauseClusterOperator.
The trigger will asynchronously poll the boto3 API and wait for the
Redshift cluster to be in the `paused` state.
:param cluster_identifier: A unique identifier for the cluster.
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param aws_conn_id: The Airflow connection used for AWS credentials.
"""
def __init__(
self,
cluster_identifier: str,
aws_conn_id: str | None = "aws_default",
waiter_delay: int = 15,
waiter_max_attempts: int = 999999,
):
super().__init__(
serialized_fields={"cluster_identifier": cluster_identifier},
waiter_name="cluster_paused",
waiter_args={"ClusterIdentifier": cluster_identifier},
failure_message="Error while pausing the redshift cluster",
status_message="Redshift cluster pausing in progress",
status_queries=["Clusters[].ClusterStatus"],
return_value=None,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
def hook(self) -> AwsGenericHook:
return RedshiftHook(aws_conn_id=self.aws_conn_id)
|
RedshiftPauseClusterTrigger
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/apple/client.py
|
{
"start": 484,
"end": 537
}
|
class ____:
EMAIL = "email"
NAME = "name"
|
Scope
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 300771,
"end": 300948
}
|
class ____(VegaLiteSchema):
"""Cursor schema wrapper."""
_schema = {"$ref": "#/definitions/Cursor"}
def __init__(self, *args):
super().__init__(*args)
|
Cursor
|
python
|
graphql-python__graphene
|
graphene/relay/tests/test_global_id.py
|
{
"start": 342,
"end": 1566
}
|
class ____:
def __init__(self, parent_type):
self.parent_type = GrapheneObjectType(
graphene_type=parent_type,
name=parent_type._meta.name,
description=parent_type._meta.description,
fields=None,
is_type_of=parent_type.is_type_of,
interfaces=None,
)
def test_global_id_defaults_to_required_and_node():
gid = GlobalID()
assert isinstance(gid.type, NonNull)
assert gid.type.of_type == ID
assert gid.node == Node
def test_global_id_allows_overriding_of_node_and_required():
gid = GlobalID(node=CustomNode, required=False)
assert gid.type == ID
assert gid.node == CustomNode
def test_global_id_defaults_to_info_parent_type():
my_id = "1"
gid = GlobalID()
id_resolver = gid.wrap_resolve(lambda *_: my_id)
my_global_id = id_resolver(None, Info(User))
assert my_global_id == to_global_id(User._meta.name, my_id)
def test_global_id_allows_setting_customer_parent_type():
my_id = "1"
gid = GlobalID(parent_type=User)
id_resolver = gid.wrap_resolve(lambda *_: my_id)
my_global_id = id_resolver(None, None)
assert my_global_id == to_global_id(User._meta.name, my_id)
|
Info
|
python
|
run-llama__llama_index
|
llama-index-core/tests/tools/tool_spec/test_base.py
|
{
"start": 443,
"end": 5316
}
|
class ____(BaseToolSpec):
spec_functions: List[Union[str, Tuple[str, str]]] = [
"foo",
"bar",
"abc",
"abc_with_ctx",
"async_only_fn",
]
def foo(self, arg1: str, arg2: int) -> str:
"""Foo."""
return f"foo {arg1} {arg2}"
def bar(self, arg1: bool) -> str:
"""
Bar.
With extra.
"""
return f"bar {arg1}"
async def afoo(self, arg1: str, arg2: int) -> str:
"""Afoo."""
return self.foo(arg1=arg1, arg2=arg2)
async def abar(self, arg1: bool) -> str:
"""Abar."""
return self.bar(arg1=arg1)
async def async_only_fn(self) -> str:
"""Async only fn."""
return "async only fn"
def abc(self, arg1: str) -> str:
# NOTE: no docstring
return f"bar {arg1}"
def abc_with_ctx(self, arg1: str, ctx: Context) -> str:
return f"bar {arg1}"
def unused_function(self, arg1: str) -> str:
return f"unused {arg1}"
def test_tool_spec() -> None:
"""Test tool spec."""
tool_spec = TestToolSpec()
# first is foo, second is bar
tools = tool_spec.to_tool_list()
assert len(tools) == 5
assert tools[0].metadata.name == "foo"
assert tools[0].metadata.description == "foo(arg1: str, arg2: int) -> str\nFoo."
assert tools[0].fn("hello", 1) == "foo hello 1"
assert tools[0].ctx_param_name is None
assert not tools[0].requires_context
assert tools[1].metadata.name == "bar"
assert (
tools[1].metadata.description
== "bar(arg1: bool) -> str\n\n Bar.\n\n With extra."
)
assert str(tools[1](True)) == "bar True"
assert tools[1].ctx_param_name is None
assert not tools[1].requires_context
assert tools[2].metadata.name == "abc"
assert tools[2].metadata.description == "abc(arg1: str) -> str"
assert (
tools[2].metadata.fn_schema.model_json_schema()["properties"]
== AbcSchema.model_json_schema()["properties"]
)
assert tools[2].ctx_param_name is None
assert not tools[2].requires_context
assert tools[3].metadata.name == "abc_with_ctx"
assert tools[3].metadata.description == "abc_with_ctx(arg1: str) -> str"
assert (
tools[3].metadata.fn_schema.model_json_schema()["properties"]
== AbcSchema.model_json_schema()["properties"]
)
assert tools[3].ctx_param_name == "ctx"
assert tools[3].requires_context
# test metadata mapping
tools = tool_spec.to_tool_list(
func_to_metadata_mapping={
"foo": ToolMetadata(
"foo_description", name="foo_name", fn_schema=FooSchema
),
}
)
assert len(tools) == 5
assert tools[0].metadata.name == "foo_name"
assert tools[0].metadata.description == "foo_description"
assert tools[0].metadata.fn_schema is not None
fn_schema = tools[0].metadata.fn_schema.model_json_schema()
print(fn_schema)
assert fn_schema["properties"]["arg1"]["type"] == "string"
assert fn_schema["properties"]["arg2"]["type"] == "integer"
assert tools[1].metadata.name == "bar"
assert (
tools[1].metadata.description
== "bar(arg1: bool) -> str\n\n Bar.\n\n With extra."
)
assert tools[1].metadata.fn_schema is not None
fn_schema = tools[1].metadata.fn_schema.model_json_schema()
assert fn_schema["properties"]["arg1"]["type"] == "boolean"
@pytest.mark.asyncio
async def test_tool_spec_async() -> None:
"""Test async_fn of tool spec."""
tool_spec = TestToolSpec()
tools = tool_spec.to_tool_list()
assert len(tools) == 5
assert await tools[0].async_fn("hello", 1) == "foo hello 1"
assert str(await tools[1].acall(True)) == "bar True"
assert tools[0].fn("hello", 1) == "foo hello 1"
assert str(tools[1](True)) == "bar True"
def test_async_patching() -> None:
# test sync patching of async function
tool_spec = TestToolSpec()
tool_spec.spec_functions = ["afoo", "async_only_fn"]
tools = tool_spec.to_tool_list()
assert len(tools) == 2
assert tools[0].fn("hello", 1) == "foo hello 1"
assert tools[0].metadata.name == "afoo"
assert tools[0].metadata.description == "afoo(arg1: str, arg2: int) -> str\nAfoo."
assert tools[1].metadata.name == "async_only_fn"
assert tools[1].metadata.description == "async_only_fn() -> str\nAsync only fn."
def test_tool_spec_subset() -> None:
"""Test tool spec subset."""
tool_spec = TestToolSpec()
tools = tool_spec.to_tool_list(spec_functions=["abc"])
assert len(tools) == 1
assert tools[0].metadata.name == "abc"
assert tools[0].metadata.description == "abc(arg1: str) -> str"
assert (
tools[0].metadata.fn_schema.model_json_schema()["properties"]
== AbcSchema.model_json_schema()["properties"]
)
|
TestToolSpec
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/components/lib/metadata_checks_component.py
|
{
"start": 1553,
"end": 1939
}
|
class ____(Resolvable):
type: Literal["column_schema_change"]
severity: ResolvedAssetCheckSeverity = AssetCheckSeverity.WARN
def build_checks(self, key: AssetKey) -> Sequence[AssetChecksDefinition]:
return build_column_schema_change_checks(
assets=[key], **{k: v for k, v in as_dict(self).items() if k != "type"}
)
@record
|
ColumnSchemaChangeParams
|
python
|
numpy__numpy
|
tools/swig/test/testSuperTensor.py
|
{
"start": 15217,
"end": 15492
}
|
class ____(SuperTensorTestCase):
def __init__(self, methodName="runTest"):
SuperTensorTestCase.__init__(self, methodName)
self.typeStr = "float"
self.typeCode = "f"
######################################################################
|
floatTestCase
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py
|
{
"start": 43772,
"end": 44484
}
|
class ____(GeneratedAirbyteSource):
@public
def __init__(self, name: str, api_token: str, start_date: str):
"""Airbyte Source for Kustomer Singer.
Documentation can be found at https://docs.airbyte.com/integrations/sources/kustomer-singer
Args:
name (str): The name of the destination.
api_token (str): Kustomer API Token. See the docs on how to obtain this
start_date (str): The date from which you'd like to replicate the data
"""
self.api_token = check.str_param(api_token, "api_token")
self.start_date = check.str_param(start_date, "start_date")
super().__init__("Kustomer Singer", name)
|
KustomerSingerSource
|
python
|
PyCQA__pylint
|
tests/functional/m/member/member_checks.py
|
{
"start": 4764,
"end": 4917
}
|
class ____(enum.IntEnum):
BAR = 0
SOME_VALUE = Cls.BAZ # [no-member]
# Does not crash when inferring the `append` attribute on the slice object
|
Cls
|
python
|
great-expectations__great_expectations
|
contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/expectations/expect_column_values_to_be_equal_to_or_less_than_profile_max.py
|
{
"start": 2774,
"end": 6925
}
|
class ____(ColumnMapExpectation):
"""Expect the column values to be less than or equal to the maximum value of the respective column within the DataProfiler report.
This function builds upon the custom column map expectations of Great Expectations. This function asks a yes/no question of each row in the user-specified column;
namely, is the value less than or equal to the maximum value of the respective column within the provided profile report generated from the DataProfiler.
Args:
column(str): The column that you want to check.
profile(dict(str, Any)): The report, which is assumed to contain a column of the same name, previously generated using the DataProfiler.
df.expect_column_values_to_be_equal_to_or_less_than_profile_max(
column,
profile
)
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
data = [
[-36, -25, -44],
[18, 45, 46],
[-16, -29, -49],
[21, 4, 35],
[-18, -7, -40],
[22, -4, -37],
[-17, -21, 11],
[48, -32, -48],
[0, -44, 20],
]
cols = ["col_a", "col_b", "col_c"]
df = pd.DataFrame(data, columns=cols)
profiler_opts = dp.ProfilerOptions()
profiler_opts.structured_options.multiprocess.is_enabled = False
profileObj = dp.Profiler(df, options=profiler_opts)
profileReport = profileObj.report(report_options={"output_format": "serializable"})
profileReport["global_stats"]["profile_schema"] = dict(
profileReport["global_stats"]["profile_schema"]
)
examples = [
{
"data": {
"col_a": [-3, 21, 20, 5],
"col_b": [-7, 54, -47, 12],
"col_c": [54, -10, 19, 19],
},
"tests": [
{
"title": "column_upper_bounded_by_max",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "col_a",
"profile": profileReport,
},
"out": {"success": True},
},
{
"title": "column_has_value_greater_than_max",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "col_b",
"profile": profileReport,
},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.less_than_or_equal_to_profile_max"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = (
"profile",
"mostly",
)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {
"profile": None,
"result_format": "BASIC",
"catch_exceptions": False,
}
# This object contains metadata for display in the public Gallery
library_metadata = {
"requirements": ["dataprofiler", "tensorflow", "scikit-learn", "numpy"],
"maturity": "experimental", # "concept_only", "experimental", "beta", or "production"
"tags": ["dataprofiler"], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@stevensecreti", # Don't forget to add your github handle here!
],
}
if __name__ == "__main__":
# ExpectColumnValuesToBeEqualToOrGreaterThanProfileMin().print_diagnostic_checklist()
diagnostics_report = ExpectColumnValuesToBeEqualToOrLessThanProfileMax().run_diagnostics()
print(diagnostics_report.generate_checklist())
|
ExpectColumnValuesToBeEqualToOrLessThanProfileMax
|
python
|
crytic__slither
|
slither/core/expressions/elementary_type_name_expression.py
|
{
"start": 266,
"end": 707
}
|
class ____(Expression):
def __init__(self, t: ElementaryType) -> None:
assert isinstance(t, Type)
super().__init__()
self._type = t
@property
def type(self) -> Type:
return self._type
@type.setter
def type(self, new_type: Type):
assert isinstance(new_type, Type)
self._type = new_type
def __str__(self) -> str:
return str(self._type)
|
ElementaryTypeNameExpression
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/metadata.py
|
{
"start": 3575,
"end": 3778
}
|
class ____(graphene.ObjectType):
boolValue = graphene.Field(graphene.Boolean)
class Meta:
interfaces = (GrapheneMetadataEntry,)
name = "BoolMetadataEntry"
|
GrapheneBoolMetadataEntry
|
python
|
django__django
|
tests/backends/tests.py
|
{
"start": 6410,
"end": 7123
}
|
class ____(TestCase):
def test_bad_parameter_count(self):
"""
An executemany call with too many/not enough parameters will raise an
exception.
"""
with connection.cursor() as cursor:
query = "INSERT INTO %s (%s, %s) VALUES (%%s, %%s)" % (
connection.introspection.identifier_converter("backends_square"),
connection.ops.quote_name("root"),
connection.ops.quote_name("square"),
)
with self.assertRaises(Exception):
cursor.executemany(query, [(1, 2, 3)])
with self.assertRaises(Exception):
cursor.executemany(query, [(1,)])
|
ParameterHandlingTest
|
python
|
ray-project__ray
|
python/ray/util/client/common.py
|
{
"start": 31367,
"end": 35219
}
|
class ____:
"""
Cache for streaming RPCs, i.e. the DataServicer. Relies on explicit
ack's from the client to determine when it can clean up cache entries.
"""
def __init__(self):
self.last_received = 0
self.cv = threading.Condition()
self.cache: Dict[int, Any] = OrderedDict()
def check_cache(self, req_id: int) -> Optional[Any]:
"""
Check the cache for a given thread, and see if the entry in the cache
matches the current request_id. Returns None if the request_id has
not been seen yet, otherwise returns the cached result.
"""
with self.cv:
if _id_is_newer(self.last_received, req_id) or self.last_received == req_id:
# Request is for an id that has already been cleared from
# cache/acknowledged.
raise RuntimeError(
"Attempting to accesss a cache entry that has already "
"cleaned up. The client has already acknowledged "
f"receiving this response. ({req_id}, "
f"{self.last_received})"
)
if req_id in self.cache:
cached_resp = self.cache[req_id]
while cached_resp is None:
# The call was started, but the response hasn't yet been
# added to the cache. Let go of the lock and wait until
# the response is ready
self.cv.wait()
if req_id not in self.cache:
raise RuntimeError(
"Cache entry was removed. This likely means that "
"the result of this call is no longer needed."
)
cached_resp = self.cache[req_id]
return cached_resp
self.cache[req_id] = None
return None
def update_cache(self, req_id: int, resp: Any) -> None:
"""
Inserts `response` into the cache for `request_id`.
"""
with self.cv:
self.cv.notify_all()
if req_id not in self.cache:
raise RuntimeError(
"Attempting to update the cache, but placeholder is "
"missing. This might happen on a redundant call to "
f"update_cache. ({req_id})"
)
self.cache[req_id] = resp
def invalidate(self, e: Exception) -> bool:
"""
Invalidate any partially populated cache entries, replacing their
placeholders with the passed in exception. Useful to prevent a thread
from waiting indefinitely on a failed call.
Returns True if the cache contains an error, False otherwise
"""
with self.cv:
invalid = False
for req_id in self.cache:
if self.cache[req_id] is None:
self.cache[req_id] = e
if isinstance(self.cache[req_id], Exception):
invalid = True
self.cv.notify_all()
return invalid
def cleanup(self, last_received: int) -> None:
"""
Cleanup all of the cached requests up to last_received. Assumes that
the cache entries were inserted in ascending order.
"""
with self.cv:
if _id_is_newer(last_received, self.last_received):
self.last_received = last_received
to_remove = []
for req_id in self.cache:
if _id_is_newer(last_received, req_id) or last_received == req_id:
to_remove.append(req_id)
else:
break
for req_id in to_remove:
del self.cache[req_id]
self.cv.notify_all()
|
OrderedResponseCache
|
python
|
pytorch__pytorch
|
torch/fx/experimental/proxy_tensor.py
|
{
"start": 65309,
"end": 67891
}
|
class ____(fx.Interpreter):
def __init__(
self,
module: fx.GraphModule,
new_graph: fx.Graph,
decomposition_table: Optional[Mapping[OpOverload, Callable]] = None,
**kwargs: object,
) -> None:
super().__init__(module, **kwargs) # type: ignore[arg-type]
self.new_graph = new_graph
self.tracer = _GraphAppendingTracerEx(self.new_graph)
# Blegh
self.decomposition_table = decomposition_table or {}
self.mode = ProxyTorchDispatchMode(self.tracer, tracing_mode="real")
# pyrefly: ignore [bad-override]
def placeholder(
self,
target: str, # type: ignore[override]
args: tuple[object, ...],
kwargs: dict[str, object],
) -> object:
out = super().placeholder(target, args, kwargs) # type: ignore[arg-type]
proxy = fx.Proxy(self.new_graph.placeholder(target), self.tracer)
track_tensor_tree(out, proxy, constant=None, tracer=self.tracer)
# TODO handle case where the first character of target is '*'
return out
# pyrefly: ignore [bad-override]
def get_attr(
self,
target: str, # type: ignore[override]
args: tuple[object, ...],
kwargs: dict[str, object],
) -> object:
out = super().get_attr(target, args, kwargs) # type: ignore[arg-type]
proxy = fx.Proxy(self.new_graph.get_attr(target), self.tracer)
track_tensor_tree(out, proxy, constant=None, tracer=self.tracer)
return out
# call_function, call_method, call_module get traced automatically by the outer mode.
# pyrefly: ignore [bad-override]
def output(
self,
target: str, # type: ignore[override]
args: tuple[object, ...],
kwargs: dict[str, object],
) -> object:
out = super().output(target, args, kwargs) # type: ignore[arg-type]
def get_proxy_node(x: _ProxyTensor) -> fx.node.Node:
return x.proxy.node
def unwrap(e: Tensor) -> Union[Tensor, fx.Node]:
return get_proxy_slot(e, self.tracer, e, get_proxy_node)
self.new_graph.output(pytree.tree_map(unwrap, out))
return out
def run(self, *args: object, **kwargs: object) -> object:
# Should enter the mode at least once for being able to restore it later
# See: https://github.com/pytorch/pytorch/pull/82549#discussion_r934782025
with decompose(self.decomposition_table), self.mode:
return super().run(*args, **kwargs) # type: ignore[arg-type]
|
DecompositionInterpreter
|
python
|
pypa__pip
|
src/pip/_vendor/urllib3/exceptions.py
|
{
"start": 634,
"end": 964
}
|
class ____(PoolError):
"""Base exception for PoolErrors that have associated URLs."""
def __init__(self, pool, url, message):
self.url = url
PoolError.__init__(self, pool, message)
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, self.url, None)
|
RequestError
|
python
|
celery__celery
|
t/unit/utils/test_functional.py
|
{
"start": 2253,
"end": 8567
}
|
class ____:
def test_list(self):
l = [1, 2]
r = regen(iter(l))
assert regen(l) is l
assert r == l
assert r == l # again
assert r.__length_hint__() == 0
fun, args = r.__reduce__()
assert fun(*args) == l
@pytest.fixture
def g(self):
return regen(iter(list(range(10))))
def test_gen(self, g):
assert g[7] == 7
assert g[6] == 6
assert g[5] == 5
assert g[4] == 4
assert g[3] == 3
assert g[2] == 2
assert g[1] == 1
assert g[0] == 0
assert g.data, list(range(10))
assert g[8] == 8
assert g[0] == 0
def test_gen__index_2(self, g):
assert g[0] == 0
assert g[1] == 1
assert g.data == list(range(10))
def test_gen__index_error(self, g):
assert g[0] == 0
with pytest.raises(IndexError):
g[11]
assert list(iter(g)) == list(range(10))
def test_gen__negative_index(self, g):
assert g[-1] == 9
assert g[-2] == 8
assert g[-3] == 7
assert g[-4] == 6
assert g[-5] == 5
assert g[5] == 5
assert g.data == list(range(10))
assert list(iter(g)) == list(range(10))
def test_nonzero__does_not_consume_more_than_first_item(self):
def build_generator():
yield 1
pytest.fail("generator should not consume past first item")
yield 2
g = regen(build_generator())
assert bool(g)
assert g[0] == 1
def test_nonzero__empty_iter(self):
assert not regen(iter([]))
def test_deque(self):
original_list = [42]
d = collections.deque(original_list)
# Confirm that concretising a `regen()` instance repeatedly for an
# equality check always returns the original list
g = regen(d)
assert g == original_list
assert g == original_list
def test_repr(self):
def die():
raise AssertionError("Generator died")
yield None
# Confirm that `regen()` instances are not concretised when represented
g = regen(die())
assert "..." in repr(g)
def test_partial_reconcretisation(self):
class WeirdIterator():
def __init__(self, iter_):
self.iter_ = iter_
self._errored = False
def __iter__(self):
yield from self.iter_
if not self._errored:
try:
# This should stop the regen instance from marking
# itself as being done
raise AssertionError("Iterator errored")
finally:
self._errored = True
original_list = list(range(42))
g = regen(WeirdIterator(original_list))
iter_g = iter(g)
for e in original_list:
assert e == next(iter_g)
with pytest.raises(AssertionError, match="Iterator errored"):
next(iter_g)
# The following checks are for the known "misbehaviour"
assert getattr(g, "_regen__done") is False
# If the `regen()` instance doesn't think it's done then it'll dupe the
# elements from the underlying iterator if it can be reused
iter_g = iter(g)
for e in original_list * 2:
assert next(iter_g) == e
with pytest.raises(StopIteration):
next(iter_g)
assert getattr(g, "_regen__done") is True
# Finally we xfail this test to keep track of it
raise pytest.xfail(reason="#6794")
def test_length_hint_passthrough(self, g):
assert g.__length_hint__() == 10
def test_getitem_repeated(self, g):
halfway_idx = g.__length_hint__() // 2
assert g[halfway_idx] == halfway_idx
# These are now concretised so they should be returned without any work
assert g[halfway_idx] == halfway_idx
for i in range(halfway_idx + 1):
assert g[i] == i
# This should only need to concretise one more element
assert g[halfway_idx + 1] == halfway_idx + 1
def test_done_does_not_lag(self, g):
"""
Don't allow regen to return from `__iter__()` and check `__done`.
"""
# The range we zip with here should ensure that the `regen.__iter__`
# call never gets to return since we never attempt a failing `next()`
len_g = g.__length_hint__()
for i, __ in zip(range(len_g), g):
assert getattr(g, "_regen__done") is (i == len_g - 1)
# Just for sanity, check against a specific `bool` here
assert getattr(g, "_regen__done") is True
def test_lookahead_consume(self, subtests):
"""
Confirm that regen looks ahead by a single item as expected.
"""
def g():
yield from ["foo", "bar"]
raise pytest.fail("This should never be reached")
with subtests.test(msg="bool does not overconsume"):
assert bool(regen(g()))
with subtests.test(msg="getitem 0th does not overconsume"):
assert regen(g())[0] == "foo"
with subtests.test(msg="single iter does not overconsume"):
assert next(iter(regen(g()))) == "foo"
class ExpectedException(BaseException):
pass
def g2():
yield from ["foo", "bar"]
raise ExpectedException()
with subtests.test(msg="getitem 1th does overconsume"):
r = regen(g2())
with pytest.raises(ExpectedException):
r[1]
# Confirm that the item was concretised anyway
assert r[1] == "bar"
with subtests.test(msg="full iter does overconsume"):
r = regen(g2())
with pytest.raises(ExpectedException):
for _ in r:
pass
# Confirm that the items were concretised anyway
assert r == ["foo", "bar"]
with subtests.test(msg="data access does overconsume"):
r = regen(g2())
with pytest.raises(ExpectedException):
r.data
# Confirm that the items were concretised anyway
assert r == ["foo", "bar"]
|
test_regen
|
python
|
allegroai__clearml
|
clearml/utilities/pigar/modules.py
|
{
"start": 2450,
"end": 3251
}
|
class ____(dict):
"""_Locations store code locations(file, linenos)."""
def __init__(self) -> None:
super(_Locations, self).__init__()
self._sorted = None
def add(self, file: str, lineno: int) -> None:
if file in self and lineno not in self[file]:
self[file].append(lineno)
else:
self[file] = [lineno]
def extend(self, obj: "_Locations") -> None:
for file, linenos in obj.items():
for lineno in linenos:
self.add(file, lineno)
def sorted_items(self) -> List[str]:
if self._sorted is None:
self._sorted = [
"{0}: {1}".format(f, ",".join([str(n) for n in sorted(ls)])) for f, ls in sorted(self.items())
]
return self._sorted
|
_Locations
|
python
|
Unity-Technologies__ml-agents
|
ml-agents/mlagents/trainers/sac/optimizer_torch.py
|
{
"start": 1213,
"end": 1662
}
|
class ____(OffPolicyHyperparamSettings):
batch_size: int = 128
buffer_size: int = 50000
buffer_init_steps: int = 0
tau: float = 0.005
steps_per_update: float = 1
save_replay_buffer: bool = False
init_entcoef: float = 1.0
reward_signal_steps_per_update: float = attr.ib()
@reward_signal_steps_per_update.default
def _reward_signal_steps_per_update_default(self):
return self.steps_per_update
|
SACSettings
|
python
|
apache__airflow
|
providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/executors/kubernetes_executor_types.py
|
{
"start": 1751,
"end": 2100
}
|
class ____(NamedTuple):
"""Watch event data from Kubernetes pods."""
pod_name: str
namespace: str
state: TaskInstanceState | str | None
annotations: dict[str, str]
resource_version: str
failure_details: FailureDetails | None
# TODO: Remove after Airflow 2 support is removed
CommandType = "Sequence[str]"
|
KubernetesWatch
|
python
|
kamyu104__LeetCode-Solutions
|
Python/making-file-names-unique.py
|
{
"start": 50,
"end": 628
}
|
class ____(object):
def getFolderNames(self, names):
"""
:type names: List[str]
:rtype: List[str]
"""
count = collections.Counter()
result, lookup = [], set()
for name in names:
while True:
name_with_suffix = "{}({})".format(name, count[name]) if count[name] else name
count[name] += 1
if name_with_suffix not in lookup:
break
result.append(name_with_suffix)
lookup.add(name_with_suffix)
return result
|
Solution
|
python
|
mlflow__mlflow
|
mlflow/types/responses_helpers.py
|
{
"start": 1231,
"end": 1400
}
|
class ____(BaseModel):
end_index: int | None = None
start_index: int | None = None
title: str
type: str = "url_citation"
url: str
|
AnnotationURLCitation
|
python
|
joke2k__faker
|
faker/providers/date_time/ja_JP/__init__.py
|
{
"start": 46,
"end": 1121
}
|
class ____(DateTimeProvider):
MONTH_NAMES = {
"01": "δΈζ",
"02": "δΊζ",
"03": "δΈζ",
"04": "εζ",
"05": "δΊζ",
"06": "ε
ζ",
"07": "δΈζ",
"08": "ε
«ζ",
"09": "δΉζ",
"10": "εζ",
"11": "εδΈζ",
"12": "εδΊζ",
}
TRADITIONAL_MONTH_NAMES = {
"01": "η¦ζ",
"02": "ε¦ζ",
"03": "εΌ₯η",
"04": "ε―ζ",
"05": "ηζ",
"06": "ζ°΄η‘ζ",
"07": "ζζ",
"08": "θζ",
"09": "ι·ζ",
"10": "η₯η‘ζ",
"11": "ιζ",
"12": "εΈ«θ΅°",
}
DAY_NAMES = {
"0": "ζ₯ζζ₯",
"1": "ζζζ₯",
"2": "η«ζζ₯",
"3": "ζ°΄ζζ₯",
"4": "ζ¨ζζ₯",
"5": "ιζζ₯",
"6": "εζζ₯",
}
def day_of_week(self) -> str:
day = self.date("%w")
return self.DAY_NAMES[day]
def month_name(self) -> str:
month = self.month()
return self.MONTH_NAMES[month]
def traditional_month_name(self) -> str:
month = self.month()
return self.TRADITIONAL_MONTH_NAMES[month]
|
Provider
|
python
|
sympy__sympy
|
sympy/plotting/pygletplot/plot_mode.py
|
{
"start": 291,
"end": 14156
}
|
class ____(PlotObject):
"""
Grandparent class for plotting
modes. Serves as interface for
registration, lookup, and init
of modes.
To create a new plot mode,
inherit from PlotModeBase
or one of its children, such
as PlotSurface or PlotCurve.
"""
## Class-level attributes
## used to register and lookup
## plot modes. See PlotModeBase
## for descriptions and usage.
i_vars, d_vars = '', ''
intervals = []
aliases = []
is_default = False
## Draw is the only method here which
## is meant to be overridden in child
## classes, and PlotModeBase provides
## a base implementation.
def draw(self):
raise NotImplementedError()
## Everything else in this file has to
## do with registration and retrieval
## of plot modes. This is where I've
## hidden much of the ugliness of automatic
## plot mode divination...
## Plot mode registry data structures
_mode_alias_list = []
_mode_map = {
1: {1: {}, 2: {}},
2: {1: {}, 2: {}},
3: {1: {}, 2: {}},
} # [d][i][alias_str]: class
_mode_default_map = {
1: {},
2: {},
3: {},
} # [d][i]: class
_i_var_max, _d_var_max = 2, 3
def __new__(cls, *args, **kwargs):
"""
This is the function which interprets
arguments given to Plot.__init__ and
Plot.__setattr__. Returns an initialized
instance of the appropriate child class.
"""
newargs, newkwargs = PlotMode._extract_options(args, kwargs)
mode_arg = newkwargs.get('mode', '')
# Interpret the arguments
d_vars, intervals = PlotMode._interpret_args(newargs)
i_vars = PlotMode._find_i_vars(d_vars, intervals)
i, d = max([len(i_vars), len(intervals)]), len(d_vars)
# Find the appropriate mode
subcls = PlotMode._get_mode(mode_arg, i, d)
# Create the object
o = object.__new__(subcls)
# Do some setup for the mode instance
o.d_vars = d_vars
o._fill_i_vars(i_vars)
o._fill_intervals(intervals)
o.options = newkwargs
return o
@staticmethod
def _get_mode(mode_arg, i_var_count, d_var_count):
"""
Tries to return an appropriate mode class.
Intended to be called only by __new__.
mode_arg
Can be a string or a class. If it is a
PlotMode subclass, it is simply returned.
If it is a string, it can an alias for
a mode or an empty string. In the latter
case, we try to find a default mode for
the i_var_count and d_var_count.
i_var_count
The number of independent variables
needed to evaluate the d_vars.
d_var_count
The number of dependent variables;
usually the number of functions to
be evaluated in plotting.
For example, a Cartesian function y = f(x) has
one i_var (x) and one d_var (y). A parametric
form x,y,z = f(u,v), f(u,v), f(u,v) has two
two i_vars (u,v) and three d_vars (x,y,z).
"""
# if the mode_arg is simply a PlotMode class,
# check that the mode supports the numbers
# of independent and dependent vars, then
# return it
try:
m = None
if issubclass(mode_arg, PlotMode):
m = mode_arg
except TypeError:
pass
if m:
if not m._was_initialized:
raise ValueError(("To use unregistered plot mode %s "
"you must first call %s._init_mode().")
% (m.__name__, m.__name__))
if d_var_count != m.d_var_count:
raise ValueError(("%s can only plot functions "
"with %i dependent variables.")
% (m.__name__,
m.d_var_count))
if i_var_count > m.i_var_count:
raise ValueError(("%s cannot plot functions "
"with more than %i independent "
"variables.")
% (m.__name__,
m.i_var_count))
return m
# If it is a string, there are two possibilities.
if isinstance(mode_arg, str):
i, d = i_var_count, d_var_count
if i > PlotMode._i_var_max:
raise ValueError(var_count_error(True, True))
if d > PlotMode._d_var_max:
raise ValueError(var_count_error(False, True))
# If the string is '', try to find a suitable
# default mode
if not mode_arg:
return PlotMode._get_default_mode(i, d)
# Otherwise, interpret the string as a mode
# alias (e.g. 'cartesian', 'parametric', etc)
else:
return PlotMode._get_aliased_mode(mode_arg, i, d)
else:
raise ValueError("PlotMode argument must be "
"a class or a string")
@staticmethod
def _get_default_mode(i, d, i_vars=-1):
if i_vars == -1:
i_vars = i
try:
return PlotMode._mode_default_map[d][i]
except KeyError:
# Keep looking for modes in higher i var counts
# which support the given d var count until we
# reach the max i_var count.
if i < PlotMode._i_var_max:
return PlotMode._get_default_mode(i + 1, d, i_vars)
else:
raise ValueError(("Couldn't find a default mode "
"for %i independent and %i "
"dependent variables.") % (i_vars, d))
@staticmethod
def _get_aliased_mode(alias, i, d, i_vars=-1):
if i_vars == -1:
i_vars = i
if alias not in PlotMode._mode_alias_list:
raise ValueError(("Couldn't find a mode called"
" %s. Known modes: %s.")
% (alias, ", ".join(PlotMode._mode_alias_list)))
try:
return PlotMode._mode_map[d][i][alias]
except TypeError:
# Keep looking for modes in higher i var counts
# which support the given d var count and alias
# until we reach the max i_var count.
if i < PlotMode._i_var_max:
return PlotMode._get_aliased_mode(alias, i + 1, d, i_vars)
else:
raise ValueError(("Couldn't find a %s mode "
"for %i independent and %i "
"dependent variables.")
% (alias, i_vars, d))
@classmethod
def _register(cls):
"""
Called once for each user-usable plot mode.
For Cartesian2D, it is invoked after the
class definition: Cartesian2D._register()
"""
name = cls.__name__
cls._init_mode()
try:
i, d = cls.i_var_count, cls.d_var_count
# Add the mode to _mode_map under all
# given aliases
for a in cls.aliases:
if a not in PlotMode._mode_alias_list:
# Also track valid aliases, so
# we can quickly know when given
# an invalid one in _get_mode.
PlotMode._mode_alias_list.append(a)
PlotMode._mode_map[d][i][a] = cls
if cls.is_default:
# If this mode was marked as the
# default for this d,i combination,
# also set that.
PlotMode._mode_default_map[d][i] = cls
except Exception as e:
raise RuntimeError(("Failed to register "
"plot mode %s. Reason: %s")
% (name, (str(e))))
@classmethod
def _init_mode(cls):
"""
Initializes the plot mode based on
the 'mode-specific parameters' above.
Only intended to be called by
PlotMode._register(). To use a mode without
registering it, you can directly call
ModeSubclass._init_mode().
"""
def symbols_list(symbol_str):
return [Symbol(s) for s in symbol_str]
# Convert the vars strs into
# lists of symbols.
cls.i_vars = symbols_list(cls.i_vars)
cls.d_vars = symbols_list(cls.d_vars)
# Var count is used often, calculate
# it once here
cls.i_var_count = len(cls.i_vars)
cls.d_var_count = len(cls.d_vars)
if cls.i_var_count > PlotMode._i_var_max:
raise ValueError(var_count_error(True, False))
if cls.d_var_count > PlotMode._d_var_max:
raise ValueError(var_count_error(False, False))
# Try to use first alias as primary_alias
if len(cls.aliases) > 0:
cls.primary_alias = cls.aliases[0]
else:
cls.primary_alias = cls.__name__
di = cls.intervals
if len(di) != cls.i_var_count:
raise ValueError("Plot mode must provide a "
"default interval for each i_var.")
for i in range(cls.i_var_count):
# default intervals must be given [min,max,steps]
# (no var, but they must be in the same order as i_vars)
if len(di[i]) != 3:
raise ValueError("length should be equal to 3")
# Initialize an incomplete interval,
# to later be filled with a var when
# the mode is instantiated.
di[i] = PlotInterval(None, *di[i])
# To prevent people from using modes
# without these required fields set up.
cls._was_initialized = True
_was_initialized = False
## Initializer Helper Methods
@staticmethod
def _find_i_vars(functions, intervals):
i_vars = []
# First, collect i_vars in the
# order they are given in any
# intervals.
for i in intervals:
if i.v is None:
continue
elif i.v in i_vars:
raise ValueError(("Multiple intervals given "
"for %s.") % (str(i.v)))
i_vars.append(i.v)
# Then, find any remaining
# i_vars in given functions
# (aka d_vars)
for f in functions:
for a in f.free_symbols:
if a not in i_vars:
i_vars.append(a)
return i_vars
def _fill_i_vars(self, i_vars):
# copy default i_vars
self.i_vars = [Symbol(str(i)) for i in self.i_vars]
# replace with given i_vars
for i in range(len(i_vars)):
self.i_vars[i] = i_vars[i]
def _fill_intervals(self, intervals):
# copy default intervals
self.intervals = [PlotInterval(i) for i in self.intervals]
# track i_vars used so far
v_used = []
# fill copy of default
# intervals with given info
for i in range(len(intervals)):
self.intervals[i].fill_from(intervals[i])
if self.intervals[i].v is not None:
v_used.append(self.intervals[i].v)
# Find any orphan intervals and
# assign them i_vars
for i in range(len(self.intervals)):
if self.intervals[i].v is None:
u = [v for v in self.i_vars if v not in v_used]
if len(u) == 0:
raise ValueError("length should not be equal to 0")
self.intervals[i].v = u[0]
v_used.append(u[0])
@staticmethod
def _interpret_args(args):
interval_wrong_order = "PlotInterval %s was given before any function(s)."
interpret_error = "Could not interpret %s as a function or interval."
functions, intervals = [], []
if isinstance(args[0], GeometryEntity):
for coords in list(args[0].arbitrary_point()):
functions.append(coords)
intervals.append(PlotInterval.try_parse(args[0].plot_interval()))
else:
for a in args:
i = PlotInterval.try_parse(a)
if i is not None:
if len(functions) == 0:
raise ValueError(interval_wrong_order % (str(i)))
else:
intervals.append(i)
else:
if is_sequence(a, include=str):
raise ValueError(interpret_error % (str(a)))
try:
f = sympify(a)
functions.append(f)
except TypeError:
raise ValueError(interpret_error % str(a))
return functions, intervals
@staticmethod
def _extract_options(args, kwargs):
newkwargs, newargs = {}, []
for a in args:
if isinstance(a, str):
newkwargs = dict(newkwargs, **parse_option_string(a))
else:
newargs.append(a)
newkwargs = dict(newkwargs, **kwargs)
return newargs, newkwargs
def var_count_error(is_independent, is_plotting):
"""
Used to format an error message which differs
slightly in 4 places.
"""
if is_plotting:
v = "Plotting"
else:
v = "Registering plot modes"
if is_independent:
n, s = PlotMode._i_var_max, "independent"
else:
n, s = PlotMode._d_var_max, "dependent"
return ("%s with more than %i %s variables "
"is not supported.") % (v, n, s)
|
PlotMode
|
python
|
huggingface__transformers
|
src/transformers/models/eomt/modular_eomt.py
|
{
"start": 10873,
"end": 12442
}
|
class ____(Dinov2Embeddings):
def __init__(self, config: EomtConfig) -> None:
nn.Module.__init__(self)
self.config = config
self.patch_size = config.patch_size
self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
self.register_tokens = nn.Parameter(torch.zeros(1, config.num_register_tokens, config.hidden_size))
self.patch_embeddings = EomtPatchEmbeddings(config)
num_patches = self.patch_embeddings.num_patches
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.num_prefix_tokens = 1 + config.num_register_tokens # 1 for [CLS]
self.position_embeddings = nn.Embedding(num_patches, config.hidden_size)
self.register_buffer("position_ids", torch.arange(num_patches).expand((1, -1)), persistent=False)
def interpolate_pos_encoding(self):
raise AttributeError("Not needed for Eomt Model")
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
batch_size, _, _, _ = pixel_values.shape
target_dtype = self.patch_embeddings.projection.weight.dtype
embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype))
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
register_tokens = self.register_tokens.expand(batch_size, -1, -1)
embeddings = embeddings + self.position_embeddings(self.position_ids)
embeddings = torch.cat([cls_tokens, register_tokens, embeddings], dim=1)
embeddings = self.dropout(embeddings)
return embeddings
|
EomtEmbeddings
|
python
|
encode__django-rest-framework
|
tests/test_renderers.py
|
{
"start": 1612,
"end": 1847
}
|
class ____(TestCase):
def test_expected_results(self):
for value, renderer_cls, expected in expected_results:
output = renderer_cls().render(value)
self.assertEqual(output, expected)
|
BasicRendererTests
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.