language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ray-project__ray | release/nightly_tests/multimodal_inference_benchmarks/image_classification/daft_main.py | {
"start": 1178,
"end": 3274
} | class ____:
def __init__(self):
self.weights = weights
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model = resnet18(weights=weights).to(self.device)
self.model.eval()
def __call__(self, images):
if len(images) == 0:
return []
torch_batch = torch.from_numpy(np.array(images.to_pylist())).to(self.device)
with torch.inference_mode():
prediction = self.model(torch_batch)
predicted_classes = prediction.argmax(dim=1).detach().cpu()
predicted_labels = [
self.weights.meta["categories"][i] for i in predicted_classes
]
return predicted_labels
start_time = time.time()
df = daft.read_parquet(INPUT_PATH)
# NOTE: Limit to the 803,580 images Daft uses in their benchmark.
df = df.limit(803_580)
# NOTE: We need to manually repartition the DataFrame to achieve good performance. This
# code isn't in Daft's benchmark, possibly because their Parquet metadata is
# pre-partitioned. Note we're using `repartition(NUM_GPUS)` instead of
# `into_partitions(NUM_CPUS * 2)` as suggested in Daft's documentation. In our
# experiments, the recommended approach led to OOMs, crashes, and slower performance.
df = df.repartition(NUM_GPU_NODES)
df = df.with_column(
"decoded_image",
df["image_url"]
.url.download()
.image.decode(on_error="null", mode=daft.ImageMode.RGB),
)
# NOTE: At least one image encounters this error: https://github.com/etemesi254/zune-image/issues/244.
# So, we need to return "null" for errored files and filter them out.
df = df.where(df["decoded_image"].not_null())
df = df.with_column(
"norm_image",
df["decoded_image"].apply(
func=lambda image: transform(image),
return_dtype=daft.DataType.tensor(
dtype=daft.DataType.float32(), shape=IMAGE_DIM
),
),
)
df = df.with_column("label", ResNetModel(col("norm_image")))
df = df.select("image_url", "label")
df.write_parquet(OUTPUT_PATH)
print("Runtime:", time.time() - start_time)
| ResNetModel |
python | sanic-org__sanic | sanic/worker/constants.py | {
"start": 200,
"end": 465
} | class ____(IntEnum):
"""Process states."""
NONE = auto()
IDLE = auto()
RESTARTING = auto()
STARTING = auto()
STARTED = auto()
ACKED = auto()
JOINED = auto()
TERMINATED = auto()
FAILED = auto()
COMPLETED = auto()
| ProcessState |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP046_0.py | {
"start": 1799,
"end": 2033
} | class ____(Generic[T, T]): # duplicate generic variable, runtime error
pass
# TODO(brent) we should also apply the fix to methods, but it will need a
# little more work. these should be left alone for now but be fixed eventually.
| D |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 241556,
"end": 243699
} | class ____(ExternKernel):
"""
An IR node representing a generic host-side TMA descriptor in the Triton API
Mostly useful for user-defined Triton kernels relying on host-side TMA;
but can, in principle, be used for Inductor's Triton templates, too.
See TMADescriptorExperimental and TMADescriptorStable for the two implementations
(the old API and the new API)
"""
# as TMA descriptors are immutable,
# we can dedup them by the input args
_CACHE: dict[Any, TMADescriptor] = {}
@classmethod
def _create_impl(
cls, tensor: IRNode, tma_meta: tuple[str, tuple[Any, ...]]
) -> TMADescriptor:
assert len(tma_meta) == 2
if tma_meta[0] == "experimental":
return TMADescriptorExperimental(tensor, *tma_meta[1])
else:
assert tma_meta[0] == "stable"
return TMADescriptorStable(tensor, *tma_meta[1])
@classmethod
def create(
cls, tensor: IRNode, tma_meta: tuple[str, tuple[Any, ...]]
) -> TMADescriptor:
key = (id(tensor), tma_meta)
if key not in cls._CACHE:
cls._CACHE[key] = cls._create_impl(tensor, tma_meta)
return cls._CACHE[key]
def __init__(
self, tensor: IRNode, inputs: Sequence[Any], constant_args: Sequence[Any]
) -> None:
super().__init__(
None,
# link back to the underlying tensor in terms of ownership
# to avoid getting the underlying tensor deleted *before*
# the TMADescriptor node can be deleted.
NonOwningLayout(
ReinterpretView(
data=tensor,
layout=tensor.get_layout(),
)
),
cast(Sequence[Buffer], inputs),
tuple(constant_args),
None,
)
self.tensor = tensor
self.name = V.graph.register_buffer(self)
V.graph.register_operation(self)
def codegen(self, wrapper: PythonWrapperCodegen) -> None:
wrapper.generate_tma_descriptor(self)
def get_tensor(self) -> IRNode:
return self.tensor
| TMADescriptor |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_set.py | {
"start": 58031,
"end": 58233
} | class ____(_TestCopying, __TestCase):
def setUp(self):
self.set = set()
super().setUp()
#------------------------------------------------------------------------------
| TestCopyingEmpty |
python | pytorch__pytorch | test/distributed/test_local_tensor.py | {
"start": 19477,
"end": 20130
} | class ____(LocalTensorTestBase):
world_size = 4
def test_dtensor_cat(self):
with LocalTensorMode(self.world_size):
device_mesh = self.build_device_mesh()
t1 = torch.arange(16).view(4, 4).float()
d1 = distribute_tensor(t1, device_mesh, [Replicate()])
t2 = (torch.arange(16) + 16).view(4, 4).float()
d2 = distribute_tensor(t2, device_mesh, [Shard(0)])
local_res = torch.cat([t1, t2], dim=-1)
dist_res = torch.cat([d1, d2], dim=-1)
full_tensor = dist_res.full_tensor()
self.assertEqual(full_tensor, local_res)
| TestLocalTensorWorld4 |
python | huggingface__transformers | src/transformers/models/vjepa2/modeling_vjepa2.py | {
"start": 13923,
"end": 14390
} | class ____(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float] = None):
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f"p={self.drop_prob}"
| VJEPA2DropPath |
python | huggingface__transformers | tests/quantization/ggml/test_ggml.py | {
"start": 11378,
"end": 50218
} | class ____(unittest.TestCase):
mistral_model_id = "TheBloke/Mistral-7B-Instruct-v0.2-GGUF"
qwen2_model_id = "Qwen/Qwen1.5-0.5B-Chat-GGUF"
qwen2moe_model_id = "gdax/Qwen1.5-MoE-A2.7B_gguf"
qwen2moe_original_model_id = "Qwen/Qwen1.5-MoE-A2.7B"
llama3_model_id = "NousResearch/Meta-Llama-3-8B-GGUF"
tinyllama_model_id = "PenutChen/TinyLlama-1.1B-Chat-v1.0-GGUF"
phi3_model_id = "microsoft/Phi-3-mini-4k-instruct-gguf"
bloom_model_id = "afrideva/bloom-560m-GGUF"
original_bloom_model_id = "bigscience/bloom-560m"
falcon7b_model_id_q2 = "xaviviro/falcon-7b-quantized-gguf"
falcon7b_model_id_fp16 = "medmekk/falcon-7b-gguf"
falcon40b_model_id = "maddes8cht/tiiuae-falcon-40b-gguf"
original_flacon7b_model_id = "tiiuae/falcon-7b"
t5_model_id = "Felladrin/gguf-flan-t5-small"
original_t5_model_id = "google/flan-t5-small"
stablelm_model_id = "afrideva/stablelm-3b-4e1t-GGUF"
stablelm2_model_id = "afrideva/stablelm-2-1_6b-GGUF"
original_stablelm2_model_id = "stabilityai/stablelm-2-1_6b"
gpt2_model_id = "mradermacher/gpt2-GGUF"
gpt2_original_model_id = "openai-community/gpt2"
gpt2_xl_model_id = "RichardErkhov/openai-community_-_gpt2-xl-gguf"
starcoder2_model_id = "QuantFactory/starcoder2-3b-GGUF"
starcoder2_fp16_model_id = "brittlewis12/starcoder2-3b-GGUF"
starcoder2_original_model_id = "bigcode/starcoder2-3b"
mamba_original_model_id = "state-spaces/mamba-2.8b-hf"
mamba_model_id = "jpodivin/mamba-2.8b-hf-GGUF"
nemotron_original_model_id = "nvidia/Nemotron-Mini-4B-Instruct"
nemotron_model_id = "bartowski/Nemotron-Mini-4B-Instruct-GGUF"
original_gemma2_model_id = "google/gemma-2-2b-it"
gemma2_model_id = "bartowski/gemma-2-2b-it-GGUF"
original_gemma3_text_model_id = "google/gemma-3-1b-it"
original_gemma3_vision_model_id = "google/gemma-3-4b-it"
gemma3_qat_model_id = "google/gemma-3-1b-it-qat-q4_0-gguf"
gemma3_text_model_id = "unsloth/gemma-3-1b-it-GGUF"
gemma3_vision_model_id = "unsloth/gemma-3-4b-it-GGUF"
qwen3_model_id = "Qwen/Qwen3-0.6B-GGUF"
qwen3moe_model_id = "Qwen/Qwen3-30B-A3B-GGUF"
umt5_encoder_model_id = "city96/umt5-xxl-encoder-gguf"
lfm2_model_id = "LiquidAI/LFM2-1.2B-GGUF"
q4_0_phi3_model_id = "Phi-3-mini-4k-instruct-q4.gguf"
q4_0_mistral_model_id = "mistral-7b-instruct-v0.2.Q4_0.gguf"
q4_0_qwen2_model_id = "qwen1_5-0_5b-chat-q4_0.gguf"
q8_qwen2moe_model_id = "Qwen1.5-MoE-A2.7B_Q8_0.gguf"
q4_llama3_model_id = "Meta-Llama-3-8B-Q4_K_M.gguf"
fp16_bloom_model_id = "bloom-560m.fp16.gguf"
q4_k_m_stablelm_model_id = "stablelm-3b-4e1t.q4_k_m.gguf"
fp16_stablelm2_model_id = "stablelm-2-1_6b.fp16.gguf"
q8_bloom_model_id = "bloom-560m.q8_0.gguf"
f16_tinyllama_model_id = "TinyLlama-1.1B-Chat-v1.0.FP16.gguf"
q2_k_falcon7b_model_id = "falcon-7b-q2_k.gguf"
fp16_falcon7b_model_id = "falcon-7b-fp16.gguf"
q2_k_falcon40b_model_id = "tiiuae-falcon-40b-Q2_K.gguf"
fp16_t5_model_id = "flan-t5-small.F16.gguf"
q8_0_t5_model_id = "flan-t5-small.Q8_0.gguf"
fp16_qwen2moe_model_id = "Qwen1.5-MoE-A2.7B.gguf"
fp16_gpt2_model_id = "gpt2.f16.gguf"
q8_gpt2_model_id = "gpt2.Q8_0.gguf"
q6_k_gpt2_xl_model_id = "gpt2-xl.Q6_K.gguf"
q6_k_starcoder2_model_id = "starcoder2-3b.Q6_K.gguf"
fp16_starcoder2_gguf_model_id = "starcoder2-3b.fp16.gguf"
q6_k_mamba_model_id = "ggml-model-Q6_K.gguf"
fp16_mamba_model_id = "ggml-model-f16.gguf"
q6_k_nemotron_model_id = "Nemotron-Mini-4B-Instruct-Q6_K.gguf"
fp16_nemotron_model_id = "Nemotron-Mini-4B-Instruct-f16.gguf"
q3_k_gemma2_model_id = "gemma-2-2b-it-Q3_K_L.gguf"
q8_0_gemma2_model_id = "gemma-2-2b-it-Q8_0.gguf"
fp32_gemma2_model_id = "gemma-2-2b-it-f32.gguf"
q4_0_gemma3_qat_model_id = "gemma-3-1b-it-q4_0.gguf"
bf16_gemma3_text_model_id = "gemma-3-1b-it-BF16.gguf"
bf16_gemma3_vision_model_id = "gemma-3-4b-it-BF16.gguf"
deci_original_model_id = "Deci/DeciLM-7B"
deci_model_id = "Deci/DeciLM-7B-instruct-GGUF"
q8_0_deci_model_id = "decilm-7b-uniform-gqa-q8_0.gguf"
fp16_deci_model_id = "decilm-7b-uniform-gqa-f16.gguf"
q8_0_qwen3_model_id = "Qwen3-0.6B-Q8_0.gguf"
q4_k_m_qwen3moe_model_id = "Qwen3-30B-A3B-Q4_K_M.gguf"
q8_0_umt5_encoder_model_id = "umt5-xxl-encoder-Q8_0.gguf"
q4_k_m_lfm2_model_id = "LFM2-1.2B-Q4_K_M.gguf"
example_text = "Hello"
def test_mistral_q4_0(self):
tokenizer = AutoTokenizer.from_pretrained(self.mistral_model_id, gguf_file=self.q4_0_mistral_model_id)
model = AutoModelForCausalLM.from_pretrained(
self.mistral_model_id,
gguf_file=self.q4_0_mistral_model_id,
device_map="auto",
dtype=torch.float16,
)
text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
out = model.generate(**text, max_new_tokens=10)
EXPECTED_TEXT = "Hello,\n\nI'm trying to create a"
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
def test_qwen2_q4_0(self):
tokenizer = AutoTokenizer.from_pretrained(self.qwen2_model_id, gguf_file=self.q4_0_qwen2_model_id)
model = AutoModelForCausalLM.from_pretrained(
self.qwen2_model_id,
gguf_file=self.q4_0_qwen2_model_id,
device_map="auto",
dtype=torch.float16,
)
text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
out = model.generate(**text, max_new_tokens=10)
EXPECTED_TEXT = "Hello.jsoup\n\nI am a beginner"
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
def test_qwen2moe_q8(self):
tokenizer = AutoTokenizer.from_pretrained(self.qwen2moe_model_id, gguf_file=self.q8_qwen2moe_model_id)
model = AutoModelForCausalLM.from_pretrained(
self.qwen2moe_model_id,
gguf_file=self.q8_qwen2moe_model_id,
dtype=torch.float16,
)
text = tokenizer(self.example_text, return_tensors="pt")
out = model.generate(**text, max_new_tokens=10)
EXPECTED_TEXT = "Hello, I am a 20 year old male"
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
def test_qwen2moe_weights_conversion_fp16(self):
quantized_model = AutoModelForCausalLM.from_pretrained(
self.qwen2moe_model_id,
gguf_file=self.fp16_qwen2moe_model_id,
dtype=torch.float16,
)
original_model = AutoModelForCausalLM.from_pretrained(
self.qwen2moe_original_model_id,
dtype=torch.float16,
)
quantized_state_dict = quantized_model.state_dict()
original_state_dict = original_model.state_dict()
for layer_name, original_params in original_state_dict.items():
if layer_name in quantized_state_dict:
self.assertTrue(original_params.shape == quantized_state_dict[layer_name].shape)
torch.testing.assert_close(original_params, quantized_state_dict[layer_name])
def test_phi3_q4_0(self):
tokenizer = AutoTokenizer.from_pretrained(self.phi3_model_id, gguf_file=self.q4_0_phi3_model_id)
model = AutoModelForCausalLM.from_pretrained(
self.phi3_model_id, gguf_file=self.q4_0_phi3_model_id, device_map="auto", dtype=torch.float16
)
text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
out = model.generate(**text, max_new_tokens=10)
EXPECTED_TEXT = "Hello, I've been reading about the impact of"
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
def test_llama3_q4_0_tokenizer(self):
tokenizer = AutoTokenizer.from_pretrained(self.llama3_model_id, gguf_file=self.q4_llama3_model_id)
with tempfile.TemporaryDirectory() as tmpdirname:
tokenizer.save_pretrained(tmpdirname)
tokenizer = AutoTokenizer.from_pretrained(tmpdirname)
special_sentence = "สวัสดี"
predicted_text = tokenizer.decode(tokenizer.encode(special_sentence, return_tensors="pt")[0])
self.assertEqual(predicted_text, "<|begin_of_text|>" + special_sentence)
def test_llama3_q4_0(self):
tokenizer = AutoTokenizer.from_pretrained(self.llama3_model_id, gguf_file=self.q4_llama3_model_id)
model = AutoModelForCausalLM.from_pretrained(
self.llama3_model_id,
gguf_file=self.q4_llama3_model_id,
device_map="auto",
dtype=torch.float16,
)
text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
out = model.generate(**text, max_new_tokens=10)
EXPECTED_TEXT = "Hello, I am interested in [The Park]\nThe"
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
def test_bloom_fp16(self):
tokenizer = AutoTokenizer.from_pretrained(self.bloom_model_id, gguf_file=self.fp16_bloom_model_id)
model = AutoModelForCausalLM.from_pretrained(
self.bloom_model_id,
gguf_file=self.fp16_bloom_model_id,
device_map="auto",
dtype=torch.float16,
)
text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
out = model.generate(**text, max_new_tokens=10)
EXPECTED_TEXT = "Hello, I just want to say that I am very"
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
def test_bloom_q8_0(self):
tokenizer = AutoTokenizer.from_pretrained(self.bloom_model_id, gguf_file=self.q8_bloom_model_id)
model = AutoModelForCausalLM.from_pretrained(
self.bloom_model_id,
gguf_file=self.q8_bloom_model_id,
device_map="auto",
dtype=torch.float16,
)
text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
out = model.generate(**text, max_new_tokens=10)
EXPECTED_TEXT = "Hello, I just want to say that I am just"
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
def test_bloom_weights_conversion_fp16(self):
quantized_model = AutoModelForCausalLM.from_pretrained(
self.bloom_model_id,
gguf_file=self.fp16_bloom_model_id,
device_map="auto",
dtype=torch.float16,
)
original_model = AutoModelForCausalLM.from_pretrained(
self.original_bloom_model_id,
device_map="auto",
dtype=torch.float16,
)
quantized_state_dict = quantized_model.state_dict()
original_state_dict = original_model.state_dict()
for (quantized_name, quantized_param), (original_name, original_param) in zip(
quantized_state_dict.items(), original_state_dict.items()
):
if (
"self_attention.query_key_value" in quantized_name
and "self_attention.query_key_value" in original_name
):
self.assertTrue(quantized_param.shape == original_param.shape)
torch.testing.assert_close(quantized_param, original_param)
def test_t5_f16(self):
tokenizer = AutoTokenizer.from_pretrained(self.t5_model_id, gguf_file=self.fp16_t5_model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(
self.t5_model_id, gguf_file=self.fp16_t5_model_id, device_map="auto", dtype=torch.float16
)
T5_EXAMPLE_TEXT = "translate English to German: How old are you?"
text = tokenizer(T5_EXAMPLE_TEXT, return_tensors="pt").to(torch_device)
out = model.generate(**text, max_new_tokens=10)
EXPECTED_TEXT = "Wie ich er?"
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
def test_t5_q8_0(self):
tokenizer = AutoTokenizer.from_pretrained(self.t5_model_id, gguf_file=self.q8_0_t5_model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(
self.t5_model_id, gguf_file=self.q8_0_t5_model_id, device_map="auto", dtype=torch.float16
)
T5_EXAMPLE_TEXT = "translate English to German: How old are you?"
text = tokenizer(T5_EXAMPLE_TEXT, return_tensors="pt").to(torch_device)
out = model.generate(**text, max_new_tokens=10)
EXPECTED_TEXT = "Wie ich er?"
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
def test_t5_weights_conversion_fp16(self):
quantized_model = AutoModelForSeq2SeqLM.from_pretrained(
self.t5_model_id,
gguf_file=self.fp16_t5_model_id,
device_map="auto",
dtype=torch.float16,
)
original_model = AutoModelForSeq2SeqLM.from_pretrained(
self.original_t5_model_id,
device_map="auto",
dtype=torch.float16,
)
quantized_state_dict = quantized_model.state_dict()
original_state_dict = original_model.state_dict()
for (quantized_name, quantized_param), (original_name, original_param) in zip(
quantized_state_dict.items(), original_state_dict.items()
):
self.assertTrue(quantized_param.shape == original_param.shape)
torch.testing.assert_close(quantized_param, original_param, rtol=5e-04, atol=5e-04)
def test_gpt2_q8(self):
tokenizer = AutoTokenizer.from_pretrained(self.gpt2_model_id, gguf_file=self.q8_gpt2_model_id)
model = AutoModelForCausalLM.from_pretrained(
self.gpt2_model_id,
gguf_file=self.q8_gpt2_model_id,
dtype=torch.float16,
)
text = tokenizer(self.example_text, return_tensors="pt")
out = model.generate(**text, max_new_tokens=10)
EXPECTED_TEXT = "Hello, I'm sorry. I'm sorry. I"
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
def test_gpt2_weights_conversion_fp16(self):
quantized_model = AutoModelForCausalLM.from_pretrained(
self.gpt2_model_id,
gguf_file=self.fp16_gpt2_model_id,
dtype=torch.float16,
)
original_model = AutoModelForCausalLM.from_pretrained(
self.gpt2_original_model_id,
dtype=torch.float16,
)
quantized_state_dict = quantized_model.state_dict()
original_state_dict = original_model.state_dict()
for layer_name, original_params in original_state_dict.items():
if layer_name in quantized_state_dict:
self.assertTrue(original_params.shape == quantized_state_dict[layer_name].shape)
torch.testing.assert_close(original_params, quantized_state_dict[layer_name])
else:
raise ValueError(f"Layer {layer_name} is not presented in GGUF model")
def test_gpt2_xl_Q6_K(self):
tokenizer = AutoTokenizer.from_pretrained(self.gpt2_xl_model_id, gguf_file=self.q6_k_gpt2_xl_model_id)
model = AutoModelForCausalLM.from_pretrained(
self.gpt2_xl_model_id,
gguf_file=self.q6_k_gpt2_xl_model_id,
dtype=torch.float16,
)
text = tokenizer(self.example_text, return_tensors="pt")
out = model.generate(**text, max_new_tokens=10)
EXPECTED_TEXT = "Hello, I'm a newbie to the world of"
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
@unittest.skip(reason="Heavy memory")
def test_falcon40b_q2_k(self):
tokenizer = AutoTokenizer.from_pretrained(self.falcon40b_model_id, gguf_file=self.q2_k_falcon40b_model_id)
model = AutoModelForCausalLM.from_pretrained(
self.falcon40b_model_id,
gguf_file=self.q2_k_falcon40b_model_id,
device_map="auto",
dtype=torch.float16,
)
text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
out = model.generate(**text, max_new_tokens=10)
EXPECTED_TEXT = "Hello All,\nI am new to this forum."
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
def test_falcon7b_q2_k(self):
tokenizer = AutoTokenizer.from_pretrained(self.falcon7b_model_id_q2, gguf_file=self.q2_k_falcon7b_model_id)
model = AutoModelForCausalLM.from_pretrained(
self.falcon7b_model_id_q2,
gguf_file=self.q2_k_falcon7b_model_id,
device_map="auto",
dtype=torch.float16,
)
text = tokenizer(self.example_text, return_tensors="pt")["input_ids"].to(torch_device)
out = model.generate(text, max_new_tokens=16)
EXPECTED_TEXT = "Hello All,\nI am new to this forum.\nI am using the "
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
@unittest.skip("The test causes a torch.OutOfMemoryError on the CI but it passes with enough memory")
def test_falcon7b_weights_conversion_fp16(self):
quantized_model = AutoModelForCausalLM.from_pretrained(
self.falcon7b_model_id_fp16,
gguf_file=self.fp16_falcon7b_model_id,
device_map="auto",
dtype=torch.float16,
)
original_model = AutoModelForCausalLM.from_pretrained(
self.original_flacon7b_model_id,
device_map="auto",
dtype=torch.float16,
)
quantized_state_dict = quantized_model.state_dict()
original_state_dict = original_model.state_dict()
for layer_name, original_params in original_state_dict.items():
if layer_name in quantized_state_dict:
self.assertTrue(original_params.shape == quantized_state_dict[layer_name].shape)
torch.testing.assert_close(original_params, quantized_state_dict[layer_name])
else:
raise ValueError(f"Layer {layer_name} is not presented in GGUF model")
def test_stablelm_q4_k_m(self):
model = AutoModelForCausalLM.from_pretrained(
self.stablelm_model_id,
gguf_file=self.q4_k_m_stablelm_model_id,
device_map="auto",
dtype=torch.float16,
)
tokenizer = AutoTokenizer.from_pretrained(self.stablelm_model_id, gguf_file=self.q4_k_m_stablelm_model_id)
text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
out = model.generate(**text, max_new_tokens=10)
EXPECTED_TEXT = "Hello-\nI am trying to create a new user"
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
def test_stablelm_fp16(self):
original_model = AutoModelForCausalLM.from_pretrained(
self.original_stablelm2_model_id,
dtype=torch.float16,
)
converted_model = AutoModelForCausalLM.from_pretrained(
self.stablelm2_model_id,
gguf_file=self.fp16_stablelm2_model_id,
dtype=torch.float16,
)
tokenizer = AutoTokenizer.from_pretrained(self.stablelm2_model_id, gguf_file=self.fp16_stablelm2_model_id)
text = tokenizer(self.example_text, return_tensors="pt")
original_out = original_model.generate(**text, max_new_tokens=10)
converted_out = converted_model.generate(**text, max_new_tokens=10)
EXPECTED_TEXT = "Hello, I am a 20 year old male"
self.assertEqual(tokenizer.decode(converted_out[0], skip_special_tokens=True), EXPECTED_TEXT)
self.assertEqual(
tokenizer.decode(converted_out[0], skip_special_tokens=True),
tokenizer.decode(original_out[0], skip_special_tokens=True),
)
def test_stablelm_weights_conversion_fp16(self):
original_model = AutoModelForCausalLM.from_pretrained(
self.original_stablelm2_model_id,
device_map="auto",
dtype=torch.float16,
)
converted_model = AutoModelForCausalLM.from_pretrained(
self.stablelm2_model_id,
gguf_file=self.fp16_stablelm2_model_id,
device_map="auto",
dtype=torch.float16,
)
converted_state_dict = converted_model.state_dict()
original_state_dict = original_model.state_dict()
for layer_name, original_params in original_state_dict.items():
if layer_name in converted_state_dict:
self.assertTrue(original_params.shape == converted_state_dict[layer_name].shape)
torch.testing.assert_close(original_params, converted_state_dict[layer_name])
else:
raise ValueError(f"Layer {layer_name} is not presented in GGUF model")
def test_starcoder2_weights_conversion_fp16(self):
original_model = AutoModelForCausalLM.from_pretrained(
self.starcoder2_original_model_id,
device_map="auto",
dtype=torch.float16,
)
converted_model = AutoModelForCausalLM.from_pretrained(
self.starcoder2_fp16_model_id,
gguf_file=self.fp16_starcoder2_gguf_model_id,
device_map="auto",
dtype=torch.float16,
)
converted_state_dict = converted_model.state_dict()
original_state_dict = original_model.state_dict()
for layer_name, original_params in original_state_dict.items():
if layer_name in converted_state_dict:
self.assertTrue(original_params.shape == converted_state_dict[layer_name].shape)
torch.testing.assert_close(original_params, converted_state_dict[layer_name])
else:
raise ValueError(f"Layer {layer_name} is not presented in GGUF model")
def test_starcoder2_q6_k(self):
example_function_text = "def print_hello_world():"
model = AutoModelForCausalLM.from_pretrained(
self.starcoder2_model_id,
gguf_file=self.q6_k_starcoder2_model_id,
device_map="auto",
dtype=torch.float16,
)
tokenizer = AutoTokenizer.from_pretrained(self.starcoder2_model_id, gguf_file=self.q6_k_starcoder2_model_id)
text = tokenizer(example_function_text, return_tensors="pt").to(torch_device)
out = model.generate(**text, max_new_tokens=10)
EXPECTED_TEXT = 'def print_hello_world():\n print("Hello World")\n\ndef print'
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
def test_mamba_weights_conversion_fp16(self):
original_model = AutoModelForCausalLM.from_pretrained(
self.mamba_original_model_id,
dtype=torch.float16,
)
converted_model = AutoModelForCausalLM.from_pretrained(
self.mamba_model_id,
gguf_file=self.fp16_mamba_model_id,
dtype=torch.float16,
)
converted_state_dict = converted_model.state_dict()
original_state_dict = original_model.state_dict()
for layer_name, original_params in original_state_dict.items():
if layer_name in converted_state_dict:
self.assertTrue(original_params.shape == converted_state_dict[layer_name].shape)
if "mixer.A_log" in layer_name:
# we should increase tolerance after exponential reversing
# and performing np.log(-weights) operation as numbers are slightly different
torch.testing.assert_close(original_params, converted_state_dict[layer_name], rtol=1e-3, atol=1e-3)
else:
torch.testing.assert_close(original_params, converted_state_dict[layer_name])
else:
raise ValueError(f"Layer {layer_name} is not presented in GGUF model")
def test_mamba_q6_k(self):
model = AutoModelForCausalLM.from_pretrained(
self.mamba_model_id,
gguf_file=self.q6_k_mamba_model_id,
dtype=torch.float16,
)
tokenizer = AutoTokenizer.from_pretrained(self.mamba_model_id, gguf_file=self.q6_k_mamba_model_id)
text = tokenizer(self.example_text, return_tensors="pt")["input_ids"]
out = model.generate(text, max_new_tokens=10)
EXPECTED_TEXT = "Hello,I answerthe question.\n\nA"
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
def test_nemotron_weights_conversion_fp16(self):
original_model = AutoModelForCausalLM.from_pretrained(
self.nemotron_original_model_id,
dtype=torch.float16,
)
converted_model = AutoModelForCausalLM.from_pretrained(
self.nemotron_model_id,
gguf_file=self.fp16_nemotron_model_id,
dtype=torch.float16,
)
converted_state_dict = converted_model.state_dict()
original_state_dict = original_model.state_dict()
for layer_name, original_params in original_state_dict.items():
if layer_name in converted_state_dict:
self.assertTrue(original_params.shape == converted_state_dict[layer_name].shape)
torch.testing.assert_close(original_params, converted_state_dict[layer_name])
else:
raise ValueError(f"Layer {layer_name} is not presented in GGUF model")
def test_nemotron_q6_k(self):
model = AutoModelForCausalLM.from_pretrained(
self.nemotron_model_id,
gguf_file=self.q6_k_nemotron_model_id,
dtype=torch.float16,
)
# use the original tokenizer from nvidia to avoid long load times
tokenizer = AutoTokenizer.from_pretrained("nvidia/Nemotron-Mini-4B-Instruct")
text = tokenizer(self.example_text, return_tensors="pt")["input_ids"]
out = model.generate(text, max_new_tokens=16)
EXPECTED_TEXT = "Hello.▁hotmail.com</s>"
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
def test_gemma2_q3_k(self):
model = AutoModelForCausalLM.from_pretrained(
self.gemma2_model_id,
gguf_file=self.q3_k_gemma2_model_id,
dtype=torch.float16,
)
tokenizer = AutoTokenizer.from_pretrained(self.gemma2_model_id, gguf_file=self.q3_k_gemma2_model_id)
text = tokenizer(self.example_text, return_tensors="pt")["input_ids"]
out = model.generate(text, max_new_tokens=10)
EXPECTED_TEXT = "Hello! 👋\n\nI'm trying to create a"
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
def test_gemma2_q8_0(self):
model = AutoModelForCausalLM.from_pretrained(
self.gemma2_model_id,
gguf_file=self.q8_0_gemma2_model_id,
dtype=torch.float16,
)
tokenizer = AutoTokenizer.from_pretrained(self.gemma2_model_id, gguf_file=self.q8_0_gemma2_model_id)
text = tokenizer(self.example_text, return_tensors="pt")["input_ids"]
out = model.generate(text, max_new_tokens=10)
EXPECTED_TEXT = "Hello! 👋\n\nI'm a large language model"
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
def test_gemma2_fp32(self):
model = AutoModelForCausalLM.from_pretrained(
self.gemma2_model_id,
gguf_file=self.fp32_gemma2_model_id,
dtype=torch.float16,
)
tokenizer = AutoTokenizer.from_pretrained(self.gemma2_model_id, gguf_file=self.fp32_gemma2_model_id)
text = tokenizer(self.example_text, return_tensors="pt")["input_ids"]
out = model.generate(text, max_new_tokens=10)
EXPECTED_TEXT = "Hello! 👋\n\nI'm a large language model"
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
@require_read_token
def test_gemma2_weights_conversion_fp32(self):
original_model = AutoModelForCausalLM.from_pretrained(
self.original_gemma2_model_id,
dtype=torch.float16,
)
converted_model = AutoModelForCausalLM.from_pretrained(
self.gemma2_model_id,
gguf_file=self.fp32_gemma2_model_id,
dtype=torch.float16,
)
converted_state_dict = converted_model.state_dict()
original_state_dict = original_model.state_dict()
for layer_name, original_params in original_state_dict.items():
if layer_name in converted_state_dict:
self.assertTrue(original_params.shape == converted_state_dict[layer_name].shape)
torch.testing.assert_close(original_params, converted_state_dict[layer_name])
else:
raise ValueError(f"Layer {layer_name} is not presented in GGUF model")
@require_read_token
@unittest.skipUnless(is_gguf_available("0.16.0"), "test requires gguf version >= 0.16.0")
def test_gemma3_qat_q4_0(self):
model = AutoModelForCausalLM.from_pretrained(
self.gemma3_qat_model_id,
gguf_file=self.q4_0_gemma3_qat_model_id,
dtype=torch.float16,
)
tokenizer = AutoTokenizer.from_pretrained(self.gemma3_qat_model_id, gguf_file=self.q4_0_gemma3_qat_model_id)
text = tokenizer(self.example_text, return_tensors="pt")["input_ids"]
out = model.generate(text, max_new_tokens=10)
EXPECTED_TEXT = 'Hello with the prompt, "What is the best way'
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
@require_read_token
@unittest.skipUnless(is_gguf_available("0.16.0"), "test requires gguf version >= 0.16.0")
def test_gemma3_text_weights_conversion_bf16(self):
original_model = AutoModelForCausalLM.from_pretrained(
self.original_gemma3_text_model_id,
dtype=torch.float16,
)
converted_model = AutoModelForCausalLM.from_pretrained(
self.gemma3_text_model_id,
gguf_file=self.bf16_gemma3_text_model_id,
dtype=torch.float16,
)
converted_state_dict = converted_model.state_dict()
original_state_dict = original_model.state_dict()
for layer_name, original_params in original_state_dict.items():
if layer_name in converted_state_dict:
self.assertTrue(original_params.shape == converted_state_dict[layer_name].shape)
torch.testing.assert_close(original_params, converted_state_dict[layer_name])
else:
raise ValueError(f"Layer {layer_name} is not presented in GGUF model")
# Test text backbone conversion for gemma3 vision models
@require_read_token
@unittest.skipUnless(is_gguf_available("0.16.0"), "test requires gguf version >= 0.16.0")
def test_gemma3_vision_weights_conversion_bf16(self):
original_model = AutoModelForCausalLM.from_pretrained(
self.original_gemma3_vision_model_id,
dtype=torch.float16,
).language_model
converted_model = AutoModelForCausalLM.from_pretrained(
self.gemma3_vision_model_id,
gguf_file=self.bf16_gemma3_vision_model_id,
dtype=torch.float16,
).model
converted_state_dict = converted_model.state_dict()
original_state_dict = original_model.state_dict()
for layer_name, original_params in original_state_dict.items():
if layer_name in converted_state_dict:
self.assertTrue(original_params.shape == converted_state_dict[layer_name].shape)
torch.testing.assert_close(original_params, converted_state_dict[layer_name])
else:
raise ValueError(f"Layer {layer_name} is not presented in GGUF model")
def test_deci_q8_0(self):
"""Test Deci model loading and inference with Q4_0 quantization."""
tokenizer = AutoTokenizer.from_pretrained(self.deci_model_id, gguf_file=self.q8_0_deci_model_id)
model = AutoModelForCausalLM.from_pretrained(
self.deci_model_id,
gguf_file=self.q8_0_deci_model_id,
device_map="auto",
torch_dtype=torch.float16,
)
text = tokenizer(self.example_text, return_tensors="pt").to(torch_device)
out = model.generate(**text, max_new_tokens=10)
EXPECTED_TEXT = "Hello, I am a language model developed"
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
def test_deci_weights_conversion_fp16(self):
"""Test that GGUF Deci model weights match the original model weights."""
original_model_id = "Deci/DeciLM-7B"
original_model = AutoModelForCausalLM.from_pretrained(
original_model_id,
torch_dtype=torch.float16,
trust_remote_code=True,
device_map="auto",
)
# You need to have an FP16 version of your GGUF model for accurate comparison
converted_model = AutoModelForCausalLM.from_pretrained(
self.deci_model_id,
gguf_file=self.fp16_deci_model_id,
torch_dtype=torch.float16,
device_map="auto",
)
converted_state_dict = converted_model.state_dict()
original_state_dict = original_model.state_dict()
for layer_name, original_params in original_state_dict.items():
if layer_name in converted_state_dict:
self.assertTrue(original_params.shape == converted_state_dict[layer_name].shape)
torch.testing.assert_close(original_params, converted_state_dict[layer_name])
else:
raise ValueError(f"Layer {layer_name} is not presented in GGUF model")
def test_deci_config_mapping(self):
"""Test that Deci GGUF config mapping is correctly applied."""
from transformers.integrations.ggml import GGUF_CONFIG_MAPPING
self.assertIn("deci", GGUF_CONFIG_MAPPING)
deci_mapping = GGUF_CONFIG_MAPPING["deci"]
expected_mappings = {
"context_length": "max_position_embeddings",
"block_count": "num_hidden_layers",
"feed_forward_length": "intermediate_size",
"embedding_length": "hidden_size",
"rope.freq_base": "rope_theta",
"attention.head_count": "num_attention_heads",
"attention.head_count_kv": "num_key_value_heads",
"attention.layer_norm_rms_epsilon": "rms_norm_eps",
"vocab_size": "vocab_size",
}
for gguf_key, transformers_key in expected_mappings.items():
self.assertEqual(deci_mapping[gguf_key], transformers_key)
self.assertIsNone(deci_mapping["rope.dimension_count"])
def test_deci_architecture_mapping(self):
"""Test that Deci architectures are mapped to GGUFLlamaConverter."""
from transformers.integrations.ggml import GGUF_TO_FAST_CONVERTERS, GGUFLlamaConverter
self.assertIn("deci", GGUF_TO_FAST_CONVERTERS)
self.assertIn("decilm", GGUF_TO_FAST_CONVERTERS)
self.assertEqual(GGUF_TO_FAST_CONVERTERS["deci"], GGUFLlamaConverter)
self.assertEqual(GGUF_TO_FAST_CONVERTERS["decilm"], GGUFLlamaConverter)
@require_read_token
@unittest.skipUnless(is_gguf_available("0.16.0"), "test requires gguf version >= 0.16.0")
def test_qwen3_q8_0(self):
tokenizer = AutoTokenizer.from_pretrained(self.qwen3_model_id, gguf_file=self.q8_0_qwen3_model_id)
model = AutoModelForCausalLM.from_pretrained(
self.qwen3_model_id,
gguf_file=self.q8_0_qwen3_model_id,
dtype=torch.float16,
)
text = tokenizer(self.example_text, return_tensors="pt")["input_ids"]
out = model.generate(text, max_new_tokens=10)
EXPECTED_TEXT = "HelloED\nI need to find the value of the"
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
def test_qwen3moe_q4_k_m(self):
tokenizer = AutoTokenizer.from_pretrained(self.qwen3moe_model_id, gguf_file=self.q4_k_m_qwen3moe_model_id)
model = AutoModelForCausalLM.from_pretrained(
self.qwen3moe_model_id,
gguf_file=self.q4_k_m_qwen3moe_model_id,
dtype=torch.float16,
)
text = tokenizer(self.example_text, return_tensors="pt")
out = model.generate(**text, max_new_tokens=10)
EXPECTED_TEXT = "Hello, I am a 20 year old male"
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
def test_umt5_encoder_q8_0(self):
"""
Verifies that a UMT5 encoder loads directly from a GGUF file using
UMT5EncoderModel.from_pretrained(...), and the config is correctly UMT5.
"""
model = UMT5EncoderModel.from_pretrained(
self.umt5_encoder_model_id,
gguf_file=self.q8_0_umt5_encoder_model_id,
dtype=torch.float16,
device_map="auto",
)
model.eval()
self.assertIsInstance(model, UMT5EncoderModel)
self.assertIsInstance(model.config, UMT5Config)
self.assertEqual(model.config.model_type, "umt5")
self.assertIn("UMT5EncoderModel", getattr(model.config, "architectures", []))
input_ids = torch.tensor([[1, 2, 3, 4]], dtype=torch.long).to(torch_device)
with torch.no_grad():
outputs = model(input_ids=input_ids)
self.assertTrue(hasattr(outputs, "last_hidden_state"))
self.assertEqual(outputs.last_hidden_state.dim(), 3) # (batch, seq_len, hidden)
EXPECTED_OUTPUT = torch.tensor(
[
[-0.0010, -0.0145, 0.0133],
[-0.0006, 0.1814, 0.1132],
[0.0005, 0.0083, -0.0285],
]
).to(torch_device)
torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], EXPECTED_OUTPUT, rtol=6e-3, atol=4e-4)
@require_read_token
## to be precise, it currently require upstream gguf-py to be installed as lfm2 is not yet present in gguf 0.17.1
@unittest.skipUnless(is_gguf_available("0.17.0"), "test requires gguf version >= 0.17.0")
def test_lfm2_q4_k_m(self):
tokenizer = AutoTokenizer.from_pretrained("LiquidAI/LFM2-1.2B")
model = AutoModelForCausalLM.from_pretrained(
self.lfm2_model_id,
gguf_file=self.q4_k_m_lfm2_model_id,
dtype=torch.float16,
)
text = tokenizer(self.example_text, return_tensors="pt")["input_ids"]
out = model.generate(text, max_new_tokens=10)
EXPECTED_TEXT = "Hello Atari 2600! es un videoj"
self.assertEqual(tokenizer.decode(out[0], skip_special_tokens=True), EXPECTED_TEXT)
| GgufModelTests |
python | numba__numba | numba/tests/gdb/test_array_arg.py | {
"start": 264,
"end": 1723
} | class ____(TestCase):
def test(self):
@njit(debug=True)
def foo(x):
z = np.ones_like(x) # break here
return x, z
tmp = np.ones(5)
foo(tmp)
driver = GdbMIDriver(__file__)
driver.set_breakpoint(line=15)
driver.run()
driver.check_hit_breakpoint(1)
driver.stack_list_arguments(2)
llvm_intp = f"i{types.intp.bitwidth}"
expect = (
'[frame={level="0",args=[{name="x",type="array(float64, 1d, C) '
f'({{i8*, i8*, {llvm_intp}, {llvm_intp}, double*, '
f'[1 x {llvm_intp}], [1 x {llvm_intp}]}})"}}]}}]'
)
driver.assert_output(expect)
driver.stack_list_variables(1)
# 'z' should be zero-init
expect = ('{name="z",value="{meminfo = 0x0, parent = 0x0, nitems = 0, '
'itemsize = 0, data = 0x0, shape = {0}, strides = {0}}"}')
driver.assert_output(expect)
driver.set_breakpoint(line=16)
driver.cont()
driver.check_hit_breakpoint(2)
driver.stack_list_variables(1)
# 'z' should be populated
expect = (r'^.*\{name="z",value="\{meminfo = 0x[0-9a-f]+ .*, '
r'parent = 0x0, nitems = 5, itemsize = 8, '
r'data = 0x[0-9a-f]+, shape = \{5\}, strides = \{8\}\}.*$')
driver.assert_regex_output(expect)
driver.quit()
if __name__ == '__main__':
unittest.main()
| Test |
python | pennersr__django-allauth | allauth/headless/mfa/inputs.py | {
"start": 1714,
"end": 1802
} | class ____(ReauthenticateWebAuthnForm, inputs.Input):
pass
| ReauthenticateWebAuthnInput |
python | google__pytype | pytype/rewrite/flow/frame_base_test.py | {
"start": 358,
"end": 446
} | class ____(conditions.Condition):
name: str
# pylint: disable=invalid-name
| FakeCondition |
python | doocs__leetcode | solution/1700-1799/1710.Maximum Units on a Truck/Solution.py | {
"start": 0,
"end": 309
} | class ____:
def maximumUnits(self, boxTypes: List[List[int]], truckSize: int) -> int:
ans = 0
for a, b in sorted(boxTypes, key=lambda x: -x[1]):
ans += b * min(truckSize, a)
truckSize -= a
if truckSize <= 0:
break
return ans
| Solution |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/hooks/step_function.py | {
"start": 973,
"end": 3981
} | class ____(AwsBaseHook):
"""
Interact with an AWS Step Functions State Machine.
Provide thin wrapper around :external+boto3:py:class:`boto3.client("stepfunctions") <SFN.Client>`.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["client_type"] = "stepfunctions"
super().__init__(*args, **kwargs)
def start_execution(
self,
state_machine_arn: str,
name: str | None = None,
state_machine_input: dict | str | None = None,
is_redrive_execution: bool = False,
) -> str:
"""
Start Execution of the State Machine.
.. seealso::
- :external+boto3:py:meth:`SFN.Client.start_execution`
:param state_machine_arn: AWS Step Function State Machine ARN.
:param is_redrive_execution: Restarts unsuccessful executions of Standard workflows that did not
complete successfully in the last 14 days.
:param name: The name of the execution.
:param state_machine_input: JSON data input to pass to the State Machine.
:return: Execution ARN.
"""
if is_redrive_execution:
if not name:
raise AirflowFailException(
"Execution name is required to start RedriveExecution for %s.", state_machine_arn
)
elements = state_machine_arn.split(":stateMachine:")
execution_arn = f"{elements[0]}:execution:{elements[1]}:{name}"
self.conn.redrive_execution(executionArn=execution_arn)
self.log.info(
"Successfully started RedriveExecution for Step Function State Machine: %s.",
state_machine_arn,
)
return execution_arn
execution_args = {"stateMachineArn": state_machine_arn}
if name is not None:
execution_args["name"] = name
if state_machine_input is not None:
if isinstance(state_machine_input, str):
execution_args["input"] = state_machine_input
elif isinstance(state_machine_input, dict):
execution_args["input"] = json.dumps(state_machine_input)
self.log.info("Executing Step Function State Machine: %s", state_machine_arn)
response = self.conn.start_execution(**execution_args)
return response.get("executionArn")
def describe_execution(self, execution_arn: str) -> dict:
"""
Describe a State Machine Execution.
.. seealso::
- :external+boto3:py:meth:`SFN.Client.describe_execution`
:param execution_arn: ARN of the State Machine Execution.
:return: Dict with execution details.
"""
return self.get_conn().describe_execution(executionArn=execution_arn)
| StepFunctionHook |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/history.py | {
"start": 6924,
"end": 7648
} | class ____(History):
"""
:class:`.History` class that keeps a list of all strings in memory.
In order to prepopulate the history, it's possible to call either
`append_string` for all items or pass a list of strings to `__init__` here.
"""
def __init__(self, history_strings: Sequence[str] | None = None) -> None:
super().__init__()
# Emulating disk storage.
if history_strings is None:
self._storage = []
else:
self._storage = list(history_strings)
def load_history_strings(self) -> Iterable[str]:
yield from self._storage[::-1]
def store_string(self, string: str) -> None:
self._storage.append(string)
| InMemoryHistory |
python | tornadoweb__tornado | tornado/test/httpclient_test.py | {
"start": 34548,
"end": 36111
} | class ____(unittest.TestCase):
def test_headers(self):
request = HTTPRequest("http://example.com", headers={"foo": "bar"})
self.assertEqual(request.headers, {"foo": "bar"})
def test_headers_setter(self):
request = HTTPRequest("http://example.com")
request.headers = {"bar": "baz"} # type: ignore
self.assertEqual(request.headers, {"bar": "baz"})
def test_null_headers_setter(self):
request = HTTPRequest("http://example.com")
request.headers = None # type: ignore
self.assertEqual(request.headers, {})
def test_body(self):
request = HTTPRequest("http://example.com", body="foo")
self.assertEqual(request.body, utf8("foo"))
def test_body_setter(self):
request = HTTPRequest("http://example.com")
request.body = "foo" # type: ignore
self.assertEqual(request.body, utf8("foo"))
def test_if_modified_since(self):
http_date = datetime.datetime.now(datetime.timezone.utc)
request = HTTPRequest("http://example.com", if_modified_since=http_date)
self.assertEqual(
request.headers, {"If-Modified-Since": format_timestamp(http_date)}
)
def test_if_modified_since_naive_deprecated(self):
with ignore_deprecation():
http_date = datetime.datetime.utcnow()
request = HTTPRequest("http://example.com", if_modified_since=http_date)
self.assertEqual(
request.headers, {"If-Modified-Since": format_timestamp(http_date)}
)
| HTTPRequestTestCase |
python | doocs__leetcode | solution/2500-2599/2564.Substring XOR Queries/Solution.py | {
"start": 0,
"end": 515
} | class ____:
def substringXorQueries(self, s: str, queries: List[List[int]]) -> List[List[int]]:
d = {}
n = len(s)
for i in range(n):
x = 0
for j in range(32):
if i + j >= n:
break
x = x << 1 | int(s[i + j])
if x not in d:
d[x] = [i, i + j]
if x == 0:
break
return [d.get(first ^ second, [-1, -1]) for first, second in queries]
| Solution |
python | pytorch__pytorch | torch/_higher_order_ops/auto_functionalize.py | {
"start": 2997,
"end": 3245
} | class ____(ViewInfo):
def __init__(self, base_index):
super().__init__(base_index)
def regenerate_view(self, bases_list: list[Tensor]):
return torch.ops.aten.alias.default(bases_list[self.base_index])
@dataclass
| AliasViewInfo |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_Z.py | {
"start": 79,
"end": 1476
} | class ____(Benchmark):
r"""
Zacharov objective function.
This class defines the Zacharov [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Zacharov}}(x) = \sum_{i=1}^{n} x_i^2 + \left ( \frac{1}{2}
\sum_{i=1}^{n} i x_i \right )^2
+ \left ( \frac{1}{2} \sum_{i=1}^{n} i x_i
\right )^4
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-5, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.0] * self.N, [10.0] * self.N))
self.custom_bounds = ([-1, 1], [-1, 1])
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
u = sum(x ** 2)
v = sum(arange(1, self.N + 1) * x)
return u + (0.5 * v) ** 2 + (0.5 * v) ** 4
| Zacharov |
python | celery__celery | celery/worker/consumer/heart.py | {
"start": 156,
"end": 930
} | class ____(bootsteps.StartStopStep):
"""Bootstep sending event heartbeats.
This service sends a ``worker-heartbeat`` message every n seconds.
Note:
Not to be confused with AMQP protocol level heartbeats.
"""
requires = (Events,)
def __init__(self, c,
without_heartbeat=False, heartbeat_interval=None, **kwargs):
self.enabled = not without_heartbeat
self.heartbeat_interval = heartbeat_interval
c.heart = None
super().__init__(c, **kwargs)
def start(self, c):
c.heart = heartbeat.Heart(
c.timer, c.event_dispatcher, self.heartbeat_interval,
)
c.heart.start()
def stop(self, c):
c.heart = c.heart and c.heart.stop()
shutdown = stop
| Heart |
python | django__django | tests/i18n/test_compilation.py | {
"start": 819,
"end": 915
} | class ____(RunInTmpDirMixin, SimpleTestCase):
work_subdir = "commands"
| MessageCompilationTests |
python | PyCQA__pylint | pylint/utils/linterstats.py | {
"start": 905,
"end": 1072
} | class ____(TypedDict):
"""TypedDict to store counts of lines of duplicated code."""
nb_duplicated_lines: int
percent_duplicated_lines: float
| DuplicatedLines |
python | apache__airflow | providers/apache/kafka/src/airflow/providers/apache/kafka/hooks/consume.py | {
"start": 1369,
"end": 2321
} | class ____(KafkaBaseHook):
"""
A hook for creating a Kafka Consumer.
:param kafka_config_id: The connection object to use, defaults to "kafka_default"
:param topics: A list of topics to subscribe to.
"""
def __init__(self, topics: Sequence[str], kafka_config_id=KafkaBaseHook.default_conn_name) -> None:
super().__init__(kafka_config_id=kafka_config_id)
self.topics = topics
def _get_client(self, config) -> Consumer:
config_shallow = config.copy()
if config.get("error_cb") is None:
config_shallow["error_cb"] = error_callback
else:
config_shallow["error_cb"] = import_string(config["error_cb"])
return Consumer(config_shallow)
def get_consumer(self) -> Consumer:
"""Return a Consumer that has been subscribed to topics."""
consumer = self.get_conn
consumer.subscribe(self.topics)
return consumer
| KafkaConsumerHook |
python | getsentry__sentry | tests/sentry/users/api/bases/test_user.py | {
"start": 3767,
"end": 4100
} | class ____(DRFPermissionTestCase):
def test_allows_active_staff(self) -> None:
# The user passed in and the user on the request must be different to check staff.
assert UserAndStaffPermission().has_object_permission(
self.staff_request, APIView(), self.create_user()
)
| UserAndStaffPermissionTest |
python | ray-project__ray | python/ray/serve/_private/grpc_util.py | {
"start": 724,
"end": 6768
} | class ____(Server):
"""Custom gRPC server that will override all service method handlers.
Original implementation see: https://github.com/grpc/grpc/blob/
60c1701f87cacf359aa1ad785728549eeef1a4b0/src/python/grpcio/grpc/aio/_server.py
"""
def __init__(
self,
service_handler_factory: Callable,
*,
extra_options: Optional[List[Tuple[str, str]]] = None,
):
super().__init__(
thread_pool=None,
generic_handlers=(),
interceptors=(),
maximum_concurrent_rpcs=None,
compression=None,
options=DEFAULT_GRPC_SERVER_OPTIONS + (extra_options or []),
)
self.generic_rpc_handlers = []
self.service_handler_factory = service_handler_factory
def add_generic_rpc_handlers(
self, generic_rpc_handlers: Sequence[grpc.GenericRpcHandler]
):
"""Override generic_rpc_handlers before adding to the gRPC server.
This function will override all user defined handlers to have
1. None `response_serializer` so the server can pass back the
raw protobuf bytes to the user.
2. `unary_unary` is always calling the unary function generated via
`self.service_handler_factory`
3. `unary_stream` is always calling the streaming function generated via
`self.service_handler_factory`
"""
serve_rpc_handlers = {}
rpc_handler = generic_rpc_handlers[0]
for service_method, method_handler in rpc_handler._method_handlers.items():
serve_method_handler = method_handler._replace(
response_serializer=None,
unary_unary=self.service_handler_factory(
service_method=service_method,
stream=False,
),
unary_stream=self.service_handler_factory(
service_method=service_method,
stream=True,
),
)
serve_rpc_handlers[service_method] = serve_method_handler
generic_rpc_handlers[0]._method_handlers = serve_rpc_handlers
self.generic_rpc_handlers.append(generic_rpc_handlers)
super().add_generic_rpc_handlers(generic_rpc_handlers)
async def start_grpc_server(
service_handler_factory: Callable,
grpc_options: gRPCOptions,
*,
event_loop: asyncio.AbstractEventLoop,
enable_so_reuseport: bool = False,
) -> asyncio.Task:
"""Start a gRPC server that handles requests with the service handler factory.
Returns a task that blocks until the server exits (e.g., due to error).
"""
from ray.serve._private.default_impl import add_grpc_address
server = gRPCGenericServer(
service_handler_factory,
extra_options=[("grpc.so_reuseport", str(int(enable_so_reuseport)))],
)
add_grpc_address(server, f"[::]:{grpc_options.port}")
# Add built-in gRPC service and user-defined services to the server.
# We pass a mock servicer because the actual implementation will be overwritten
# in the gRPCGenericServer implementation.
mock_servicer = Mock()
for servicer_fn in [
add_RayServeAPIServiceServicer_to_server
] + grpc_options.grpc_servicer_func_callable:
servicer_fn(mock_servicer, server)
await server.start()
return event_loop.create_task(server.wait_for_termination())
def get_grpc_response_status(
exc: BaseException, request_timeout_s: float, request_id: str
) -> ResponseStatus:
if isinstance(exc, TimeoutError):
message = f"Request timed out after {request_timeout_s}s."
return ResponseStatus(
code=grpc.StatusCode.DEADLINE_EXCEEDED,
is_error=True,
message=message,
)
elif isinstance(exc, asyncio.CancelledError):
message = f"Client for request {request_id} disconnected."
return ResponseStatus(
code=grpc.StatusCode.CANCELLED,
is_error=True,
message=message,
)
elif isinstance(exc, BackPressureError):
return ResponseStatus(
code=grpc.StatusCode.RESOURCE_EXHAUSTED,
is_error=True,
message=exc.message,
)
elif isinstance(exc, DeploymentUnavailableError):
if isinstance(exc, RayTaskError):
logger.warning(f"Request failed: {exc}", extra={"log_to_stderr": False})
return ResponseStatus(
code=grpc.StatusCode.UNAVAILABLE,
is_error=True,
message=exc.message,
)
else:
if isinstance(exc, (RayActorError, RayTaskError)):
logger.warning(f"Request failed: {exc}", extra={"log_to_stderr": False})
else:
logger.exception("Request failed due to unexpected error.")
return ResponseStatus(
code=grpc.StatusCode.INTERNAL,
is_error=True,
message=str(exc),
)
def set_grpc_code_and_details(
context: grpc._cython.cygrpc._ServicerContext, status: ResponseStatus
):
# Only the latest code and details will take effect. If the user already
# set them to a truthy value in the context, skip setting them with Serve's
# default values. By default, if nothing is set, the code is 0 and the
# details is "", which both are falsy. So if the user did not set them or
# if they're explicitly set to falsy values, such as None, Serve will
# continue to set them with our default values.
if not context.code():
context.set_code(status.code)
if not context.details():
context.set_details(status.message)
def set_proxy_default_grpc_options(grpc_options) -> gRPCOptions:
grpc_options = deepcopy(grpc_options) or gRPCOptions()
if grpc_options.request_timeout_s or RAY_SERVE_REQUEST_PROCESSING_TIMEOUT_S:
grpc_options.request_timeout_s = (
grpc_options.request_timeout_s or RAY_SERVE_REQUEST_PROCESSING_TIMEOUT_S
)
return grpc_options
| gRPCGenericServer |
python | kamyu104__LeetCode-Solutions | Python/palindrome-pairs.py | {
"start": 1459,
"end": 3209
} | class ____(object):
def palindromePairs(self, words):
"""
:type words: List[str]
:rtype: List[List[int]]
"""
def manacher(s, P):
def preProcess(s):
if not s:
return ['^', '$']
T = ['^']
for c in s:
T += ["#", c]
T += ['#', '$']
return T
T = preProcess(s)
center, right = 0, 0
for i in xrange(1, len(T) - 1):
i_mirror = 2 * center - i
if right > i:
P[i] = min(right - i, P[i_mirror])
else:
P[i] = 0
while T[i + 1 + P[i]] == T[i - 1 - P[i]]:
P[i] += 1
if i + P[i] > right:
center, right = i, i + P[i]
prefix, suffix = collections.defaultdict(list), collections.defaultdict(list)
for i, word in enumerate(words):
P = [0] * (2 * len(word) + 3)
manacher(word, P)
for j in xrange(len(P)):
if j - P[j] == 1:
prefix[word[(j + P[j]) // 2:]].append(i)
if j + P[j] == len(P) - 2:
suffix[word[:(j - P[j]) // 2]].append(i)
res = []
for i, word in enumerate(words):
for j in prefix[word[::-1]]:
if j != i:
res.append([i, j])
for j in suffix[word[::-1]]:
if len(word) != len(words[j]):
res.append([j, i])
return res
# Time: O(n * k^2), n is the number of the words, k is the max length of the words.
# Space: O(n * k)
# Trie solution.
| Solution_TLE |
python | numpy__numpy | benchmarks/benchmarks/bench_io.py | {
"start": 1835,
"end": 2084
} | class ____(Benchmark):
def setup(self):
self.buffer = BytesIO()
np.save(self.buffer, get_squares_()['float32'])
def time_loadnpy_overhead(self):
self.buffer.seek(0, SEEK_SET)
np.load(self.buffer)
| LoadNpyOverhead |
python | kamyu104__LeetCode-Solutions | Python/number-of-valid-subarrays.py | {
"start": 29,
"end": 360
} | class ____(object):
def validSubarrays(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = 0
s = []
for num in nums:
while s and s[-1] > num:
s.pop()
s.append(num)
result += len(s)
return result
| Solution |
python | pandas-dev__pandas | asv_bench/benchmarks/series_methods.py | {
"start": 7063,
"end": 7363
} | class ____:
params = [[10**3, 10**6], ["fast", "slow"], ["bool", "boolean"]]
param_names = ["N", "case", "dtype"]
def setup(self, N, case, dtype):
val = case == "fast"
self.s = Series([val] * N, dtype=dtype)
def time_any(self, N, case, dtype):
self.s.any()
| Any |
python | encode__django-rest-framework | tests/test_model_serializer.py | {
"start": 3593,
"end": 3681
} | class ____(models.Model):
title = models.CharField(max_length=64)
| Issue3674ParentModel |
python | numpy__numpy | numpy/f2py/tests/test_return_real.py | {
"start": 1787,
"end": 2698
} | class ____(TestReturnReal):
suffix = ".pyf"
module_name = "c_ext_return_real"
code = """
python module c_ext_return_real
usercode \'\'\'
float t4(float value) { return value; }
void s4(float *t4, float value) { *t4 = value; }
double t8(double value) { return value; }
void s8(double *t8, double value) { *t8 = value; }
\'\'\'
interface
function t4(value)
real*4 intent(c) :: t4,value
end
function t8(value)
real*8 intent(c) :: t8,value
end
subroutine s4(t4,value)
intent(c) s4
real*4 intent(out) :: t4
real*4 intent(c) :: value
end
subroutine s8(t8,value)
intent(c) s8
real*8 intent(out) :: t8
real*8 intent(c) :: value
end
end interface
end python module c_ext_return_real
"""
@pytest.mark.parametrize("name", ["t4", "t8", "s4", "s8"])
def test_all(self, name):
self.check_function(getattr(self.module, name), name)
| TestCReturnReal |
python | joke2k__faker | tests/providers/test_ssn.py | {
"start": 30167,
"end": 30491
} | class ____(unittest.TestCase):
def setUp(self):
self.fake = Faker("en_GB")
Faker.seed(0)
def test_vat_id(self):
for _ in range(100):
assert re.search(
r"^GB\d{3} \d{4} \d{2}(?: \d{3})?$|^GB(?:GD|HA)\d{3}$",
self.fake.vat_id(),
)
| TestEnGB |
python | crytic__slither | slither/utils/output_capture.py | {
"start": 38,
"end": 432
} | class ____(io.StringIO):
"""
I/O implementation which captures output, and optionally mirrors it to the original I/O stream it replaces.
"""
def __init__(self, original_io=None):
super().__init__()
self.original_io = original_io
def write(self, s):
super().write(s)
if self.original_io:
self.original_io.write(s)
| CapturingStringIO |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/combinatory_ports.py | {
"start": 943,
"end": 1136
} | class ____(Base):
def __init__(self, base: Base, x: str) -> None:
self.c: Base = base
self.x: str = x
def method(self):
_test_sink(self.x)
self.c.method()
| C |
python | great-expectations__great_expectations | contrib/cli/great_expectations_contrib/package.py | {
"start": 1314,
"end": 1524
} | class ____(SerializableDictDot):
full_name: str
social_links: Optional[List[SocialLink]] = None
picture: Optional[str] = None
title: Optional[str] = None
bio: Optional[str] = None
| DomainExpert |
python | celery__celery | celery/backends/base.py | {
"start": 33540,
"end": 36056
} | class ____:
def iter_native(self, result, timeout=None, interval=0.5, no_ack=True,
on_message=None, on_interval=None):
self._ensure_not_eager()
results = result.results
if not results:
return
task_ids = set()
for result in results:
if isinstance(result, ResultSet):
yield result.id, result.results
else:
task_ids.add(result.id)
yield from self.get_many(
task_ids,
timeout=timeout, interval=interval, no_ack=no_ack,
on_message=on_message, on_interval=on_interval,
)
def wait_for_pending(self, result, timeout=None, interval=0.5,
no_ack=True, on_message=None, on_interval=None,
callback=None, propagate=True):
self._ensure_not_eager()
if on_message is not None:
raise ImproperlyConfigured(
'Backend does not support on_message callback')
meta = self.wait_for(
result.id, timeout=timeout,
interval=interval,
on_interval=on_interval,
no_ack=no_ack,
)
if meta:
result._maybe_set_cache(meta)
return result.maybe_throw(propagate=propagate, callback=callback)
def wait_for(self, task_id,
timeout=None, interval=0.5, no_ack=True, on_interval=None):
"""Wait for task and return its result.
If the task raises an exception, this exception
will be re-raised by :func:`wait_for`.
Raises:
celery.exceptions.TimeoutError:
If `timeout` is not :const:`None`, and the operation
takes longer than `timeout` seconds.
"""
self._ensure_not_eager()
time_elapsed = 0.0
while 1:
meta = self.get_task_meta(task_id)
if meta['status'] in states.READY_STATES:
return meta
if on_interval:
on_interval()
# avoid hammering the CPU checking status.
time.sleep(interval)
time_elapsed += interval
if timeout and time_elapsed >= timeout:
raise TimeoutError('The operation timed out.')
def add_pending_result(self, result, weak=False):
return result
def remove_pending_result(self, result):
return result
@property
def is_async(self):
return False
| SyncBackendMixin |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/transfers/postgres_to_gcs.py | {
"start": 1315,
"end": 2719
} | class ____:
"""
Inspired by `_PrestoToGCSPrestoCursorAdapter` to keep this consistent.
Decorator for allowing description to be available for postgres cursor in case server side
cursor is used. It doesn't provide other methods except those needed in BaseSQLToGCSOperator,
which is more of a safety feature.
"""
def __init__(self, cursor):
self.cursor = cursor
self.rows = []
self.initialized = False
def __iter__(self):
"""Make the cursor iterable."""
return self
def __next__(self):
"""Fetch next row from the cursor."""
if USE_PSYCOPG3:
if self.rows:
return self.rows.pop()
self.initialized = True
row = self.cursor.fetchone()
if row is None:
raise StopIteration
return row
# psycopg2
if self.rows:
return self.rows.pop()
self.initialized = True
return next(self.cursor)
@property
def description(self):
"""Fetch first row to initialize cursor description when using server side cursor."""
if not self.initialized:
element = self.cursor.fetchone()
if element is not None:
self.rows.append(element)
self.initialized = True
return self.cursor.description
| _PostgresServerSideCursorDecorator |
python | oauthlib__oauthlib | oauthlib/oauth1/rfc5849/errors.py | {
"start": 2100,
"end": 2245
} | class ____(OAuth1Error):
error = 'insecure_transport_protocol'
description = 'Only HTTPS connections are permitted.'
| InsecureTransportError |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 778820,
"end": 779049
} | class ____(VegaLiteSchema):
"""NumericMarkPropDef schema wrapper."""
_schema = {"$ref": "#/definitions/NumericMarkPropDef"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| NumericMarkPropDef |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/resolvelib/resolvers.py | {
"start": 18578,
"end": 20511
} | class ____(AbstractResolver):
"""The thing that performs the actual resolution work."""
base_exception = ResolverException
def resolve(self, requirements, max_rounds=100):
"""Take a collection of constraints, spit out the resolution result.
The return value is a representation to the final resolution result. It
is a tuple subclass with three public members:
* `mapping`: A dict of resolved candidates. Each key is an identifier
of a requirement (as returned by the provider's `identify` method),
and the value is the resolved candidate.
* `graph`: A `DirectedGraph` instance representing the dependency tree.
The vertices are keys of `mapping`, and each edge represents *why*
a particular package is included. A special vertex `None` is
included to represent parents of user-supplied requirements.
* `criteria`: A dict of "criteria" that hold detailed information on
how edges in the graph are derived. Each key is an identifier of a
requirement, and the value is a `Criterion` instance.
The following exceptions may be raised if a resolution cannot be found:
* `ResolutionImpossible`: A resolution cannot be found for the given
combination of requirements. The `causes` attribute of the
exception is a list of (requirement, parent), giving the
requirements that could not be satisfied.
* `ResolutionTooDeep`: The dependency tree is too deeply nested and
the resolver gave up. This is usually caused by a circular
dependency, but you can try to resolve this by increasing the
`max_rounds` argument.
"""
resolution = Resolution(self.provider, self.reporter)
state = resolution.resolve(requirements, max_rounds=max_rounds)
return _build_result(state)
| Resolver |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/validation/validation.py | {
"start": 1435,
"end": 5580
} | class ____(object):
__slots__ = ('_schema', '_ast', '_type_info', '_errors', '_fragments', '_fragment_spreads',
'_recursively_referenced_fragments', '_variable_usages', '_recursive_variable_usages')
def __init__(self, schema, ast, type_info):
self._schema = schema
self._ast = ast
self._type_info = type_info
self._errors = []
self._fragments = None
self._fragment_spreads = {}
self._recursively_referenced_fragments = {}
self._variable_usages = {}
self._recursive_variable_usages = {}
def report_error(self, error):
self._errors.append(error)
def get_errors(self):
return self._errors
def get_schema(self):
return self._schema
def get_variable_usages(self, node):
usages = self._variable_usages.get(node)
if usages is None:
usages = []
sub_visitor = UsageVisitor(usages, self._type_info)
visit(node, TypeInfoVisitor(self._type_info, sub_visitor))
self._variable_usages[node] = usages
return usages
def get_recursive_variable_usages(self, operation):
assert isinstance(operation, OperationDefinition)
usages = self._recursive_variable_usages.get(operation)
if usages is None:
usages = self.get_variable_usages(operation)
fragments = self.get_recursively_referenced_fragments(operation)
for fragment in fragments:
usages.extend(self.get_variable_usages(fragment))
self._recursive_variable_usages[operation] = usages
return usages
def get_recursively_referenced_fragments(self, operation):
assert isinstance(operation, OperationDefinition)
fragments = self._recursively_referenced_fragments.get(operation)
if not fragments:
fragments = []
collected_names = set()
nodes_to_visit = [operation.selection_set]
while nodes_to_visit:
node = nodes_to_visit.pop()
spreads = self.get_fragment_spreads(node)
for spread in spreads:
frag_name = spread.name.value
if frag_name not in collected_names:
collected_names.add(frag_name)
fragment = self.get_fragment(frag_name)
if fragment:
fragments.append(fragment)
nodes_to_visit.append(fragment.selection_set)
self._recursively_referenced_fragments[operation] = fragments
return fragments
def get_fragment_spreads(self, node):
spreads = self._fragment_spreads.get(node)
if not spreads:
spreads = []
sets_to_visit = [node]
while sets_to_visit:
_set = sets_to_visit.pop()
for selection in _set.selections:
if isinstance(selection, FragmentSpread):
spreads.append(selection)
elif selection.selection_set:
sets_to_visit.append(selection.selection_set)
self._fragment_spreads[node] = spreads
return spreads
def get_ast(self):
return self._ast
def get_fragment(self, name):
fragments = self._fragments
if fragments is None:
self._fragments = fragments = {}
for statement in self.get_ast().definitions:
if isinstance(statement, FragmentDefinition):
fragments[statement.name.value] = statement
return fragments.get(name)
def get_type(self):
return self._type_info.get_type()
def get_parent_type(self):
return self._type_info.get_parent_type()
def get_input_type(self):
return self._type_info.get_input_type()
def get_field_def(self):
return self._type_info.get_field_def()
def get_directive(self):
return self._type_info.get_directive()
def get_argument(self):
return self._type_info.get_argument()
| ValidationContext |
python | django__django | django/contrib/postgres/search.py | {
"start": 13993,
"end": 15313
} | class ____(LexemeCombinable, Value):
_output_field = SearchQueryField()
def __init__(
self, value, output_field=None, *, invert=False, prefix=False, weight=None
):
if value == "":
raise ValueError("Lexeme value cannot be empty.")
if not isinstance(value, str):
raise TypeError(
f"Lexeme value must be a string, got {value.__class__.__name__}."
)
if weight is not None and (
not isinstance(weight, str) or weight.lower() not in {"a", "b", "c", "d"}
):
raise ValueError(
f"Weight must be one of 'A', 'B', 'C', and 'D', got {weight!r}."
)
self.prefix = prefix
self.invert = invert
self.weight = weight
super().__init__(value, output_field=output_field)
def as_sql(self, compiler, connection):
param = quote_lexeme(self.value)
label = ""
if self.prefix:
label += "*"
if self.weight:
label += self.weight
if label:
param = f"{param}:{label}"
if self.invert:
param = f"!{param}"
return "%s", (param,)
def __invert__(self):
cloned = self.copy()
cloned.invert = not self.invert
return cloned
| Lexeme |
python | charliermarsh__ruff | crates/ruff_server/resources/test/fixtures/pandas_html.py | {
"start": 3994,
"end": 16735
} | class ____:
"""
Base class for parsers that parse HTML into DataFrames.
Parameters
----------
io : str or file-like
This can be either a string path, a valid URL using the HTTP,
FTP, or FILE protocols or a file-like object.
match : str or regex
The text to match in the document.
attrs : dict
List of HTML <table> element attributes to match.
encoding : str
Encoding to be used by parser
displayed_only : bool
Whether or not items with "display:none" should be ignored
extract_links : {None, "all", "header", "body", "footer"}
Table elements in the specified section(s) with <a> tags will have their
href extracted.
.. versionadded:: 1.5.0
Attributes
----------
io : str or file-like
raw HTML, URL, or file-like object
match : regex
The text to match in the raw HTML
attrs : dict-like
A dictionary of valid table attributes to use to search for table
elements.
encoding : str
Encoding to be used by parser
displayed_only : bool
Whether or not items with "display:none" should be ignored
extract_links : {None, "all", "header", "body", "footer"}
Table elements in the specified section(s) with <a> tags will have their
href extracted.
.. versionadded:: 1.5.0
Notes
-----
To subclass this class effectively you must override the following methods:
* :func:`_build_doc`
* :func:`_attr_getter`
* :func:`_href_getter`
* :func:`_text_getter`
* :func:`_parse_td`
* :func:`_parse_thead_tr`
* :func:`_parse_tbody_tr`
* :func:`_parse_tfoot_tr`
* :func:`_parse_tables`
* :func:`_equals_tag`
See each method's respective documentation for details on their
functionality.
"""
def __init__(
self,
io: FilePath | ReadBuffer[str] | ReadBuffer[bytes],
match: str | Pattern,
attrs: dict[str, str] | None,
encoding: str,
displayed_only: bool,
extract_links: Literal[None, "header", "footer", "body", "all"],
storage_options: StorageOptions = None,
) -> None:
self.io = io
self.match = match
self.attrs = attrs
self.encoding = encoding
self.displayed_only = displayed_only
self.extract_links = extract_links
self.storage_options = storage_options
def parse_tables(self):
"""
Parse and return all tables from the DOM.
Returns
-------
list of parsed (header, body, footer) tuples from tables.
"""
tables = self._parse_tables(self._build_doc(), self.match, self.attrs)
return (self._parse_thead_tbody_tfoot(table) for table in tables)
def _attr_getter(self, obj, attr):
"""
Return the attribute value of an individual DOM node.
Parameters
----------
obj : node-like
A DOM node.
attr : str or unicode
The attribute, such as "colspan"
Returns
-------
str or unicode
The attribute value.
"""
# Both lxml and BeautifulSoup have the same implementation:
return obj.get(attr)
def _href_getter(self, obj) -> str | None:
"""
Return a href if the DOM node contains a child <a> or None.
Parameters
----------
obj : node-like
A DOM node.
Returns
-------
href : str or unicode
The href from the <a> child of the DOM node.
"""
raise AbstractMethodError(self)
def _text_getter(self, obj):
"""
Return the text of an individual DOM node.
Parameters
----------
obj : node-like
A DOM node.
Returns
-------
text : str or unicode
The text from an individual DOM node.
"""
raise AbstractMethodError(self)
def _parse_td(self, obj):
"""
Return the td elements from a row element.
Parameters
----------
obj : node-like
A DOM <tr> node.
Returns
-------
list of node-like
These are the elements of each row, i.e., the columns.
"""
raise AbstractMethodError(self)
def _parse_thead_tr(self, table):
"""
Return the list of thead row elements from the parsed table element.
Parameters
----------
table : a table element that contains zero or more thead elements.
Returns
-------
list of node-like
These are the <tr> row elements of a table.
"""
raise AbstractMethodError(self)
def _parse_tbody_tr(self, table):
"""
Return the list of tbody row elements from the parsed table element.
HTML5 table bodies consist of either 0 or more <tbody> elements (which
only contain <tr> elements) or 0 or more <tr> elements. This method
checks for both structures.
Parameters
----------
table : a table element that contains row elements.
Returns
-------
list of node-like
These are the <tr> row elements of a table.
"""
raise AbstractMethodError(self)
def _parse_tfoot_tr(self, table):
"""
Return the list of tfoot row elements from the parsed table element.
Parameters
----------
table : a table element that contains row elements.
Returns
-------
list of node-like
These are the <tr> row elements of a table.
"""
raise AbstractMethodError(self)
def _parse_tables(self, document, match, attrs):
"""
Return all tables from the parsed DOM.
Parameters
----------
document : the DOM from which to parse the table element.
match : str or regular expression
The text to search for in the DOM tree.
attrs : dict
A dictionary of table attributes that can be used to disambiguate
multiple tables on a page.
Raises
------
ValueError : `match` does not match any text in the document.
Returns
-------
list of node-like
HTML <table> elements to be parsed into raw data.
"""
raise AbstractMethodError(self)
def _equals_tag(self, obj, tag) -> bool:
"""
Return whether an individual DOM node matches a tag
Parameters
----------
obj : node-like
A DOM node.
tag : str
Tag name to be checked for equality.
Returns
-------
boolean
Whether `obj`'s tag name is `tag`
"""
raise AbstractMethodError(self)
def _build_doc(self):
"""
Return a tree-like object that can be used to iterate over the DOM.
Returns
-------
node-like
The DOM from which to parse the table element.
"""
raise AbstractMethodError(self)
def _parse_thead_tbody_tfoot(self, table_html):
"""
Given a table, return parsed header, body, and foot.
Parameters
----------
table_html : node-like
Returns
-------
tuple of (header, body, footer), each a list of list-of-text rows.
Notes
-----
Header and body are lists-of-lists. Top level list is a list of
rows. Each row is a list of str text.
Logic: Use <thead>, <tbody>, <tfoot> elements to identify
header, body, and footer, otherwise:
- Put all rows into body
- Move rows from top of body to header only if
all elements inside row are <th>
- Move rows from bottom of body to footer only if
all elements inside row are <th>
"""
header_rows = self._parse_thead_tr(table_html)
body_rows = self._parse_tbody_tr(table_html)
footer_rows = self._parse_tfoot_tr(table_html)
def row_is_all_th(row):
return all(self._equals_tag(t, "th") for t in self._parse_td(row))
if not header_rows:
# The table has no <thead>. Move the top all-<th> rows from
# body_rows to header_rows. (This is a common case because many
# tables in the wild have no <thead> or <tfoot>
while body_rows and row_is_all_th(body_rows[0]):
header_rows.append(body_rows.pop(0))
header = self._expand_colspan_rowspan(header_rows, section="header")
body = self._expand_colspan_rowspan(body_rows, section="body")
footer = self._expand_colspan_rowspan(footer_rows, section="footer")
return header, body, footer
def _expand_colspan_rowspan(
self, rows, section: Literal["header", "footer", "body"]
) -> list[list]:
"""
Given a list of <tr>s, return a list of text rows.
Parameters
----------
rows : list of node-like
List of <tr>s
section : the section that the rows belong to (header, body or footer).
Returns
-------
list of list
Each returned row is a list of str text, or tuple (text, link)
if extract_links is not None.
Notes
-----
Any cell with ``rowspan`` or ``colspan`` will have its contents copied
to subsequent cells.
"""
all_texts = [] # list of rows, each a list of str
text: str | tuple
remainder: list[
tuple[int, str | tuple, int]
] = [] # list of (index, text, nrows)
for tr in rows:
texts = [] # the output for this row
next_remainder = []
index = 0
tds = self._parse_td(tr)
for td in tds:
# Append texts from previous rows with rowspan>1 that come
# before this <td>
while remainder and remainder[0][0] <= index:
prev_i, prev_text, prev_rowspan = remainder.pop(0)
texts.append(prev_text)
if prev_rowspan > 1:
next_remainder.append((prev_i, prev_text, prev_rowspan - 1))
index += 1
# Append the text from this <td>, colspan times
text = _remove_whitespace(self._text_getter(td))
if self.extract_links in ("all", section):
href = self._href_getter(td)
text = (text, href)
rowspan = int(self._attr_getter(td, "rowspan") or 1)
colspan = int(self._attr_getter(td, "colspan") or 1)
for _ in range(colspan):
texts.append(text)
if rowspan > 1:
next_remainder.append((index, text, rowspan - 1))
index += 1
# Append texts from previous rows at the final position
for prev_i, prev_text, prev_rowspan in remainder:
texts.append(prev_text)
if prev_rowspan > 1:
next_remainder.append((prev_i, prev_text, prev_rowspan - 1))
all_texts.append(texts)
remainder = next_remainder
# Append rows that only appear because the previous row had non-1
# rowspan
while remainder:
next_remainder = []
texts = []
for prev_i, prev_text, prev_rowspan in remainder:
texts.append(prev_text)
if prev_rowspan > 1:
next_remainder.append((prev_i, prev_text, prev_rowspan - 1))
all_texts.append(texts)
remainder = next_remainder
return all_texts
def _handle_hidden_tables(self, tbl_list, attr_name: str):
"""
Return list of tables, potentially removing hidden elements
Parameters
----------
tbl_list : list of node-like
Type of list elements will vary depending upon parser used
attr_name : str
Name of the accessor for retrieving HTML attributes
Returns
-------
list of node-like
Return type matches `tbl_list`
"""
if not self.displayed_only:
return tbl_list
return [
x
for x in tbl_list
if "display:none"
not in getattr(x, attr_name).get("style", "").replace(" ", "")
]
| _HtmlFrameParser |
python | pytorch__pytorch | test/distributed/tensor/parallel/test_tp_random_state.py | {
"start": 661,
"end": 5506
} | class ____(DTensorTestBase):
def get_tensor_slice(self, idx, n, large_tensor):
shape = large_tensor.shape
assert shape[0] % n == 0
local_shape = [shape[0] // n, shape[1]]
slice_idx = (
slice(idx * local_shape[0], (idx + 1) * local_shape[0]),
slice(local_shape[1]),
)
return large_tensor[slice_idx]
def check_gathered_tensors(self, self_rank, size, gathered_tensors, assertFunc):
for other_rank in range(size):
if self_rank != other_rank:
assertFunc(
self.get_tensor_slice(self_rank, size, gathered_tensors),
self.get_tensor_slice(other_rank, size, gathered_tensors),
)
@with_comms
@skip_if_lt_x_gpu(4)
def test_model_init(self):
dp_size = 2
tp_size = self.world_size // dp_size
mesh_2d = init_device_mesh(
self.device_type, (dp_size, tp_size), mesh_dim_names=("dp", "tp")
)
dp_mesh = mesh_2d["dp"]
tp_mesh = mesh_2d["tp"]
dp_rank = dp_mesh.get_coordinate()[0]
tp_rank = tp_mesh.get_coordinate()[0]
self.assertEqual(dp_rank, self.rank // tp_size)
self.assertEqual(tp_rank, self.rank % tp_size)
for enable_distribute_flag in [True, False]:
# a local model on meta device
model = MLPModule(device="meta")
# the col-wise parallel style shards the weight over tensor dim 0
model_tp = parallelize_module(
model,
tp_mesh,
{
"net1": ColwiseParallel(output_layouts=Replicate()),
"net2": ColwiseParallel(output_layouts=Replicate()),
},
)
# in most cases, the random number generator states is set by data loader
# in the following way:
# - within a tensor parallel group, the RNG is set with the same seed
# - across data parallel groups, the RNG is set with different seeds
torch.get_device_module(self.device_type).manual_seed(0)
# disable/enable parallel RNG feature
if random._rng_tracker:
random._rng_tracker.distribute_region_enabled = enable_distribute_flag
self.assertTrue(model_tp.net1.weight.is_meta)
# initialize the model's local shard
model_tp.to_empty(device=self.device_type)
model_tp.reset_parameters()
# examine that the weights are initialized adhere to DP/TP
for dtensor in [model_tp.net1.weight, model_tp.net2.weight]:
# check within the TP group
# the 1d mesh represents the TP group
_1d_mesh = dtensor.device_mesh
assert _1d_mesh.ndim == 1
self.assertEqual(_1d_mesh, tp_mesh)
tensor_local = dtensor.to_local()
# all-gather local shards
tensor_gather = funcol.all_gather_tensor(
tensor_local,
gather_dim=0,
group=_1d_mesh,
)
self.assertEqual(_1d_mesh.get_coordinate()[0], tp_rank)
# compare local shards within the TP group
def tp_weights_assert(tensor1, tensor2):
if enable_distribute_flag:
# each rank within a TP group shall initialize local weights differently
self.assertNotEqual(tensor1, tensor2)
else:
# without the parallel RNG, weight initialization violates the TP setup:
# each rank within a TP group has the same initial weights
self.assertEqual(tensor1, tensor2)
self.check_gathered_tensors(
tp_rank, tp_size, tensor_gather, tp_weights_assert
)
# check across TP groups
# all-gather local shards
tensor_gather = funcol.all_gather_tensor(
tensor_local,
gather_dim=0,
group=dp_mesh,
)
# compare local shards across TP groups
def dp_weights_assert(tensor1, tensor2):
# local weights shall be initialized the same across TP groups,
# and it doesn't matter whether DTensor's RNG infra is activated since all spmd ranks
# started with the same seed.
self.assertEqual(tensor1, tensor2)
self.check_gathered_tensors(
dp_rank, dp_size, tensor_gather, dp_weights_assert
)
if __name__ == "__main__":
run_tests()
| TensorParallelRandomStateTests |
python | pandas-dev__pandas | pandas/tests/arrays/sparse/test_arithmetics.py | {
"start": 537,
"end": 20316
} | class ____:
def _assert(self, a, b):
# We have to use tm.assert_sp_array_equal. See GH #45126
tm.assert_numpy_array_equal(a, b)
def _check_numeric_ops(self, a, b, a_dense, b_dense, mix: bool, op):
# Check that arithmetic behavior matches non-Sparse Series arithmetic
if isinstance(a_dense, np.ndarray):
expected = op(pd.Series(a_dense), b_dense).values
elif isinstance(b_dense, np.ndarray):
expected = op(a_dense, pd.Series(b_dense)).values
else:
raise NotImplementedError
with np.errstate(invalid="ignore", divide="ignore"):
if mix:
result = op(a, b_dense).to_dense()
else:
result = op(a, b).to_dense()
self._assert(result, expected)
def _check_bool_result(self, res):
assert isinstance(res, SparseArray)
assert isinstance(res.dtype, SparseDtype)
assert res.dtype.subtype == np.bool_
assert isinstance(res.fill_value, bool)
def _check_comparison_ops(self, a, b, a_dense, b_dense):
with np.errstate(invalid="ignore"):
# Unfortunately, trying to wrap the computation of each expected
# value is with np.errstate() is too tedious.
#
# sparse & sparse
self._check_bool_result(a == b)
self._assert((a == b).to_dense(), a_dense == b_dense)
self._check_bool_result(a != b)
self._assert((a != b).to_dense(), a_dense != b_dense)
self._check_bool_result(a >= b)
self._assert((a >= b).to_dense(), a_dense >= b_dense)
self._check_bool_result(a <= b)
self._assert((a <= b).to_dense(), a_dense <= b_dense)
self._check_bool_result(a > b)
self._assert((a > b).to_dense(), a_dense > b_dense)
self._check_bool_result(a < b)
self._assert((a < b).to_dense(), a_dense < b_dense)
# sparse & dense
self._check_bool_result(a == b_dense)
self._assert((a == b_dense).to_dense(), a_dense == b_dense)
self._check_bool_result(a != b_dense)
self._assert((a != b_dense).to_dense(), a_dense != b_dense)
self._check_bool_result(a >= b_dense)
self._assert((a >= b_dense).to_dense(), a_dense >= b_dense)
self._check_bool_result(a <= b_dense)
self._assert((a <= b_dense).to_dense(), a_dense <= b_dense)
self._check_bool_result(a > b_dense)
self._assert((a > b_dense).to_dense(), a_dense > b_dense)
self._check_bool_result(a < b_dense)
self._assert((a < b_dense).to_dense(), a_dense < b_dense)
def _check_logical_ops(self, a, b, a_dense, b_dense):
# sparse & sparse
self._check_bool_result(a & b)
self._assert((a & b).to_dense(), a_dense & b_dense)
self._check_bool_result(a | b)
self._assert((a | b).to_dense(), a_dense | b_dense)
# sparse & dense
self._check_bool_result(a & b_dense)
self._assert((a & b_dense).to_dense(), a_dense & b_dense)
self._check_bool_result(a | b_dense)
self._assert((a | b_dense).to_dense(), a_dense | b_dense)
@pytest.mark.parametrize("scalar", [0, 1, 3])
@pytest.mark.parametrize("fill_value", [None, 0, 2])
def test_float_scalar(
self, kind, mix, all_arithmetic_functions, fill_value, scalar, request
):
op = all_arithmetic_functions
values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
a = SparseArray(values, kind=kind, fill_value=fill_value)
self._check_numeric_ops(a, scalar, values, scalar, mix, op)
def test_float_scalar_comparison(self, kind):
values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
a = SparseArray(values, kind=kind)
self._check_comparison_ops(a, 1, values, 1)
self._check_comparison_ops(a, 0, values, 0)
self._check_comparison_ops(a, 3, values, 3)
a = SparseArray(values, kind=kind, fill_value=0)
self._check_comparison_ops(a, 1, values, 1)
self._check_comparison_ops(a, 0, values, 0)
self._check_comparison_ops(a, 3, values, 3)
a = SparseArray(values, kind=kind, fill_value=2)
self._check_comparison_ops(a, 1, values, 1)
self._check_comparison_ops(a, 0, values, 0)
self._check_comparison_ops(a, 3, values, 3)
def test_float_same_index_without_nans(self, kind, mix, all_arithmetic_functions):
# when sp_index are the same
op = all_arithmetic_functions
values = np.array([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0])
rvalues = np.array([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0])
a = SparseArray(values, kind=kind, fill_value=0)
b = SparseArray(rvalues, kind=kind, fill_value=0)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
def test_float_same_index_with_nans(
self, kind, mix, all_arithmetic_functions, request
):
# when sp_index are the same
op = all_arithmetic_functions
values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = np.array([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])
a = SparseArray(values, kind=kind)
b = SparseArray(rvalues, kind=kind)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
def test_float_same_index_comparison(self, kind):
# when sp_index are the same
values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = np.array([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan])
a = SparseArray(values, kind=kind)
b = SparseArray(rvalues, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
values = np.array([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0])
rvalues = np.array([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0])
a = SparseArray(values, kind=kind, fill_value=0)
b = SparseArray(rvalues, kind=kind, fill_value=0)
self._check_comparison_ops(a, b, values, rvalues)
def test_float_array(self, kind, mix, all_arithmetic_functions):
op = all_arithmetic_functions
values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = np.array([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
a = SparseArray(values, kind=kind)
b = SparseArray(rvalues, kind=kind)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op)
a = SparseArray(values, kind=kind, fill_value=0)
b = SparseArray(rvalues, kind=kind)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = SparseArray(values, kind=kind, fill_value=0)
b = SparseArray(rvalues, kind=kind, fill_value=0)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = SparseArray(values, kind=kind, fill_value=1)
b = SparseArray(rvalues, kind=kind, fill_value=2)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
def test_float_array_different_kind(self, mix, all_arithmetic_functions):
op = all_arithmetic_functions
values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = np.array([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
a = SparseArray(values, kind="integer")
b = SparseArray(rvalues, kind="block")
self._check_numeric_ops(a, b, values, rvalues, mix, op)
self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op)
a = SparseArray(values, kind="integer", fill_value=0)
b = SparseArray(rvalues, kind="block")
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = SparseArray(values, kind="integer", fill_value=0)
b = SparseArray(rvalues, kind="block", fill_value=0)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = SparseArray(values, kind="integer", fill_value=1)
b = SparseArray(rvalues, kind="block", fill_value=2)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
def test_float_array_comparison(self, kind):
values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = np.array([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan])
a = SparseArray(values, kind=kind)
b = SparseArray(rvalues, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
self._check_comparison_ops(a, b * 0, values, rvalues * 0)
a = SparseArray(values, kind=kind, fill_value=0)
b = SparseArray(rvalues, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
a = SparseArray(values, kind=kind, fill_value=0)
b = SparseArray(rvalues, kind=kind, fill_value=0)
self._check_comparison_ops(a, b, values, rvalues)
a = SparseArray(values, kind=kind, fill_value=1)
b = SparseArray(rvalues, kind=kind, fill_value=2)
self._check_comparison_ops(a, b, values, rvalues)
def test_int_array(self, kind, mix, all_arithmetic_functions):
op = all_arithmetic_functions
# have to specify dtype explicitly until fixing GH 667
dtype = np.int64
values = np.array([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype)
rvalues = np.array([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype)
a = SparseArray(values, dtype=dtype, kind=kind)
assert a.dtype == SparseDtype(dtype)
b = SparseArray(rvalues, dtype=dtype, kind=kind)
assert b.dtype == SparseDtype(dtype)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op)
a = SparseArray(values, fill_value=0, dtype=dtype, kind=kind)
assert a.dtype == SparseDtype(dtype)
b = SparseArray(rvalues, dtype=dtype, kind=kind)
assert b.dtype == SparseDtype(dtype)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = SparseArray(values, fill_value=0, dtype=dtype, kind=kind)
assert a.dtype == SparseDtype(dtype)
b = SparseArray(rvalues, fill_value=0, dtype=dtype, kind=kind)
assert b.dtype == SparseDtype(dtype)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = SparseArray(values, fill_value=1, dtype=dtype, kind=kind)
assert a.dtype == SparseDtype(dtype, fill_value=1)
b = SparseArray(rvalues, fill_value=2, dtype=dtype, kind=kind)
assert b.dtype == SparseDtype(dtype, fill_value=2)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
def test_int_array_comparison(self, kind):
dtype = "int64"
# int32 NI ATM
values = np.array([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype)
rvalues = np.array([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype)
a = SparseArray(values, dtype=dtype, kind=kind)
b = SparseArray(rvalues, dtype=dtype, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
self._check_comparison_ops(a, b * 0, values, rvalues * 0)
a = SparseArray(values, dtype=dtype, kind=kind, fill_value=0)
b = SparseArray(rvalues, dtype=dtype, kind=kind)
self._check_comparison_ops(a, b, values, rvalues)
a = SparseArray(values, dtype=dtype, kind=kind, fill_value=0)
b = SparseArray(rvalues, dtype=dtype, kind=kind, fill_value=0)
self._check_comparison_ops(a, b, values, rvalues)
a = SparseArray(values, dtype=dtype, kind=kind, fill_value=1)
b = SparseArray(rvalues, dtype=dtype, kind=kind, fill_value=2)
self._check_comparison_ops(a, b, values, rvalues)
@pytest.mark.parametrize("fill_value", [True, False, np.nan])
def test_bool_same_index(self, kind, fill_value):
# GH 14000
# when sp_index are the same
values = np.array([True, False, True, True], dtype=np.bool_)
rvalues = np.array([True, False, True, True], dtype=np.bool_)
a = SparseArray(values, kind=kind, dtype=np.bool_, fill_value=fill_value)
b = SparseArray(rvalues, kind=kind, dtype=np.bool_, fill_value=fill_value)
self._check_logical_ops(a, b, values, rvalues)
@pytest.mark.parametrize("fill_value", [True, False, np.nan])
def test_bool_array_logical(self, kind, fill_value):
# GH 14000
# when sp_index are the same
values = np.array([True, False, True, False, True, True], dtype=np.bool_)
rvalues = np.array([True, False, False, True, False, True], dtype=np.bool_)
a = SparseArray(values, kind=kind, dtype=np.bool_, fill_value=fill_value)
b = SparseArray(rvalues, kind=kind, dtype=np.bool_, fill_value=fill_value)
self._check_logical_ops(a, b, values, rvalues)
def test_mixed_array_float_int(self, kind, mix, all_arithmetic_functions, request):
op = all_arithmetic_functions
rdtype = "int64"
values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = np.array([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype)
a = SparseArray(values, kind=kind)
b = SparseArray(rvalues, kind=kind)
assert b.dtype == SparseDtype(rdtype)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op)
a = SparseArray(values, kind=kind, fill_value=0)
b = SparseArray(rvalues, kind=kind)
assert b.dtype == SparseDtype(rdtype)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = SparseArray(values, kind=kind, fill_value=0)
b = SparseArray(rvalues, kind=kind, fill_value=0)
assert b.dtype == SparseDtype(rdtype)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
a = SparseArray(values, kind=kind, fill_value=1)
b = SparseArray(rvalues, kind=kind, fill_value=2)
assert b.dtype == SparseDtype(rdtype, fill_value=2)
self._check_numeric_ops(a, b, values, rvalues, mix, op)
def test_mixed_array_comparison(self, kind):
rdtype = "int64"
# int32 NI ATM
values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan])
rvalues = np.array([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype)
a = SparseArray(values, kind=kind)
b = SparseArray(rvalues, kind=kind)
assert b.dtype == SparseDtype(rdtype)
self._check_comparison_ops(a, b, values, rvalues)
self._check_comparison_ops(a, b * 0, values, rvalues * 0)
a = SparseArray(values, kind=kind, fill_value=0)
b = SparseArray(rvalues, kind=kind)
assert b.dtype == SparseDtype(rdtype)
self._check_comparison_ops(a, b, values, rvalues)
a = SparseArray(values, kind=kind, fill_value=0)
b = SparseArray(rvalues, kind=kind, fill_value=0)
assert b.dtype == SparseDtype(rdtype)
self._check_comparison_ops(a, b, values, rvalues)
a = SparseArray(values, kind=kind, fill_value=1)
b = SparseArray(rvalues, kind=kind, fill_value=2)
assert b.dtype == SparseDtype(rdtype, fill_value=2)
self._check_comparison_ops(a, b, values, rvalues)
def test_xor(self):
s = SparseArray([True, True, False, False])
t = SparseArray([True, False, True, False])
result = s ^ t
sp_index = pd.core.arrays.sparse.IntIndex(4, np.array([0, 1, 2], dtype="int32"))
expected = SparseArray([False, True, True], sparse_index=sp_index)
tm.assert_sp_array_equal(result, expected)
@pytest.mark.parametrize("op", [operator.eq, operator.add])
def test_with_list(op):
arr = SparseArray([0, 1], fill_value=0)
result = op(arr, [0, 1])
expected = op(arr, SparseArray([0, 1]))
tm.assert_sp_array_equal(result, expected)
def test_with_dataframe():
# GH#27910
arr = SparseArray([0, 1], fill_value=0)
df = pd.DataFrame([[1, 2], [3, 4]])
result = arr.__add__(df)
assert result is NotImplemented
def test_with_zerodim_ndarray():
# GH#27910
arr = SparseArray([0, 1], fill_value=0)
result = arr * np.array(2)
expected = arr * 2
tm.assert_sp_array_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.abs, np.exp])
@pytest.mark.parametrize(
"arr", [SparseArray([0, 0, -1, 1]), SparseArray([None, None, -1, 1])]
)
def test_ufuncs(ufunc, arr):
result = ufunc(arr)
fill_value = ufunc(arr.fill_value)
expected = SparseArray(ufunc(np.asarray(arr)), fill_value=fill_value)
tm.assert_sp_array_equal(result, expected)
@pytest.mark.parametrize(
"a, b",
[
(SparseArray([0, 0, 0]), np.array([0, 1, 2])),
(SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])),
],
)
@pytest.mark.parametrize("ufunc", [np.add, np.greater])
def test_binary_ufuncs(ufunc, a, b):
# can't say anything about fill value here.
result = ufunc(a, b)
expected = ufunc(np.asarray(a), np.asarray(b))
assert isinstance(result, SparseArray)
tm.assert_numpy_array_equal(np.asarray(result), expected)
def test_ndarray_inplace():
sparray = SparseArray([0, 2, 0, 0])
ndarray = np.array([0, 1, 2, 3])
ndarray += sparray
expected = np.array([0, 3, 2, 3])
tm.assert_numpy_array_equal(ndarray, expected)
def test_sparray_inplace():
sparray = SparseArray([0, 2, 0, 0])
ndarray = np.array([0, 1, 2, 3])
sparray += ndarray
expected = SparseArray([0, 3, 2, 3], fill_value=0)
tm.assert_sp_array_equal(sparray, expected)
@pytest.mark.parametrize("cons", [list, np.array, SparseArray])
def test_mismatched_length_cmp_op(cons):
left = SparseArray([True, True])
right = cons([True, True, True])
with pytest.raises(ValueError, match="operands have mismatched length"):
left & right
@pytest.mark.parametrize(
"a, b",
[
([0, 1, 2], [0, 1, 2, 3]),
([0, 1, 2, 3], [0, 1, 2]),
],
)
def test_mismatched_length_arith_op(a, b, all_arithmetic_functions):
op = all_arithmetic_functions
with pytest.raises(AssertionError, match=f"length mismatch: {len(a)} vs. {len(b)}"):
op(SparseArray(a, fill_value=0), np.array(b))
@pytest.mark.parametrize("op", ["add", "sub", "mul", "truediv", "floordiv", "pow"])
@pytest.mark.parametrize("fill_value", [np.nan, 3])
def test_binary_operators(op, fill_value):
op = getattr(operator, op)
data1 = np.random.default_rng(2).standard_normal(20)
data2 = np.random.default_rng(2).standard_normal(20)
data1[::2] = fill_value
data2[::3] = fill_value
first = SparseArray(data1, fill_value=fill_value)
second = SparseArray(data2, fill_value=fill_value)
with np.errstate(all="ignore"):
res = op(first, second)
exp = SparseArray(
op(first.to_dense(), second.to_dense()), fill_value=first.fill_value
)
assert isinstance(res, SparseArray)
tm.assert_almost_equal(res.to_dense(), exp.to_dense())
res2 = op(first, second.to_dense())
assert isinstance(res2, SparseArray)
tm.assert_sp_array_equal(res, res2)
res3 = op(first.to_dense(), second)
assert isinstance(res3, SparseArray)
tm.assert_sp_array_equal(res, res3)
res4 = op(first, 4)
assert isinstance(res4, SparseArray)
# Ignore this if the actual op raises (e.g. pow).
try:
exp = op(first.to_dense(), 4)
exp_fv = op(first.fill_value, 4)
except ValueError:
pass
else:
tm.assert_almost_equal(res4.fill_value, exp_fv)
tm.assert_almost_equal(res4.to_dense(), exp)
| TestSparseArrayArithmetics |
python | huggingface__transformers | tests/models/mamba2/test_modeling_mamba2.py | {
"start": 8645,
"end": 14426
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (Mamba2Model, Mamba2ForCausalLM) if is_torch_available() else ()
has_attentions = False # Mamba does not support attentions
test_missing_keys = False
pipeline_model_mapping = (
{"feature-extraction": Mamba2Model, "text-generation": Mamba2ForCausalLM} if is_torch_available() else {}
)
def setUp(self):
self.model_tester = Mamba2ModelTester(self)
self.config_tester = Mamba2ConfigTester(
self, config_class=Mamba2Config, n_embd=37, common_properties=["hidden_size", "num_hidden_layers"]
)
def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config):
self.assertIsInstance(past_key_values, Mamba2Cache)
intermediate_size = config.expand * config.hidden_size
conv_shape = (
config.num_hidden_layers,
batch_size,
intermediate_size + 2 * config.n_groups * config.state_size,
config.conv_kernel,
)
ssm_shape = (config.num_hidden_layers, batch_size, config.num_heads, config.head_dim, config.state_size)
self.assertEqual(past_key_values.conv_states.shape, conv_shape)
self.assertEqual(past_key_values.ssm_states.shape, ssm_shape)
def test_mamba2_caching(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mamba2_caching(*config_and_inputs)
def test_mamba2_slow_vs_fast_forward(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mamba2_slow_vs_fast_forward(*config_and_inputs)
# This test adjusts n_groups to half the original setting and effectively
# creates a grouped SSD configuration in the mamba2 layers
# See https://github.com/huggingface/transformers/pull/37533/
def test_mamba2_slow_vs_fast_forward_grouped(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
config_and_inputs[0].n_groups //= 2
self.model_tester.create_and_check_mamba2_slow_vs_fast_forward(*config_and_inputs)
@unittest.skip(reason="A large mamba2 would be necessary (and costly) for that")
def test_multi_gpu_data_parallel_forward(self):
pass
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
with torch.no_grad():
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, Mamba2Cache): # MODIFIED PART START
recursive_check(tuple_object.conv_states, dict_object.conv_states)
recursive_check(tuple_object.ssm_states, dict_object.ssm_states)
elif isinstance(tuple_object, (list, tuple)): # MODIFIED PART END
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif isinstance(tuple_object, dict):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values(), dict_object.values()
):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(tuple_object, dict_object, atol=1e-5),
msg=(
"Tuple and dict output are not equal. Difference:"
f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
),
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
@slow
@require_read_token
| Mamba2ModelTest |
python | viewflow__viewflow | viewflow/views/list.py | {
"start": 1279,
"end": 2369
} | class ____(object):
def __init__(self, attr_name):
self.attr_name = attr_name
def get_value(self, obj):
raise NotImplementedError("subclasses must implement this method.")
def header(self):
raise NotImplementedError("subclasses must implement this method")
def column_type(self):
raise NotImplementedError("subclasses must implement this method")
def orderby(self):
raise NotImplementedError("subclasses must implement this method")
def format_value(self, obj, value):
if value is None:
return ""
elif isinstance(value, datetime.datetime):
return formats.localize(timezone.template_localtime(value))
elif isinstance(value, (datetime.date, datetime.time)):
return formats.localize(value)
elif isinstance(value, (int, float, decimal.Decimal)):
return formats.number_format(value)
elif isinstance(value, (list, tuple)):
return ", ".join(force_str(v) for v in value)
else:
return force_str(value)
| BaseColumn |
python | tensorflow__tensorflow | tensorflow/python/ops/ragged/ragged_tensor_test.py | {
"start": 2884,
"end": 80010
} | class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
longMessage = True # Property in unittest.Testcase. pylint: disable=invalid-name
# =============================================================================
# RaggedTensor class docstring examples
# =============================================================================
def testClassDocStringExamples(self):
# From section: "Component Tensors"
rt = RaggedTensor.from_row_splits(
values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8])
self.assertAllEqual(rt, [[3, 1, 4, 1], [], [5, 9, 2], [6], []])
del rt
# From section: "Alternative Row-Partitioning Schemes"
values = [3, 1, 4, 1, 5, 9, 2, 6]
rt1 = RaggedTensor.from_row_splits(values, row_splits=[0, 4, 4, 7, 8, 8])
rt2 = RaggedTensor.from_row_lengths(values, row_lengths=[4, 0, 3, 1, 0])
rt3 = RaggedTensor.from_value_rowids(
values, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5)
rt4 = RaggedTensor.from_row_starts(values, row_starts=[0, 4, 4, 7, 8])
rt5 = RaggedTensor.from_row_limits(values, row_limits=[4, 4, 7, 8, 8])
for rt in (rt1, rt2, rt3, rt4, rt5):
self.assertAllEqual(rt, [[3, 1, 4, 1], [], [5, 9, 2], [6], []])
del rt1, rt2, rt3, rt4, rt5
# From section: "Multiple Ragged Dimensions"
inner_rt = RaggedTensor.from_row_splits(
values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8])
outer_rt = RaggedTensor.from_row_splits(
values=inner_rt, row_splits=[0, 3, 3, 5])
self.assertEqual(outer_rt.ragged_rank, 2)
self.assertAllEqual(outer_rt,
[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]])
del inner_rt, outer_rt
# From section: "Multiple Ragged Dimensions"
rt = RaggedTensor.from_nested_row_splits(
flat_values=[3, 1, 4, 1, 5, 9, 2, 6],
nested_row_splits=([0, 3, 3, 5], [0, 4, 4, 7, 8, 8]))
self.assertAllEqual(rt, [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]])
del rt
# From section: "Uniform Inner Dimensions"
rt = RaggedTensor.from_row_splits(
values=array_ops.ones([5, 3]), row_splits=[0, 2, 5])
self.assertAllEqual(
rt, [[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1]]])
self.assertEqual(rt.shape.as_list(), [2, None, 3])
del rt
# =============================================================================
# RaggedTensorValue Constructor
# =============================================================================
def testRaggedTensorValueConstruction(self):
values = np.array(b'a b c d e f g'.split())
splits = np.array([0, 2, 5, 6, 6, 7], dtype=np.int64)
splits2 = np.array([0, 3, 5], dtype=np.int64)
# Test construction of a RaggedTensorValue with ragged_rank=1.
rt_value = ragged_tensor_value.RaggedTensorValue(values, splits)
self.assertEqual(rt_value.row_splits.dtype, np.int64)
self.assertEqual(rt_value.shape, (5, None))
self.assertLen(rt_value.nested_row_splits, 1)
self.assertAllEqual(splits, rt_value.row_splits)
self.assertAllEqual(values, rt_value.values)
self.assertAllEqual(splits, rt_value.nested_row_splits[0])
self.assertAllEqual(values, rt_value.flat_values)
# Test construction of a RaggedTensorValue with ragged_rank=2.
rt_value = ragged_tensor_value.RaggedTensorValue(
values=ragged_tensor_value.RaggedTensorValue(values, splits),
row_splits=splits2)
self.assertEqual(rt_value.row_splits.dtype, np.int64)
self.assertEqual(rt_value.shape, (2, None, None))
self.assertLen(rt_value.nested_row_splits, 2)
self.assertAllEqual(splits2, rt_value.row_splits)
self.assertAllEqual(splits, rt_value.values.row_splits)
self.assertAllEqual(splits2, rt_value.nested_row_splits[0])
self.assertAllEqual(splits, rt_value.nested_row_splits[1])
self.assertAllEqual(values, rt_value.values.values)
self.assertAllEqual(values, rt_value.flat_values)
# =============================================================================
# RaggedTensor Constructor (private)
# =============================================================================
def testRaggedTensorConstruction(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
row_splits = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
rp = RowPartition.from_row_splits(row_splits)
rt = RaggedTensor(values=values, row_partition=rp, internal=True)
self.assertAllEqual(rt,
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
def testRaggedTensorConstructionErrors(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
row_splits = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
rp = RowPartition.from_row_splits(row_splits)
with self.assertRaisesRegex(ValueError,
'RaggedTensor constructor is private'):
RaggedTensor(values=values, row_partition=rp)
with self.assertRaisesRegex(
TypeError, r'type\(values\) must be one of: Tensor, RaggedTensor'):
RaggedTensor(values=range(7), row_partition=rp, internal=True)
with self.assertRaisesRegex(
TypeError, 'Argument `row_partition` must be a RowPartition'):
RaggedTensor(
values=values, row_partition=[0, 2, 2, 5, 6, 7], internal=True)
# =============================================================================
# RaggedTensor Factory Ops
# =============================================================================
def testFromValueRowIdsWithDerivedNRows(self):
# nrows is known at graph creation time.
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
rt = RaggedTensor.from_value_rowids(values, value_rowids, validate=False)
self.assertEqual(rt.dtype, dtypes.string)
self.assertEqual(rt.shape.as_list(), [5, None])
self.assertEqual(rt.ragged_rank, 1)
rt_values = rt.values
rt_value_rowids = rt.value_rowids()
rt_nrows = rt.nrows()
self.assertIs(rt_values, values)
self.assertIs(rt_value_rowids, value_rowids) # cached_value_rowids
self.assertAllEqual(rt_value_rowids, value_rowids)
self.assertAllEqual(rt_nrows, 5)
self.assertAllEqual(rt,
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
def testFromValueRowIdsWithDerivedNRowsDynamic(self):
# nrows is not known at graph creation time.
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
value_rowids = array_ops.placeholder_with_default(value_rowids, shape=None)
rt = RaggedTensor.from_value_rowids(values, value_rowids, validate=False)
self.assertEqual(rt.dtype, dtypes.string)
if context.executing_eagerly():
self.assertEqual(rt.shape.as_list(), [5, None])
else:
self.assertEqual(rt.shape.as_list(), [None, None])
self.assertEqual(rt.ragged_rank, 1)
rt_values = rt.values
rt_value_rowids = rt.value_rowids()
rt_nrows = rt.nrows()
self.assertIs(rt_values, values)
self.assertIs(rt_value_rowids, value_rowids) # cached_value_rowids
self.assertAllEqual(rt_value_rowids, value_rowids)
self.assertAllEqual(rt_nrows, 5)
self.assertAllEqual(rt,
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
def testFromValueRowIdsWithExplicitNRows(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
nrows = constant_op.constant(7, dtypes.int64)
rt = RaggedTensor.from_value_rowids(
values, value_rowids, nrows, validate=False)
self.assertEqual(rt.dtype, dtypes.string)
self.assertEqual(rt.shape.as_list(), [7, None])
self.assertEqual(rt.ragged_rank, 1)
rt_values = rt.values
rt_value_rowids = rt.value_rowids()
rt_nrows = rt.nrows()
self.assertIs(rt_values, values)
self.assertIs(rt_value_rowids, value_rowids) # cached_value_rowids
self.assertIs(rt_nrows, nrows) # cached_nrows
self.assertAllEqual(
rt, [[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g'], [], []])
def testFromValueRowIdsWithExplicitNRowsEqualToDefault(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
nrows = constant_op.constant(5, dtypes.int64)
rt = RaggedTensor.from_value_rowids(
values, value_rowids, nrows, validate=False)
self.assertEqual(rt.dtype, dtypes.string)
self.assertEqual(rt.shape.as_list(), [5, None])
self.assertEqual(rt.ragged_rank, 1)
rt_values = rt.values
rt_value_rowids = rt.value_rowids()
rt_nrows = rt.nrows()
self.assertIs(rt_values, values)
self.assertIs(rt_value_rowids, value_rowids) # cached_value_rowids
self.assertIs(rt_nrows, nrows) # cached_nrows
self.assertAllEqual(rt_value_rowids, value_rowids)
self.assertAllEqual(rt_nrows, nrows)
self.assertAllEqual(rt,
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
def testFromValueRowIdsWithEmptyValues(self):
rt = RaggedTensor.from_value_rowids([], [])
rt_nrows = rt.nrows()
self.assertEqual(rt.dtype, dtypes.float32)
self.assertEqual(rt.shape.as_list(), [0, None])
self.assertEqual(rt.ragged_rank, 1)
self.assertEqual(rt.values.shape.as_list(), [0])
self.assertEqual(rt.value_rowids().shape.as_list(), [0])
self.assertAllEqual(rt_nrows, 0)
self.assertAllEqual(rt, [])
def testFromRowSplits(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
row_splits = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
rt = RaggedTensor.from_row_splits(values, row_splits, validate=False)
self.assertEqual(rt.dtype, dtypes.string)
self.assertEqual(rt.shape.as_list(), [5, None])
self.assertEqual(rt.ragged_rank, 1)
rt_values = rt.values
rt_row_splits = rt.row_splits
rt_nrows = rt.nrows()
self.assertIs(rt_values, values)
self.assertIs(rt_row_splits, row_splits)
self.assertAllEqual(rt_nrows, 5)
self.assertAllEqual(rt,
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
def testFromRowSplitsWithDifferentSplitTypes(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
splits1 = [0, 2, 2, 5, 6, 7]
splits2 = np.array([0, 2, 2, 5, 6, 7], np.int64)
splits3 = np.array([0, 2, 2, 5, 6, 7], np.int32)
splits4 = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
splits5 = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int32)
rt1 = RaggedTensor.from_row_splits(values, splits1)
rt2 = RaggedTensor.from_row_splits(values, splits2)
rt3 = RaggedTensor.from_row_splits(values, splits3)
rt4 = RaggedTensor.from_row_splits(values, splits4)
rt5 = RaggedTensor.from_row_splits(values, splits5)
self.assertEqual(rt1.row_splits.dtype, dtypes.int64)
self.assertEqual(rt2.row_splits.dtype, dtypes.int64)
self.assertEqual(rt3.row_splits.dtype, dtypes.int32)
self.assertEqual(rt4.row_splits.dtype, dtypes.int64)
self.assertEqual(rt5.row_splits.dtype, dtypes.int32)
def testFromRowSplitsWithEmptySplits(self):
err_msg = 'row_splits tensor may not be empty'
with self.assertRaisesRegex(ValueError, err_msg):
RaggedTensor.from_row_splits([], [])
def testFromRowStarts(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
row_starts = constant_op.constant([0, 2, 2, 5, 6], dtypes.int64)
rt = RaggedTensor.from_row_starts(values, row_starts, validate=False)
self.assertEqual(rt.dtype, dtypes.string)
self.assertEqual(rt.shape.as_list(), [5, None])
self.assertEqual(rt.ragged_rank, 1)
rt_values = rt.values
rt_row_starts = rt.row_starts()
rt_nrows = rt.nrows()
self.assertIs(rt_values, values)
self.assertAllEqual(rt_nrows, 5)
self.assertAllEqual(rt_row_starts, row_starts)
self.assertAllEqual(rt,
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
def testFromRowLimits(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
row_limits = constant_op.constant([2, 2, 5, 6, 7], dtypes.int64)
rt = RaggedTensor.from_row_limits(values, row_limits, validate=False)
self.assertEqual(rt.dtype, dtypes.string)
self.assertEqual(rt.shape.as_list(), [5, None])
self.assertEqual(rt.ragged_rank, 1)
rt_values = rt.values
rt_row_limits = rt.row_limits()
rt_nrows = rt.nrows()
self.assertIs(rt_values, values)
self.assertAllEqual(rt_nrows, 5)
self.assertAllEqual(rt_row_limits, row_limits)
self.assertAllEqual(rt,
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
def testFromRowLengths(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
row_lengths = constant_op.constant([2, 0, 3, 1, 1], dtypes.int64)
rt = RaggedTensor.from_row_lengths(values, row_lengths, validate=False)
self.assertEqual(rt.dtype, dtypes.string)
self.assertEqual(rt.shape.as_list(), [5, None])
self.assertEqual(rt.ragged_rank, 1)
rt_values = rt.values
rt_row_lengths = rt.row_lengths()
rt_nrows = rt.nrows()
self.assertIs(rt_values, values)
self.assertIs(rt_row_lengths, row_lengths) # cached_nrows
self.assertAllEqual(rt_nrows, 5)
self.assertAllEqual(rt_row_lengths, row_lengths)
self.assertAllEqual(rt,
[[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
def testFromRowLengthsInt32(self):
rt = RaggedTensor.from_row_lengths([1, 2, 3, 4],
constant_op.constant([1, 0, 3],
dtype=dtypes.int32))
rt2 = RaggedTensor.from_row_lengths(rt, [2, 1, 0])
self.assertAllEqual([2, 1, 0], rt2.row_lengths())
def testFromUniformRowLength(self):
values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
a1 = RaggedTensor.from_uniform_row_length(values, 2)
a2 = RaggedTensor.from_uniform_row_length(values, 2, 8)
self.assertAllEqual(
a1,
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16]])
self.assertAllEqual(a1, a2)
self.assertEqual(a1.shape.as_list(), [8, 2])
self.assertEqual(a2.shape.as_list(), [8, 2])
b1 = RaggedTensor.from_uniform_row_length(a1, 2)
b2 = RaggedTensor.from_uniform_row_length(a1, 2, 4)
self.assertAllEqual(b1, [[[1, 2], [3, 4]], [[5, 6], [7, 8]],
[[9, 10], [11, 12]], [[13, 14], [15, 16]]])
self.assertAllEqual(b1, b2)
self.assertEqual(b1.shape.as_list(), [4, 2, 2])
self.assertEqual(b2.shape.as_list(), [4, 2, 2])
c1 = RaggedTensor.from_uniform_row_length(b1, 2)
c2 = RaggedTensor.from_uniform_row_length(b1, 2, 2)
self.assertAllEqual(c1, [[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[9, 10], [11, 12]], [[13, 14], [15, 16]]]])
self.assertAllEqual(c1, c2)
self.assertEqual(c1.shape.as_list(), [2, 2, 2, 2])
self.assertEqual(c2.shape.as_list(), [2, 2, 2, 2])
def testFromUniformRowLengthWithEmptyValues(self):
empty_values = []
a = RaggedTensor.from_uniform_row_length(empty_values, 0, nrows=10)
self.assertEqual(a.shape.as_list(), [10, 0])
b = RaggedTensor.from_uniform_row_length(a, 2)
self.assertEqual(b.shape.as_list(), [5, 2, 0])
# Make sure we avoid divide-by-zero when finding nrows for nvals=rowlen=0.
c = RaggedTensor.from_uniform_row_length(empty_values, 0)
self.assertEqual(c.shape.as_list(), [0, 0])
d = RaggedTensor.from_uniform_row_length(empty_values, 0, nrows=0)
self.assertEqual(d.shape.as_list(), [0, 0])
def testFromUniformRowLengthWithPlaceholders(self):
ph_values = array_ops.placeholder_with_default([1, 2, 3, 4, 5, 6], [None])
ph_rowlen = array_ops.placeholder_with_default(3, None)
rt1 = RaggedTensor.from_uniform_row_length(ph_values, 3)
rt2 = RaggedTensor.from_uniform_row_length(ph_values, ph_rowlen)
rt3 = RaggedTensor.from_uniform_row_length([1, 2, 3, 4, 5, 6], ph_rowlen)
self.assertAllEqual(rt1, [[1, 2, 3], [4, 5, 6]])
self.assertAllEqual(rt2, [[1, 2, 3], [4, 5, 6]])
self.assertAllEqual(rt3, [[1, 2, 3], [4, 5, 6]])
if context.executing_eagerly():
self.assertEqual(rt1.shape.as_list(), [2, 3])
self.assertEqual(rt2.shape.as_list(), [2, 3])
self.assertEqual(rt3.shape.as_list(), [2, 3])
else:
self.assertEqual(rt1.shape.as_list(), [None, 3])
self.assertEqual(rt2.shape.as_list(), [None, None])
self.assertEqual(rt3.shape.as_list(), [None, None])
b = RaggedTensor.from_uniform_row_length(rt1, 2)
self.assertAllEqual(b, [[[1, 2, 3], [4, 5, 6]]])
# Make sure we avoid divide-by-zero when finding nrows for nvals=rowlen=0.
ph_empty_values = array_ops.placeholder_with_default(
array_ops.zeros([0], dtypes.int64), [None])
ph_zero = array_ops.placeholder_with_default(0, [])
c = RaggedTensor.from_uniform_row_length(ph_empty_values, ph_zero)
if context.executing_eagerly():
self.assertEqual(c.shape.as_list(), [0, 0])
else:
self.assertEqual(c.shape.as_list(), [None, None])
def testFromNestedValueRowIdsWithDerivedNRows(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
nested_value_rowids = [
constant_op.constant([0, 0, 1, 3, 3], dtypes.int64),
constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
]
rt = RaggedTensor.from_nested_value_rowids(values, nested_value_rowids)
self.assertEqual(rt.dtype, dtypes.string)
self.assertEqual(rt.shape.as_list(), [4, None, None])
self.assertEqual(rt.ragged_rank, 2)
rt_values = rt.values
rt_value_rowids = rt.value_rowids()
rt_values_values = rt_values.values
rt_values_value_rowids = rt_values.value_rowids()
self.assertIs(rt_values_values, values)
self.assertAllEqual(rt_value_rowids, nested_value_rowids[0])
self.assertAllEqual(rt_values_value_rowids, nested_value_rowids[1])
self.assertAllEqual(
rt, [[[b'a', b'b'], []], [[b'c', b'd', b'e']], [], [[b'f'], [b'g']]])
def testFromNestedRowPartitions(self):
flat_values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
nested_row_splits = [[0, 2, 3, 3, 5], [0, 2, 2, 5, 6, 7]]
nested_row_partition = [
RowPartition.from_row_splits(constant_op.constant(x, dtypes.int64))
for x in nested_row_splits
]
rt = RaggedTensor._from_nested_row_partitions(
flat_values, nested_row_partition, validate=False)
self.assertEqual(rt.dtype, dtypes.string)
self.assertEqual(rt.shape.as_list(), [4, None, None])
self.assertEqual(rt.ragged_rank, 2)
self.assertAllEqual(
rt, [[[b'a', b'b'], []], [[b'c', b'd', b'e']], [], [[b'f'], [b'g']]])
def testFromNestedValueRowIdsWithExplicitNRows(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
nested_value_rowids = [
constant_op.constant([0, 0, 1, 3, 3, 3], dtypes.int64),
constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
]
nrows = [
constant_op.constant(6, dtypes.int64),
constant_op.constant(6, dtypes.int64)
]
rt = RaggedTensor.from_nested_value_rowids(values, nested_value_rowids,
nrows)
self.assertEqual(rt.dtype, dtypes.string)
self.assertEqual(rt.shape.as_list(), [6, None, None])
self.assertEqual(rt.ragged_rank, 2)
rt_values = rt.values
rt_value_rowids = rt.value_rowids()
rt_nrows = rt.nrows()
rt_values_values = rt_values.values
rt_values_value_rowids = rt_values.value_rowids()
rt_values_nrows = rt_values.nrows()
self.assertIs(rt_values_values, values)
self.assertAllEqual(rt_value_rowids, nested_value_rowids[0])
self.assertAllEqual(rt_values_value_rowids, nested_value_rowids[1])
self.assertAllEqual(rt_nrows, nrows[0])
self.assertAllEqual(rt_values_nrows, nrows[1])
self.assertAllEqual(rt, [[[b'a', b'b'], []], [[b'c', b'd', b'e']], [],
[[b'f'], [b'g'], []], [], []])
def testFromNestedValueRowIdsWithExplicitNRowsMismatch(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
nested_value_rowids = [
constant_op.constant([0, 0, 1, 3, 3, 3], dtypes.int64),
constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
]
nrows = [constant_op.constant(6, dtypes.int64)]
with self.assertRaisesRegex(
ValueError, 'Argument `nested_nrows` must have the same length as '
'argument `nested_value_rowids`'):
RaggedTensor.from_nested_value_rowids(values, nested_value_rowids, nrows)
def testFromNestedValueRowIdsWithNonListInput(self):
with self.assertRaisesRegex(
TypeError, 'Argument `nested_value_rowids` must be a list of Tensors'):
RaggedTensor.from_nested_value_rowids(
[1, 2, 3], constant_op.constant([[0, 1, 2], [0, 1, 2]], dtypes.int64))
with self.assertRaisesRegex(
TypeError, 'Argument `nested_nrows` must be a list of Tensors'):
RaggedTensor.from_nested_value_rowids([1, 2, 3], [[0, 1, 2], [0, 1, 2]],
constant_op.constant([3, 3]))
def testFromNestedRowSplits(self):
flat_values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
nested_row_splits = [
constant_op.constant([0, 2, 3, 3, 5], dtypes.int64),
constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
]
rt = RaggedTensor.from_nested_row_splits(
flat_values, nested_row_splits, validate=False)
self.assertEqual(rt.dtype, dtypes.string)
self.assertEqual(rt.shape.as_list(), [4, None, None])
self.assertEqual(rt.ragged_rank, 2)
rt_values = rt.values
rt_row_splits = rt.row_splits
rt_values_values = rt_values.values
rt_values_row_splits = rt_values.row_splits
self.assertIs(rt_values_values, flat_values)
self.assertIs(rt_row_splits, nested_row_splits[0])
self.assertIs(rt_values_row_splits, nested_row_splits[1])
self.assertAllEqual(
rt, [[[b'a', b'b'], []], [[b'c', b'd', b'e']], [], [[b'f'], [b'g']]])
def testWithRowSplits(self):
flat_values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
nested_row_splits = [
constant_op.constant([0, 2, 3, 3, 5], dtypes.int64),
constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
]
rt = RaggedTensor.from_nested_row_splits(
flat_values, nested_row_splits, validate=False)
rt = rt.with_row_splits_dtype(dtypes.int32)
self.assertEqual(rt.dtype, dtypes.string)
self.assertEqual(rt.shape.as_list(), [4, None, None])
self.assertEqual(rt.ragged_rank, 2)
rt_values = rt.values
rt_row_splits = rt.row_splits
rt_values_values = rt_values.values
rt_values_row_splits = rt_values.row_splits
self.assertAllEqual(rt_values_values, flat_values)
self.assertAllEqual(rt_row_splits, nested_row_splits[0])
self.assertAllEqual(rt_values_row_splits, nested_row_splits[1])
self.assertAllEqual(
rt, [[[b'a', b'b'], []], [[b'c', b'd', b'e']], [], [[b'f'], [b'g']]])
def testFromNestedRowSplitsWithNonListInput(self):
with self.assertRaisesRegex(
TypeError, '`nested_row_splits` must be a list of Tensors'):
RaggedTensor.from_nested_row_splits(
[1, 2], constant_op.constant([[0, 1, 2], [0, 1, 2]], dtypes.int64))
def testFromValueRowIdsWithBadNRows(self):
values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
nrows = constant_op.constant(5, dtypes.int64)
with self.assertRaisesRegex(ValueError, r'Expected nrows >= 0; got -2'):
RaggedTensor.from_value_rowids(
values=values,
value_rowids=array_ops.placeholder_with_default(value_rowids, None),
nrows=-2)
with self.assertRaisesRegex(
ValueError, r'Expected nrows >= value_rowids\[-1\] \+ 1; got nrows=2, '
r'value_rowids\[-1\]=4'):
RaggedTensor.from_value_rowids(
values=values, value_rowids=value_rowids, nrows=2)
with self.assertRaisesRegex(
ValueError, r'Expected nrows >= value_rowids\[-1\] \+ 1; got nrows=4, '
r'value_rowids\[-1\]=4'):
RaggedTensor.from_value_rowids(
values=values, value_rowids=value_rowids, nrows=4)
with self.assertRaisesRegex(ValueError, r'Shape \(7, 1\) must have rank 1'):
RaggedTensor.from_value_rowids(
values=values,
value_rowids=array_ops.expand_dims(value_rowids, 1),
nrows=nrows)
with self.assertRaisesRegex(ValueError, r'Shape \(1,\) must have rank 0'):
RaggedTensor.from_value_rowids(
values=values,
value_rowids=value_rowids,
nrows=array_ops.expand_dims(nrows, 0))
def testCondWithTensorsFromValueIds(self):
# b/141166460
rt = RaggedTensor.from_value_rowids([1, 2, 3], [0, 0, 2])
c = array_ops.placeholder_with_default(True, None)
result = cond.cond(c, lambda: rt, lambda: rt)
self.assertAllEqual(rt, result)
def testGraphMismatch(self):
if not context.executing_eagerly():
with ops.Graph().as_default():
values = constant_op.constant([1, 2, 3], dtypes.int64)
with ops.Graph().as_default():
splits = constant_op.constant([0, 2, 3], dtypes.int64)
with self.assertRaisesRegex(ValueError,
'.* must be from the same graph as .*'):
RaggedTensor.from_row_splits(values, splits)
@parameterized.named_parameters([
dict(
testcase_name='Rank0',
tensor='a'),
dict(
testcase_name='Rank1',
tensor=['a', 'b']),
])
  def testFromTensorRankError(self, tensor):
    """from_tensor rejects inputs of rank < 2 (scalars and vectors)."""
    with self.assertRaisesRegex(ValueError, 'must be greater than 1'):
      RaggedTensor.from_tensor(tensor)
# =============================================================================
# Ragged Value & Row-Partitioning Tensor Accessors
# =============================================================================
  def testRaggedTensorAccessors_2d(self):
    """Row-partition accessors agree for equivalent 2-D ragged tensors."""
    values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
    row_splits = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
    value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
    rt1 = RaggedTensor.from_row_splits(values, row_splits)
    rt2 = RaggedTensor.from_value_rowids(values, value_rowids)
    # Both construction paths must expose identical accessor results.
    for rt in [rt1, rt2]:
      self.assertAllEqual(
          rt, [[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
      self.assertAllEqual(rt.values, [b'a', b'b', b'c', b'd', b'e', b'f', b'g'])
      self.assertEqual(rt.values.shape.dims[0].value, 7)
      self.assertAllEqual(rt.value_rowids(), [0, 0, 2, 2, 2, 3, 4])
      self.assertAllEqual(rt.nrows(), 5)
      self.assertAllEqual(rt.row_splits, [0, 2, 2, 5, 6, 7])
      self.assertAllEqual(rt.row_starts(), [0, 2, 2, 5, 6])
      self.assertAllEqual(rt.row_limits(), [2, 2, 5, 6, 7])
      self.assertAllEqual(rt.row_lengths(), [2, 0, 3, 1, 1])
      self.assertAllEqual(rt.flat_values,
                          [b'a', b'b', b'c', b'd', b'e', b'f', b'g'])
      self.assertLen(rt.nested_row_splits, 1)
      self.assertAllEqual(rt.nested_row_splits[0], [0, 2, 2, 5, 6, 7])
  def testRaggedTensorAccessors_3d_with_ragged_rank_1(self):
    """Accessors agree for equivalent 3-D ragged tensors with ragged_rank=1."""
    values = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13]]
    row_splits = constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
    value_rowids = constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
    row_lengths = constant_op.constant([2, 0, 3, 1, 1])
    rt1 = RaggedTensor.from_row_splits(values, row_splits)
    rt2 = RaggedTensor.from_value_rowids(values, value_rowids)
    rt3 = RaggedTensor.from_row_lengths(values, row_lengths)
    # All three construction paths must expose identical accessor results.
    for rt in [rt1, rt2, rt3]:
      self.assertAllEqual(rt, [[[0, 1], [2, 3]], [], [[4, 5], [6, 7], [8, 9]],
                               [[10, 11]], [[12, 13]]])
      self.assertAllEqual(
          rt.values,
          [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13]])
      self.assertEqual(rt.values.shape.dims[0].value, 7)
      self.assertAllEqual(rt.value_rowids(), [0, 0, 2, 2, 2, 3, 4])
      self.assertAllEqual(rt.nrows(), 5)
      self.assertAllEqual(rt.row_splits, [0, 2, 2, 5, 6, 7])
      self.assertAllEqual(rt.row_starts(), [0, 2, 2, 5, 6])
      self.assertAllEqual(rt.row_limits(), [2, 2, 5, 6, 7])
      self.assertAllEqual(rt.row_lengths(), [2, 0, 3, 1, 1])
      # axis=2 lengths are uniform (2) since only the outer dimension is
      # ragged.
      self.assertAllEqual(
          rt.row_lengths(axis=2), [[2, 2], [], [2, 2, 2], [2], [2]])
      self.assertAllEqual(
          rt.flat_values,
          [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11], [12, 13]])
      self.assertLen(rt.nested_row_splits, 1)
      self.assertAllEqual(rt.nested_row_splits[0], [0, 2, 2, 5, 6, 7])
      self.assertLen(rt.nested_value_rowids(), 1)
      self.assertAllEqual(rt.nested_value_rowids()[0], [0, 0, 2, 2, 2, 3, 4])
  def testRaggedTensorAccessors_3d_with_ragged_rank_2(self):
    """Accessors agree for equivalent 3-D ragged tensors with ragged_rank=2."""
    values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])
    nested_row_splits = [
        constant_op.constant([0, 2, 3, 3, 5], dtypes.int64),
        constant_op.constant([0, 2, 2, 5, 6, 7], dtypes.int64)
    ]
    nested_value_rowids = [
        constant_op.constant([0, 0, 1, 3, 3], dtypes.int64),
        constant_op.constant([0, 0, 2, 2, 2, 3, 4], dtypes.int64)
    ]
    rt1 = RaggedTensor.from_nested_row_splits(values, nested_row_splits)
    rt2 = RaggedTensor.from_nested_value_rowids(values, nested_value_rowids)
    # Both construction paths must expose identical accessor results.
    for rt in [rt1, rt2]:
      self.assertAllEqual(
          rt, [[[b'a', b'b'], []], [[b'c', b'd', b'e']], [], [[b'f'], [b'g']]])
      # `values` is itself ragged; single-level accessors describe the
      # outermost partition only.
      self.assertAllEqual(
          rt.values, [[b'a', b'b'], [], [b'c', b'd', b'e'], [b'f'], [b'g']])
      self.assertEqual(rt.values.shape.dims[0].value, 5)
      self.assertAllEqual(rt.value_rowids(), [0, 0, 1, 3, 3])
      self.assertAllEqual(rt.nrows(), 4)
      self.assertAllEqual(rt.row_splits, [0, 2, 3, 3, 5])
      self.assertAllEqual(rt.row_starts(), [0, 2, 3, 3])
      self.assertAllEqual(rt.row_limits(), [2, 3, 3, 5])
      self.assertAllEqual(rt.row_lengths(), [2, 1, 0, 2])
      self.assertAllEqual(rt.flat_values,
                          [b'a', b'b', b'c', b'd', b'e', b'f', b'g'])
      # Nested accessors expose both partition levels.
      self.assertLen(rt.nested_row_splits, 2)
      self.assertAllEqual(rt.nested_row_splits[0], [0, 2, 3, 3, 5])
      self.assertAllEqual(rt.nested_row_splits[1], [0, 2, 2, 5, 6, 7])
      self.assertLen(rt.nested_value_rowids(), 2)
      self.assertAllEqual(rt.nested_value_rowids()[0], [0, 0, 1, 3, 3])
      self.assertAllEqual(rt.nested_value_rowids()[1], [0, 0, 2, 2, 2, 3, 4])
# =============================================================================
# RaggedTensor.shape
# =============================================================================
  def testShape(self):
    """Tests for RaggedTensor.shape."""
    # 2-D: the ragged dimension is reported as None.
    rt1 = RaggedTensor.from_row_splits(b'a b c d e f g'.split(),
                                       [0, 2, 5, 6, 6, 7])
    self.assertEqual(rt1.shape.as_list(), [5, None])
    # 3-D with a uniform (size-2) inner dimension.
    rt2 = RaggedTensor.from_row_splits(
        [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14]],
        [0, 2, 5, 6, 6, 7])
    self.assertEqual(rt2.shape.as_list(), [5, None, 2])
    rt3 = RaggedTensor.from_row_splits(
        [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], [0, 2, 2, 3])
    self.assertEqual(rt3.shape.as_list(), [3, None, 2, 2])
    # Nesting a ragged tensor adds another ragged (None) dimension.
    rt4 = RaggedTensor.from_row_splits(rt3, [0, 1, 3, 3])
    self.assertEqual(rt4.shape.as_list(), [3, None, None, 2, 2])
    if not context.executing_eagerly():
      # Unknown-shape placeholders: ndims may be unknown, or dims all-None.
      rt5 = RaggedTensor.from_row_splits(
          array_ops.placeholder(dtype=dtypes.string), [0, 2, 3, 5])
      self.assertIsNone(rt5.shape.ndims)
      rt6 = RaggedTensor.from_row_splits(
          [1, 2, 3], array_ops.placeholder(dtype=dtypes.int64))
      self.assertEqual(rt6.shape.as_list(), [None, None])
def testGetShape(self):
rt = RaggedTensor.from_row_splits(b'a b c d e f g'.split(),
[0, 2, 5, 6, 6, 7])
self.assertEqual(rt.shape.as_list(), rt.get_shape().as_list())
# =============================================================================
# RaggedTensor.__str__
# =============================================================================
  def testRaggedTensorStr(self):
    """repr/str of a RaggedTensor differ between eager and graph mode."""
    values = [b'a', b'b', b'c', b'd', b'e', b'f', b'g']
    row_splits = [0, 2, 5, 6, 6, 7]
    rt = RaggedTensor.from_row_splits(values, row_splits, validate=False)
    splits_type = 'int64'
    if context.executing_eagerly():
      # Eager mode shows the actual nested values.
      expected_repr = '<tf.RaggedTensor {}>'.format([[b'a', b'b'],
                                                     [b'c', b'd', b'e'], [b'f'],
                                                     [], [b'g']])
    else:
      # Graph mode shows the symbolic component tensors instead.
      expected_repr = (
          'tf.RaggedTensor(values=Tensor("RaggedFromRowSplits/values:0", '
          'shape=(7,), dtype=string), '
          'row_splits=Tensor('
          '"RaggedFromRowSplits/RowPartitionFromRowSplits/row_splits:0",'
          ' shape=(6,), dtype={}))').format(splits_type)
    self.assertEqual(repr(rt), expected_repr)
    self.assertEqual(str(rt), expected_repr)
  def testRaggedTensorValueStr(self):
    """str/repr of a RaggedTensorValue show its values and row_splits arrays."""
    values = [b'a', b'b', b'c', b'd', b'e', b'f', b'g']
    row_splits = [0, 2, 5, 6, 6, 7]
    rt = ragged_tensor_value.RaggedTensorValue(
        np.array(values), np.array(row_splits, dtype=np.int64))
    expected_str = '<tf.RaggedTensorValue {}>'.format([[b'a', b'b'],
                                                       [b'c', b'd', b'e'],
                                                       [b'f'], [], [b'g']])
    expected_repr = ("tf.RaggedTensorValue(values=array({}, dtype='|S1'), "
                     'row_splits=array({}))'.format(values, row_splits))
    # Whitespace is normalized so the test is insensitive to line wrapping.
    self.assertEqual(' '.join(str(rt).split()), expected_str)
    self.assertEqual(' '.join(repr(rt).split()), expected_repr)
def testRaggedTensorStrWithZeroSizeInnerShape(self):
# Tests that b/226112826 is fixed.
if context.executing_eagerly():
rt = RaggedTensor.from_row_lengths(array_ops.zeros([9, 0]), [4, 3, 2])
expected_repr = (
'<tf.RaggedTensor [[[], [], [], []], [[], [], []], [[], []]]>')
self.assertEqual(' '.join(repr(rt).split()), expected_repr)
# =============================================================================
# RaggedTensor.with_values() and RaggedTensor.with_flat_values().
# =============================================================================
def testWithValues(self):
rt1 = ragged_factory_ops.constant([[1, 2], [3, 4, 5], [6], [], [7]])
rt2 = ragged_factory_ops.constant([[[1, 2], [3, 4, 5]], [[6]], [], [[],
[7]]])
rt1_plus_10 = rt1.with_values(rt1.values + 10)
rt2_times_10 = rt2.with_flat_values(rt2.flat_values * 10)
rt1_expanded = rt1.with_values(array_ops.expand_dims(rt1.values, axis=1))
self.assertAllEqual(rt1_plus_10, [[11, 12], [13, 14, 15], [16], [], [17]])
self.assertAllEqual(rt2_times_10,
[[[10, 20], [30, 40, 50]], [[60]], [], [[], [70]]])
self.assertAllEqual(rt1_expanded,
[[[1], [2]], [[3], [4], [5]], [[6]], [], [[7]]])
# =============================================================================
# Session.run
# =============================================================================
def testSessionRun(self):
if context.executing_eagerly():
return
rt1 = ragged_factory_ops.constant([[1, 2, 3], [4]])
rt2 = ragged_factory_ops.constant([[[], [1, 2]], [[3]]])
with self.test_session() as session:
result = session.run({'rt1': rt1, 'rt2': rt2})
self.assertCountEqual(result.keys(), ['rt1', 'rt2'])
self.assertEqual(result['rt1'].to_list(), [[1, 2, 3], [4]])
self.assertEqual(result['rt2'].to_list(), [[[], [1, 2]], [[3]]])
  def testSessionRunFeed(self):
    """Ragged placeholders can be fed with ragged constant values."""
    if context.executing_eagerly():
      return
    rt1 = RaggedTensor.from_row_splits(
        array_ops.placeholder(dtypes.int32),
        array_ops.placeholder(dtypes.int64))
    rt2 = RaggedTensor.from_nested_row_splits(
        array_ops.placeholder(dtypes.int32), [
            array_ops.placeholder(dtypes.int64),
            array_ops.placeholder(dtypes.int64)
        ])
    rt1_feed_val = ragged_factory_ops.constant_value([[1, 2, 3], [4]])
    rt2_feed_val = ragged_factory_ops.constant_value([[[], [1, 2]], [[3]]])
    with self.test_session() as session:
      fetches = {'rt1': rt1, 'rt2': rt2}
      # Feeding a whole RaggedTensor feeds all its component placeholders.
      feeds = {rt1: rt1_feed_val, rt2: rt2_feed_val}
      result = session.run(fetches, feed_dict=feeds)
      self.assertCountEqual(result.keys(), ['rt1', 'rt2'])
      self.assertEqual(result['rt1'].to_list(), [[1, 2, 3], [4]])
      self.assertEqual(result['rt2'].to_list(), [[[], [1, 2]], [[3]]])
  def testSessionPartialRunFeed(self):
    """partial_run supports feeding and fetching ragged tensors in stages."""
    if context.executing_eagerly():
      return
    # Placeholder inputs.
    a = RaggedTensor.from_row_splits(
        array_ops.placeholder(dtypes.int32, shape=[None], name='a.values'),
        array_ops.placeholder(dtypes.int64, name='a.row_splits'))
    b = RaggedTensor.from_row_splits(
        array_ops.placeholder(dtypes.int32, shape=[None], name='b.values'),
        array_ops.placeholder(dtypes.int64, name='b.row_splits'))
    c = array_ops.placeholder(dtypes.int32, shape=[], name='c')
    # Feed values for placeholder inputs.
    a_val = ragged_factory_ops.constant_value([[1, 2, 3], [4]])
    b_val = ragged_factory_ops.constant_value([[5, 4, 3], [2]])
    c_val = 3
    # Compute some values.
    r1 = ragged_math_ops.reduce_sum(a * b, axis=1)
    r2 = ragged_math_ops.reduce_sum(a + c, axis=1)
    with self.test_session() as session:
      handle = session.partial_run_setup([r1, r2], [a, b, c])
      # First stage feeds a and b; second stage reuses a and feeds only c.
      res1 = session.partial_run(handle, r1, feed_dict={a: a_val, b: b_val})
      self.assertAllEqual(res1, [22, 8])
      res2 = session.partial_run(handle, r2, feed_dict={c: c_val})
      self.assertAllEqual(res2, [15, 7])
# Test case for GitHub issue 24679.
def testEagerForLoop(self):
if not context.executing_eagerly():
return
values = [[1., 2.], [3., 4., 5.], [6.]]
r = ragged_factory_ops.constant(values)
i = 0
for elem in r:
self.assertAllEqual(elem, values[i])
i += 1
def testConsumers(self):
if context.executing_eagerly():
return
a = RaggedTensor.from_row_splits(
array_ops.placeholder(dtypes.int32, shape=[None], name='a.values'),
array_ops.placeholder(dtypes.int64, name='a.row_splits'),
validate=False)
ragged_math_ops.reduce_sum(a)
self.assertLen(a.consumers(), 1)
@parameterized.parameters([
{
'descr': 'from_value_rowids',
'factory': RaggedTensor.from_value_rowids,
'test': RaggedTensor.value_rowids,
'values': {
'values': [1, 2, 3, 4, 5, 6],
'value_rowids': [0, 0, 1, 1, 2, 2],
},
'tensor_field': 'value_rowids',
'value_rowids': [0, 1, 2],
'nrows': 10
},
{
'descr': 'from_row_splits',
'factory': RaggedTensor.from_row_splits,
# row_splits is a property, not a function.
'test': (lambda rt: rt.row_splits),
'values': {
'values': [1, 2, 3, 4, 5, 6],
'row_splits': [0, 2, 4, 6],
},
'tensor_field': 'row_splits',
'row_splits': [0, 1, 2, 3]
},
{
'descr': 'from_row_lengths',
'factory': RaggedTensor.from_row_lengths,
'test': RaggedTensor.row_lengths,
'values': {
'values': [1, 2, 3, 4, 5, 6],
'row_lengths': [2, 2, 2],
},
'tensor_field': 'row_lengths',
'row_lengths': [1, 1, 1],
},
# from_row_starts
{
'descr': 'from_row_starts',
'factory': RaggedTensor.from_row_starts,
'test': RaggedTensor.row_starts,
'values': {
'values': [1, 2, 3, 4, 5, 6],
'row_starts': [0, 2, 4]
},
'tensor_field': 'row_starts',
'row_starts': [0, 1, 2]
},
# from_row_limits
{
'descr': 'from_row_limits',
'factory': RaggedTensor.from_row_limits,
'test': RaggedTensor.row_limits,
'values': {
'values': [1, 2, 3, 4, 5, 6],
'row_limits': [2, 4, 6]
},
'tensor_field': 'row_limits',
'row_limits': [3]
},
# from_uniform_row_length
{
'descr': 'from_uniform_row_length',
'factory': RaggedTensor.from_uniform_row_length,
# One cannot extract uniform_row_length or nvals, so we return
# nvals//nrows = uniform_row_length, where nvals = 3
'test': (lambda rt: 3 // (rt.shape[0])),
'values': {
'values': [1, 2, 3, 4, 5, 6],
'uniform_row_length': 2
},
'tensor_field': 'uniform_row_length',
'uniform_row_length': 3
},
])
  def testFactoryTypePreference(self, descr, test, factory, values,
                                tensor_field, **kwargs):
    """Factories accept a RaggedTensor as `values` alongside new partitions.

    Builds a ragged tensor whose partition tensor (`tensor_field`) is cast to
    int32, feeds it back to the same factory as `values` with a fresh
    partition from `kwargs`, and checks the requested partition round-trips.
    NOTE(review): presumably this exercises row-partition dtype preference
    (int32 vs. int64) — confirm against the factory implementations.
    """
    def op_cast(k, v):
      # Cast only the partition tensor under test to int32; pass the rest
      # through as plain python values.
      if k == tensor_field:
        return constant_op.constant(v, dtype=dtypes.int32)
      else:
        return v
    value_copy = {k: op_cast(k, v) for k, v in values.items()}
    rt = factory(**value_copy)
    kw_copy = {k: v for k, v in kwargs.items()}
    kw_copy['values'] = rt
    rt2 = factory(**kw_copy)
    self.assertAllEqual(kwargs[tensor_field], test(rt2))
@parameterized.parameters([
# from_value_rowids
{
'descr': 'bad rank for value_rowids',
'factory': RaggedTensor.from_value_rowids,
'values': [[1, 2], [3, 4]],
'value_rowids': [[1, 2], [3, 4]],
'nrows': 10,
},
{
'descr': 'bad rank for nrows',
'factory': RaggedTensor.from_value_rowids,
'values': [1, 2, 3, 4],
'value_rowids': [1, 2, 3, 4],
'nrows': [10],
},
{
'descr': 'len(values) != len(value_rowids)',
'factory': RaggedTensor.from_value_rowids,
'values': [1, 2, 3, 4],
'value_rowids': [1, 2, 3, 4, 5],
'nrows': 10,
},
{
'descr': 'negative value_rowid',
'factory': RaggedTensor.from_value_rowids,
'values': [1, 2, 3, 4],
'value_rowids': [-5, 2, 3, 4],
'nrows': 10,
},
{
'descr': 'non-monotonic-increasing value_rowid',
'factory': RaggedTensor.from_value_rowids,
'values': [1, 2, 3, 4],
'value_rowids': [4, 3, 2, 1],
'nrows': 10,
},
{
'descr': 'value_rowid > nrows',
'factory': RaggedTensor.from_value_rowids,
'values': [1, 2, 3, 4],
'value_rowids': [1, 2, 3, 4],
'nrows': 2,
},
{
'descr': 'bad rank for values',
'factory': RaggedTensor.from_value_rowids,
'values': 10,
'value_rowids': [1, 2, 3, 4],
'nrows': 10,
},
# from_row_splits
{
'descr': 'bad rank for row_splits',
'factory': RaggedTensor.from_row_splits,
'values': [[1, 2], [3, 4]],
'row_splits': [[1, 2], [3, 4]],
},
{
'descr': 'row_splits[0] != 0',
'factory': RaggedTensor.from_row_splits,
'values': [1, 2, 3, 4],
'row_splits': [2, 3, 4],
},
{
'descr': 'non-monotonic-increasing row_splits',
'factory': RaggedTensor.from_row_splits,
'values': [1, 2, 3, 4],
'row_splits': [0, 3, 2, 4],
},
{
'descr': 'row_splits[0] != nvals',
'factory': RaggedTensor.from_row_splits,
'values': [1, 2, 3, 4],
'row_splits': [0, 2, 3, 5],
},
{
'descr': 'bad rank for values',
'factory': RaggedTensor.from_row_splits,
'values': 10,
'row_splits': [0, 1],
},
# from_row_lengths
{
'descr': 'bad rank for row_lengths',
'factory': RaggedTensor.from_row_lengths,
'values': [1, 2, 3, 4],
'row_lengths': [[1, 2], [1, 0]],
},
{
'descr': 'negative row_lengths',
'factory': RaggedTensor.from_row_lengths,
'values': [1, 2, 3, 4],
'row_lengths': [3, -1, 2],
},
{
'descr': 'sum(row_lengths) != nvals',
'factory': RaggedTensor.from_row_lengths,
'values': [1, 2, 3, 4],
'row_lengths': [2, 4, 2, 8],
},
{
'descr': 'bad rank for values',
'factory': RaggedTensor.from_row_lengths,
'values': 10,
'row_lengths': [0, 1],
},
# from_row_starts
{
'descr': 'bad rank for row_starts',
'factory': RaggedTensor.from_row_starts,
'values': [[1, 2], [3, 4]],
'row_starts': [[1, 2], [3, 4]],
},
{
'descr': 'row_starts[0] != 0',
'factory': RaggedTensor.from_row_starts,
'values': [1, 2, 3, 4],
'row_starts': [2, 3, 4],
},
{
'descr': 'non-monotonic-increasing row_starts',
'factory': RaggedTensor.from_row_starts,
'values': [1, 2, 3, 4],
'row_starts': [0, 3, 2, 4],
},
{
'descr': 'row_starts[0] > nvals',
'factory': RaggedTensor.from_row_starts,
'values': [1, 2, 3, 4],
'row_starts': [0, 2, 3, 5],
},
{
'descr': 'bad rank for values',
'factory': RaggedTensor.from_row_starts,
'values': 10,
'row_starts': [0, 1],
},
# from_row_limits
{
'descr': 'bad rank for row_limits',
'factory': RaggedTensor.from_row_limits,
'values': [[1, 2], [3, 4]],
'row_limits': [[1, 2], [3, 4]],
},
{
'descr': 'row_limits[0] < 0',
'factory': RaggedTensor.from_row_limits,
'values': [1, 2, 3, 4],
'row_limits': [-1, 3, 4],
},
{
'descr': 'non-monotonic-increasing row_limits',
'factory': RaggedTensor.from_row_limits,
'values': [1, 2, 3, 4],
'row_limits': [0, 3, 2, 4],
},
{
'descr': 'row_limits[0] != nvals',
'factory': RaggedTensor.from_row_limits,
'values': [1, 2, 3, 4],
'row_limits': [0, 2, 3, 5],
},
{
'descr': 'bad rank for values',
'factory': RaggedTensor.from_row_limits,
'values': 10,
'row_limits': [0, 1],
},
# from_uniform_row_length
{
'descr': 'rowlen * nrows != nvals (1)',
'factory': RaggedTensor.from_uniform_row_length,
'values': [1, 2, 3, 4, 5],
'uniform_row_length': 3,
},
{
'descr': 'rowlen * nrows != nvals (2)',
'factory': RaggedTensor.from_uniform_row_length,
'values': [1, 2, 3, 4, 5],
'uniform_row_length': 6,
},
{
'descr': 'rowlen * nrows != nvals (3)',
'factory': RaggedTensor.from_uniform_row_length,
'values': [1, 2, 3, 4, 5, 6],
'uniform_row_length': 3,
'nrows': 3,
},
{
'descr': 'rowlen must be a scalar',
'factory': RaggedTensor.from_uniform_row_length,
'values': [1, 2, 3, 4],
'uniform_row_length': [2],
},
{
'descr': 'rowlen must be nonnegative',
'factory': RaggedTensor.from_uniform_row_length,
'values': [1, 2, 3, 4],
'uniform_row_length': -1,
},
])
def testFactoryValidation(self, descr, factory, **kwargs):
# When input tensors have shape information, some of these errors will be
# detected statically.
with self.assertRaises((errors.InvalidArgumentError, ValueError)):
self.evaluate(factory(**kwargs))
# Remove shape information (by wrapping tensors in placeholders), and check
# that we detect the errors when the graph is run.
if not context.executing_eagerly():
def wrap_arg(v):
return array_ops.placeholder_with_default(
constant_op.constant(v, dtype=dtypes.int64),
tensor_shape.TensorShape(None))
kwargs = dict((k, wrap_arg(v)) for (k, v) in kwargs.items())
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(factory(**kwargs))
# =============================================================================
# RaggedTensor Variant conversion
# =============================================================================
@parameterized.named_parameters(
{
'testcase_name': 'Shape_5_none',
'ragged_constant': [[1, 2], [3, 4, 5], [6], [], [7]],
'ragged_rank': 1
}, {
'testcase_name': 'Shape_4_none_2',
'ragged_constant': [[[1, 2]], [], [[3, 4]], []],
'ragged_rank': 1
}, {
'testcase_name': 'Shape_1_none_none',
'ragged_constant': [[[1], [2, 3, 4, 5, 6, 7]], [[]]],
'ragged_rank': 2
})
  def testRaggedToVariant(self, ragged_constant, ragged_rank):
    """_to_variant() without batching yields a scalar variant tensor."""
    rt = ragged_factory_ops.constant(ragged_constant, ragged_rank=ragged_rank)
    et = rt._to_variant()
    self.assertEqual(et.shape.as_list(), [])
    self.assertEqual(et.dtype, dtypes.variant)
@parameterized.parameters(
{
'ragged_constant': [[1, 2], [3, 4, 5], [6], [], [7]],
'ragged_rank': 1,
'num_batched_elems': 5
}, {
'ragged_constant': [[[1, 2]], [], [[3, 4]], []],
'ragged_rank': 1,
'num_batched_elems': 4
}, {
'ragged_constant': [[[1], [2, 3, 4, 5, 6, 7]], [[]]],
'ragged_rank': 2,
'num_batched_elems': 2
})
  def testRaggedToBatchedVariant(self, ragged_constant, ragged_rank,
                                 num_batched_elems):
    """_to_variant(batched_input=True) yields one variant per outer row."""
    rt = ragged_factory_ops.constant(ragged_constant, ragged_rank=ragged_rank)
    et = rt._to_variant(batched_input=True)
    self.assertEqual(et.shape.as_list(), [num_batched_elems])
    self.assertEqual(et.dtype, dtypes.variant)
@parameterized.parameters(
# 2D test cases.
{
'ragged_constant': [[]],
'ragged_rank': 1,
},
{
'ragged_constant': [[1]],
'ragged_rank': 1,
},
{
'ragged_constant': [[1, 2]],
'ragged_rank': 1,
},
{
'ragged_constant': [[1], [2], [3]],
'ragged_rank': 1,
},
{
'ragged_constant': [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
'ragged_rank': 1,
},
{
'ragged_constant': [[1, 2], [3, 4, 5], [6], [], [7]],
'ragged_rank': 1,
},
# 3D test cases.
{
'ragged_constant': [[[]]],
'ragged_rank': 2,
},
{
'ragged_constant': [[[1]]],
'ragged_rank': 2,
},
{
'ragged_constant': [[[1, 2]]],
'ragged_rank': 2,
},
{
'ragged_constant': [[[1, 2], [3, 4]]],
'ragged_rank': 2,
},
{
'ragged_constant': [[[1, 2]], [[3, 4]], [[5, 6]], [[7, 8]]],
'ragged_rank': 2,
},
{
'ragged_constant': [[[1], [2]], [[3], [4]], [[5], [6]], [[7], [8]]],
'ragged_rank': 2,
},
{
'ragged_constant': [[[1, 2]], [], [[3, 4]], []],
'ragged_rank': 2,
},
# 4D test cases.
{
'ragged_constant': [[[[1, 2], [3, 4]]],
[[[0, 0], [0, 0]], [[5, 6], [7, 8]]], []],
'ragged_rank': 3,
},
# dtype `string`.
{
'ragged_constant': [['a'], ['b'], ['c']],
'ragged_rank': 1,
'dtype': dtypes.string,
},
{
'ragged_constant': [[['a', 'b'], ['c', 'd']]],
'ragged_rank': 2,
'dtype': dtypes.string,
},
{
'ragged_constant': [[[['a', 'b'], ['c', 'd']]],
[[['e', 'f'], ['g', 'h']], [['i', 'j'],
['k', 'l']]], []],
'ragged_rank': 3,
'dtype': dtypes.string,
})
  def testVariantRoundTrip(self,
                           ragged_constant,
                           ragged_rank,
                           dtype=dtypes.int32):
    """Encoding to variant and decoding back preserves the ragged tensor."""
    rt = ragged_factory_ops.constant(
        ragged_constant, ragged_rank=ragged_rank, dtype=dtype)
    et = rt._to_variant()
    round_trip_rt = RaggedTensor._from_variant(
        et, dtype, output_ragged_rank=ragged_rank)
    self.assertAllEqual(rt, round_trip_rt)
  def testBatchedVariantRoundTripInputRaggedRankInferred(self):
    """_from_variant infers the input ragged rank when it is not specified."""
    ragged_rank = 1
    rt = ragged_factory_ops.constant(
        [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]],
        ragged_rank=ragged_rank)
    batched_variant = rt._to_variant(batched_input=True)
    # Reshaping the batched variants adds one more ragged dimension on decode.
    nested_batched_variant = array_ops.reshape(batched_variant, [5, 2])
    decoded_rt = RaggedTensor._from_variant(
        nested_batched_variant,
        dtype=dtypes.int32,
        output_ragged_rank=ragged_rank + 1)
    expected_rt = ragged_factory_ops.constant([[[0], [1]], [[2], [3]], [[4],
                                                                        [5]],
                                               [[6], [7]], [[8], [9]]])
    self.assertAllEqual(decoded_rt, expected_rt)
  def testBatchedVariantRoundTripWithInputRaggedRank(self):
    """Decoding reshaped batched variants with an explicit input_ragged_rank."""
    ragged_rank = 1
    rt = ragged_factory_ops.constant(
        [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]],
        ragged_rank=ragged_rank)
    batched_variant = rt._to_variant(batched_input=True)
    # Reshaping the batched variants adds one more ragged dimension on decode.
    nested_batched_variant = array_ops.reshape(batched_variant, [5, 2])
    decoded_rt = RaggedTensor._from_variant(
        nested_batched_variant,
        dtype=dtypes.int32,
        output_ragged_rank=ragged_rank + 1,
        input_ragged_rank=ragged_rank - 1)
    expected_rt = ragged_factory_ops.constant([[[0], [1]], [[2], [3]], [[4],
                                                                        [5]],
                                               [[6], [7]], [[8], [9]]])
    self.assertAllEqual(decoded_rt, expected_rt)
def testUnbatchVariant(self): # b/141789000
rt = ragged_factory_ops.constant([[1, 2, 3], [4, 5], [], [6, 7, 8, 9]])
batched = rt._to_variant(batched_input=True)
for i in range(4):
row = RaggedTensor._from_variant(
batched[i], dtype=dtypes.int32, output_ragged_rank=0)
self.assertAllEqual(rt[i], row)
  def testUnbatchVariantInDataset(self):
    """Dataset.from_tensor_slices over a ragged tensor yields its rows."""
    rt = ragged_factory_ops.constant([[1, 2, 3], [4, 5], [], [6, 7, 8, 9]])
    ds = dataset_ops.Dataset.from_tensor_slices(rt)
    if context.executing_eagerly():
      for i, value in enumerate(ds):
        self.assertAllEqual(rt[i], value)
    else:
      it = dataset_ops.make_one_shot_iterator(ds)
      out = it.get_next()
      with self.cached_session() as sess:
        # NOTE(review): only 3 of the 4 rows are checked in this branch —
        # presumably intentional, but worth confirming.
        for i in range(3):
          self.assertAllEqual(sess.run(rt[i]), out)
  def testToVariantInvalidParams(self):
    """ragged_tensor_to_variant rejects nested splits of the wrong rank."""
    # Rank-0 splits.
    self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
                           r'be rank 1 but is rank 0',
                           gen_ragged_conversion_ops.ragged_tensor_to_variant,
                           rt_nested_splits=[0, 1, 2],
                           rt_dense_values=[0, 1, 2],
                           batched_input=True)
    # Rank-2 splits.
    self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
                           r'be rank 1 but is rank 2',
                           gen_ragged_conversion_ops.ragged_tensor_to_variant,
                           rt_nested_splits=[[[0]], [[1]], [[2]]],
                           rt_dense_values=[0, 1, 2],
                           batched_input=True)
  def testFromVariantInvalidParams(self):
    """_from_variant rejects inconsistent output/input ragged ranks."""
    rt = ragged_factory_ops.constant([[0], [1], [2], [3]])
    batched_variant = rt._to_variant(batched_input=True)
    nested_batched_variant = array_ops.reshape(batched_variant, [2, 2])
    with self.assertRaisesRegex(ValueError,
                                r'`output_ragged_rank` \(1\) must be equal to'):
      RaggedTensor._from_variant(
          nested_batched_variant,
          dtype=dtypes.int32,
          output_ragged_rank=1,
          input_ragged_rank=1)
  def testUnbatchToTensor(self):
    """Unbatching via the type spec reproduces the individual rows."""
    batched = ragged_factory_ops.constant([[0], [1], [2], [3]])
    unbatched = [constant_op.constant(x) for x in [[0], [1], [2], [3]]]
    batched_spec = type_spec.type_spec_from_value(batched)
    # Note that the unbatched_spec is derived from the batched spec, so it can
    # add back a ragged instead of a dense tensor.
    unbatched_spec = batched_spec._unbatch()
    batched_tensor_list = batched_spec._to_batched_tensor_list(batched)
    # Transpose the per-component stacks into per-element tensor lists.
    unbatched_tensor_lists = zip(
        *[array_ops_stack.unstack(tensor) for tensor in batched_tensor_list])
    actual_unbatched = [
        batched_spec._unbatch()._from_tensor_list(tensor_list)
        for tensor_list in unbatched_tensor_lists]
    self.assertLen(actual_unbatched, len(unbatched))
    for x in actual_unbatched:
      self.assertTrue(unbatched_spec.is_compatible_with(x))
    for (actual, expected) in zip(actual_unbatched, unbatched):
      self.assertAllEqual(actual, expected)
def testDatasetUnbatchTwice(self):
batched = ragged_factory_ops.constant([[[0], [1], [5]], [[2], [3]]])
ds = dataset_ops.Dataset.from_tensors(batched)
ds2 = ds.unbatch()
ds3 = ds2.unbatch()
if context.executing_eagerly():
value = next(iter(ds3))
self.assertAllEqual([0], value)
def testDatasetUnbatchToScalar(self):
batched = ragged_factory_ops.constant([[0], [1], [2], [3]])
ds = dataset_ops.Dataset.from_tensors(batched)
ds2 = ds.unbatch()
ds3 = ds2.unbatch()
if context.executing_eagerly():
value = next(iter(ds3))
self.assertAllEqual(0, value)
  def testBatchToTensor(self):
    """Batching unbatched elements via the type spec rebuilds the original."""
    batched = ragged_factory_ops.constant([[0], [1], [2], [3]])
    unbatched = [constant_op.constant(x) for x in [[0], [1], [2], [3]]]
    batched_spec = type_spec.type_spec_from_value(batched)
    # Note that the unbatched_spec is derived from the batched spec, so it can
    # add back a ragged instead of a dense tensor.
    unbatched_spec = batched_spec._unbatch()
    unbatched_tensor_lists = [unbatched_spec._to_tensor_list(x)
                              for x in unbatched]
    # Stack each component across the four elements, then rebuild the batch.
    batched_tensor_list = [array_ops_stack.stack(tensors)
                           for tensors in zip(*unbatched_tensor_lists)]
    actual_batched = unbatched_spec._batch(4)._from_tensor_list(
        batched_tensor_list)
    self.assertAllEqual(actual_batched, batched)
  def _testGradient(self, func, x, expected_grad, grad_y=None):
    """Checks the gradient of `func` at ragged input `x`.

    Args:
      func: Function mapping a RaggedTensor to a Tensor or RaggedTensor.
      x: Nested python list used to build the ragged input.
      expected_grad: Expected gradient (nested list), or None when the
        gradient is expected to be disconnected.
      grad_y: Optional output gradients (nested list) to backpropagate.
    """
    x = ragged_factory_ops.constant(x)
    if grad_y is not None:
      grad_y = ragged_factory_ops.constant(grad_y)
    # Eager mode uses GradientTape; graph mode uses symbolic gradients.
    if context.executing_eagerly():
      with backprop.GradientTape() as t:
        t.watch(x)
        y = func(x)
      g = t.gradient(y, x, grad_y)
    else:
      y = func(x)
      g = gradients_impl.gradients(ys=y, xs=x, grad_ys=grad_y)[0]
    if expected_grad is None:
      self.assertIsNone(g)
    else:
      g = ragged_tensor.convert_to_tensor_or_ragged_tensor(g)
      self.assertAllClose(g, expected_grad)
@parameterized.named_parameters([
dict(
testcase_name='RaggedInput',
func=lambda x: math_ops.reduce_prod(x, axis=1),
x=[[1., 2.], [3.]],
expected=[[2., 1.], [1.]]),
dict(
testcase_name='RaggedOutput',
func=lambda x: ragged_concat_ops.stack([x, x[:1]]),
x=[3., 2.],
expected=[2., 1.]),
dict(
testcase_name='RaggedInputAndOutput',
func=lambda x: array_ops_stack.stack([x, x * x]),
x=[[1., 2.], [3.]],
expected=[[3., 5.], [7.]]),
dict(
testcase_name='RaggedOutputWithGradYs',
func=lambda x: ragged_concat_ops.stack([x, x[:1]]),
x=[3., 2.],
grad_ys=[[1., 1.], [1.]],
expected=[2., 1.]),
dict(
testcase_name='RaggedInputAndOutputWithGradYs',
func=lambda x: array_ops_stack.stack([x, x * x]),
x=[[1., 2.], [3.]],
grad_ys=[[[1., 1.], [1.]], [[1., 1.], [1.]]],
expected=[[3., 5.], [7.]]),
dict(
testcase_name='RaggedRank3',
func=lambda x: ragged_concat_ops.stack([x, (x * x)[:, 1:]]),
x=[[[1., 2.], [3., 4., 5.]], [[6.]]],
expected=[[[1.0, 1.0], [7.0, 9.0, 11.0]], [[1.0]]]),
dict(
testcase_name='RaggedIndexedSlices',
func=lambda x: ragged_gather_ops.gather(x, [0, 2]),
x=[[1., 2.], [3.], [4., 5., 6.]],
expected=[[1., 1.], [0.], [1., 1., 1.]]),
])
  def testGradient(self, func, x, expected, grad_ys=None):
    """Parameterized gradient checks for ragged inputs and/or outputs."""
    self._testGradient(func, x, expected, grad_ys)
def testHigherOrderGradient(self):
x = ragged_factory_ops.constant([[1.0, 2.0], [3.0]])
with backprop.GradientTape() as t2:
t2.watch(x)
with backprop.GradientTape() as t1:
t1.watch(x)
y = x * x * x
dy_dx = t1.gradient(y, x)
d2y_dx2 = t2.gradient(dy_dx, x)
self.assertAllEqual(dy_dx, [[3.0, 12.0], [27.0]])
self.assertAllEqual(d2y_dx2, [[6.0, 12.0], [18.0]])
def testUnconnectedGradient(self):
x = ragged_factory_ops.constant([[1.0, 2.0], [3.0]])
with backprop.GradientTape() as t:
t.watch(x)
y = ragged_factory_ops.constant([[2.0, 4.0], [6.0]])
self.assertIsNone(t.gradient(y, x))
  def testStopGradient(self):
    """stop_gradient on the values blocks the gradient path through `y`."""
    def func(x):
      y = x * constant_op.constant([[1.], [3.]])
      # Only the direct `x` factor in `z = x * y` contributes gradients.
      y = y.with_values(array_ops.stop_gradient(y.values))
      z = x * y
      return math_ops.reduce_sum(z)
    self._testGradient(func, [[1., 2.], [3., 4., 5.]],
                       [[1., 2.], [9., 12., 15.]])
  def testStopGradientNoneComponent(self):
    """When every value is stopped, the overall gradient is None."""
    def func(x):
      y = x * constant_op.constant([[1.], [3.]])
      y = y.with_values(array_ops.stop_gradient(y.values))
      return y
    self._testGradient(func, [[1., 2], [3, 4, 5]], None)
  def testRaggedVariantGradients(self):
    """Gradients flow through an unbatched variant round trip."""
    def func(x):
      rt1 = RaggedTensor.from_row_splits(values=x, row_splits=[0, 4, 7, 8])
      rt2 = rt1 * [[10], [100], [1000]]
      v = rt2._to_variant(batched_input=False)
      rt3 = RaggedTensor._from_variant(v, dtype=rt2.dtype, output_ragged_rank=1)
      return rt3.flat_values
    self._testGradient(func, [3.0, 1.0, 4.0, 1.0, 1.0, 0.0, 2.0, 1.0],
                       [10., 10., 10., 10., 100., 100., 100., 1000.])
  def testRaggedVariantGradientsEmptyRows(self):
    """Unbatched variant round-trip gradients with some empty rows."""
    def func(x):
      rt1 = RaggedTensor.from_row_splits(
          values=x, row_splits=[0, 2, 2, 4, 7, 7, 8])
      rt2 = rt1 * [[10], [20], [30], [40], [50], [60]]
      v = rt2._to_variant(batched_input=False)
      rt3 = RaggedTensor._from_variant(v, dtype=rt2.dtype, output_ragged_rank=1)
      return rt3.flat_values
    self._testGradient(func, [3.0, 1.0, 4.0, 1.0, 1.0, 0.0, 2.0, 1.0],
                       [10., 10., 30., 30., 40., 40., 40., 60.])
def testRaggedVariantSteps(self):
x = [3.0, 1.0, 4.0, 1.0, 1.0, 0.0, 2.0, 1.0]
rt1 = RaggedTensor.from_row_splits(values=x, row_splits=[0, 4, 7, 8])
rt2 = rt1 * [[10], [100], [1000]]
v = rt2._to_variant(batched_input=False)
rt3 = RaggedTensor._from_variant(v, dtype=rt2.dtype, output_ragged_rank=1)
self.assertAllClose([30., 10., 40., 10., 100., 0., 200., 1000.],
rt3.flat_values)
  def testRaggedVariantGradientsBatched(self):
    """Gradients flow through a batched variant round trip."""
    def func(x):
      rt1 = RaggedTensor.from_row_splits(values=x, row_splits=[0, 4, 7, 8])
      rt2 = rt1 * [[10], [100], [1000]]
      v = rt2._to_variant(batched_input=True)
      rt3 = RaggedTensor._from_variant(v, dtype=rt2.dtype, output_ragged_rank=1)
      return rt3.flat_values
    self._testGradient(func, [3.0, 1.0, 4.0, 1.0, 1.0, 0.0, 2.0, 1.0],
                       [10., 10., 10., 10., 100., 100., 100., 1000.])
  def testRaggedVariantGradientsEmptyRowsBatched(self):
    """Batched variant round-trip gradients with some empty rows."""
    def func(x):
      rt1 = RaggedTensor.from_row_splits(
          values=x, row_splits=[0, 2, 2, 4, 7, 7, 8])
      rt2 = rt1 * [[10], [20], [30], [40], [50], [60]]
      v = rt2._to_variant(batched_input=True)
      rt3 = RaggedTensor._from_variant(v, dtype=rt2.dtype, output_ragged_rank=1)
      return rt3.flat_values
    self._testGradient(func, [3.0, 1.0, 4.0, 1.0, 1.0, 0.0, 2.0, 1.0],
                       [10., 10., 30., 30., 40., 40., 40., 60.])
def testRaggedVariantGradientsEmptyOutputBatched(self):
  """Batched round-trip where every row is empty produces empty gradients."""

  def round_trip(x):
    ragged = RaggedTensor.from_row_splits(
        values=x, row_splits=[0, 0, 0, 0, 0, 0, 0])
    scaled = ragged * [[10], [20], [30], [40], [50], [60]]
    variant = scaled._to_variant(batched_input=True)
    decoded = RaggedTensor._from_variant(
        variant, dtype=scaled.dtype, output_ragged_rank=1)
    return decoded.flat_values

  self._testGradient(round_trip, [], [])
def testRaggedVariantGradientsBatchedAndSliced(self):
  """Gradients flow through a batched variant that is sliced per row.

  Slicing row `i` out of the batched variant should route gradients only
  to the input values belonging to that row; all other positions get zero.
  """

  def func(x, i):
    rt1 = RaggedTensor.from_row_splits(values=x, row_splits=[0, 4, 7, 8])
    rt2 = rt1 * [[10], [100], [1000]]
    # Index into the batched variant tensor to extract one row's variant.
    v_slice = rt2._to_variant(batched_input=True)[i]
    return RaggedTensor._from_variant(
        v_slice, dtype=rt2.dtype, output_ragged_rank=0)

  # Row 0 covers input positions 0..3, row 1 positions 4..6, row 2 position 7.
  self._testGradient(
      functools.partial(func, i=0), [3.0, 1.0, 4.0, 1.0, 1.0, 0.0, 2.0, 1.0],
      [10., 10., 10., 10., 0., 0., 0., 0.])
  self._testGradient(
      functools.partial(func, i=1), [3.0, 1.0, 4.0, 1.0, 1.0, 0.0, 2.0, 1.0],
      [0., 0., 0., 0., 100., 100., 100., 0.])
  self._testGradient(
      functools.partial(func, i=2), [3.0, 1.0, 4.0, 1.0, 1.0, 0.0, 2.0, 1.0],
      [0., 0., 0., 0., 0., 0., 0., 1000.])
def testRaggedVariantGradientsEmptyRowsBatchedAndSliced(self):
  """Slicing a batched variant with empty rows routes gradients correctly.

  Rows 1 and 4 are empty, so slicing them yields all-zero gradients; every
  other slice routes gradients only to that row's input positions.
  """

  def func(x, i):
    rt1 = RaggedTensor.from_row_splits(
        values=x, row_splits=[0, 2, 2, 4, 7, 7, 8])
    rt2 = rt1 * [[10], [20], [30], [40], [50], [60]]
    # Index into the batched variant tensor to extract one row's variant.
    v_slice = rt2._to_variant(batched_input=True)[i]
    return RaggedTensor._from_variant(
        v_slice, dtype=rt2.dtype, output_ragged_rank=0)

  self._testGradient(
      functools.partial(func, i=0), [3.0, 1.0, 4.0, 1.0, 1.0, 0.0, 2.0, 1.0],
      [10., 10., 0., 0., 0., 0., 0., 0.])
  self._testGradient(
      functools.partial(func, i=1), [3.0, 1.0, 4.0, 1.0, 1.0, 0.0, 2.0, 1.0],
      [0., 0., 0., 0., 0., 0., 0., 0.])
  self._testGradient(
      functools.partial(func, i=2), [3.0, 1.0, 4.0, 1.0, 1.0, 0.0, 2.0, 1.0],
      [0., 0., 30., 30., 0., 0., 0., 0.])
  self._testGradient(
      functools.partial(func, i=3), [3.0, 1.0, 4.0, 1.0, 1.0, 0.0, 2.0, 1.0],
      [0., 0., 0., 0., 40., 40., 40., 0.])
  self._testGradient(
      functools.partial(func, i=4), [3.0, 1.0, 4.0, 1.0, 1.0, 0.0, 2.0, 1.0],
      [0., 0., 0., 0., 0., 0., 0., 0.])
  self._testGradient(
      functools.partial(func, i=5), [3.0, 1.0, 4.0, 1.0, 1.0, 0.0, 2.0, 1.0],
      [0., 0., 0., 0., 0., 0., 0., 60.])
def testRaggedVariantGradientsRaggedRank0(self):
  """Gradients flow through a variant round-trip of a ragged-rank-0 tensor."""

  def round_trip(x):
    doubled = x * 2
    variant = gen_ragged_conversion_ops.ragged_tensor_to_variant(
        [], doubled, batched_input=False)
    return RaggedTensor._from_variant(
        variant, dtype=doubled.dtype, output_ragged_rank=0)

  self._testGradient(round_trip, [3.0, 1.0, 4.0, 1.0, 1.0, 0.0, 2.0, 1.0],
                     [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0])
def testRaggedVariantGradientsRaggedRank3(self):
  """Gradients flow through a variant round-trip of a ragged-rank-3 tensor."""

  def func(x):
    x2 = x * 2
    # Three levels of row splits -> ragged rank 3.
    rt1 = RaggedTensor.from_nested_row_splits(
        x2, ([0, 0, 3], [0, 2, 2, 3], [0, 4, 7, 8]))
    v = rt1._to_variant(batched_input=False)
    rt3 = RaggedTensor._from_variant(v, dtype=x2.dtype, output_ragged_rank=3)
    return rt3.flat_values

  # d(2*x)/dx == 2 for every element.
  self._testGradient(func, [3.0, 1.0, 4.0, 1.0, 1.0, 0.0, 2.0, 1.0],
                     [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0])
def testRaggedVariantGradientsViaMapFn(self):
  """Gradients flow through map_fn applied over a ragged tensor."""
  ragged = RaggedTensor.from_row_splits(
      values=[3, 1.0, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 7, 8])

  def sum_of_row_rms(x):
    def row_rms(row):
      return math_ops.sqrt(
          math_ops.reduce_mean(math_ops.square(row * x), keepdims=True))

    return math_ops.reduce_sum(map_fn.map_fn(row_rms, ragged))

  self._testGradient(sum_of_row_rms, 3.0, 14.653377)
def testRaggedVariantGradientsEmptyRowsViaMapFn(self):
  """Gradients flow through map_fn when the ragged input has empty rows."""
  ragged = RaggedTensor.from_row_splits(
      values=[3, 1.0, 4, 1, 5, 9, 2, 6], row_splits=[0, 2, 2, 4, 7, 7, 8])

  def sum_of_row_rms(x):
    def row_rms(row):
      return math_ops.sqrt(
          math_ops.reduce_mean(math_ops.square(row * x), keepdims=True))

    return math_ops.reduce_sum(map_fn.map_fn(row_rms, ragged))

  self._testGradient(sum_of_row_rms, 3.0, 17.206844)
def testRaggedVariantGradientsEmptyOutputViaMapFn(self):
  """map_fn over an all-empty ragged tensor yields a zero gradient."""
  ragged = RaggedTensor.from_row_splits(
      values=[], row_splits=[0, 0, 0, 0])

  def sum_of_row_rms(x):
    def row_rms(row):
      return math_ops.sqrt(
          math_ops.reduce_mean(math_ops.square(row * x), keepdims=True))

    return math_ops.reduce_sum(map_fn.map_fn(row_rms, ragged))

  self._testGradient(sum_of_row_rms, 3.0, 0.0)
def testRaggedVariantGradientsViaMapFnReduce(self):
  """Gradients of a per-row reduce_max flow only to each row's max element."""

  def row_maxes(x):
    ragged = RaggedTensor.from_row_splits(values=x, row_splits=[0, 4, 7, 8])
    return map_fn.map_fn(
        math_ops.reduce_max,
        ragged,
        fn_output_signature=tensor_lib.TensorSpec((), x.dtype))

  self._testGradient(row_maxes, [3.0, 1.0, 4.0, 1.0, 1.0, 0.0, 2.0, 1.0],
                     [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0])
def testRaggedVariantGradientsEmptyRowsViaMapFnReduce(self):
  """Per-row reduce_max gradients when the ragged input has empty rows."""

  def func(x):
    rt1 = RaggedTensor.from_row_splits(
        values=x, row_splits=[0, 2, 2, 4, 7, 7, 8])
    return map_fn.map_fn(
        math_ops.reduce_max,
        rt1,
        fn_output_signature=tensor_lib.TensorSpec((), x.dtype))

  # Gradient is 1 at each row's max element; empty rows contribute nothing.
  self._testGradient(func, [3.0, 1.0, 4.0, 1.0, 1.0, 0.0, 2.0, 1.0],
                     [1.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0])
def testRaggedVariantGradientsEmptyOutputViaMapFnReduce(self):
  """Per-row reduce_max over an all-empty ragged tensor yields empty grads."""

  def func(x):
    rt1 = RaggedTensor.from_row_splits(
        values=x, row_splits=[0, 0, 0, 0])
    return map_fn.map_fn(
        math_ops.reduce_max,
        rt1,
        fn_output_signature=tensor_lib.TensorSpec((), x.dtype))

  self._testGradient(func, [], [])
def testRaggedVariantGradientsErrors(self):
  """Taking gradients through a >1-D stack of variants raises a ValueError."""
  if context.executing_eagerly():
    # This gradient error is only raised during graph construction.
    return

  rt = RaggedTensor.from_row_splits([1.0, 2.0], row_splits=[0, 2, 2])
  v1 = rt._to_variant()
  # Stacking twice produces a 2-D variant tensor; the gradient for
  # RaggedTensorToVariant only supports 0-D or 1-D variant output.
  v2 = array_ops_stack.stack([array_ops_stack.stack([v1])])
  y = RaggedTensor._from_variant(v2, rt.dtype, output_ragged_rank=3)
  with self.assertRaisesRegex(
      ValueError, 'Unable to compute gradient: RaggedTensorToVariant '
      'can currently only generate 0D or 1D output.'):
    gradients_impl.gradients(ys=y.flat_values, xs=rt.flat_values)
def assertNumpyObjectTensorsRecursivelyEqual(self, a, b, msg):
  """Check that two numpy arrays are equal.

  For arrays with dtype=object, check values recursively to see if a and b
  are equal.  (c.f. `np.array_equal`, which checks dtype=object values using
  object identity.)

  Args:
    a: A numpy array.
    b: A numpy array.
    msg: Message to display if a != b.
  """
  if isinstance(a, np.ndarray) and a.dtype == object:
    self.assertEqual(a.dtype, b.dtype, msg)
    self.assertEqual(a.shape, b.shape, msg)
    self.assertLen(a, len(b), msg)
    # Recurse element-by-element, since equality on dtype=object arrays
    # would otherwise be decided by object identity.
    for a_val, b_val in zip(a, b):
      self.assertNumpyObjectTensorsRecursivelyEqual(a_val, b_val, msg)
  else:
    self.assertAllEqual(a, b, msg)
@parameterized.named_parameters([
    # (name, ragged input, expected numpy result).  Ragged dimensions become
    # dtype=object arrays of per-row int32 arrays; uniform shapes become
    # ordinary dense arrays.
    ('Shape_2_R',
     [[1, 2], [3, 4, 5]],
     np.array([int32array([1, 2]), int32array([3, 4, 5])], dtype=object)),
    ('Shape_2_2',
     [[1, 2], [3, 4]],
     np.array([[1, 2], [3, 4]])),
    ('Shape_2_R_2',
     [[[1, 2], [3, 4]], [[5, 6]]],
     np.array([int32array([[1, 2], [3, 4]]), int32array([[5, 6]])],
              dtype=object)),
    ('Shape_3_2_R',
     [[[1], []], [[2, 3], [4]], [[], [5, 6, 7]]],
     np.array([[int32array([1]), int32array([])],
               [int32array([2, 3]), int32array([4])],
               [int32array([]), int32array([5, 6, 7])]], dtype=object)),
    ('Shape_0_R',
     ragged_factory_ops.constant_value([], ragged_rank=1, dtype=np.int32),
     np.zeros([0, 0], dtype=np.int32)),
    ('Shape_0_R_2',
     ragged_factory_ops.constant_value([], ragged_rank=1,
                                       inner_shape=(2,), dtype=np.int32),
     np.zeros([0, 0, 2], dtype=np.int32)),
])  # pyformat: disable
def testRaggedTensorNumpy(self, rt, expected):
  """RaggedTensor.numpy() returns the expected (possibly nested) arrays."""
  if isinstance(rt, list):
    rt = ragged_factory_ops.constant(rt, dtype=dtypes.int32)
  else:
    rt = ragged_tensor.convert_to_tensor_or_ragged_tensor(rt)
  if context.executing_eagerly():
    actual = rt.numpy()
    self.assertNumpyObjectTensorsRecursivelyEqual(
        expected, actual, 'Expected %r, got %r' % (expected, actual))
  else:
    # .numpy() is only defined under eager execution; graph mode must raise.
    with self.assertRaisesRegex(ValueError, 'only supported in eager mode'):
      rt.numpy()
@parameterized.parameters([
    # (ragged input, ragged_rank, shape to set).  Each shape is compatible
    # with the input; None dimensions are left unconstrained.
    ([[[1, 2], [3, 4, 5]], [[6]]], 2, None),
    ([[[1, 2], [3, 4, 5]], [[6]]], 2, [None, None, None]),
    ([[[1, 2], [3, 4, 5]], [[6]]], 2, [2, None, None]),
    ([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9]]], 1, None),
    ([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9]]], 1, [None, None, None]),
    ([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9]]], 1, [2, None, None]),
    ([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9]]], 1, [2, None, 3]),
    ([[[1, 2, 3]]], 1, [1, 1, None]),
    ([[[1, 2, 3]]], 1, [1, 1, 3]),
])
def testRaggedTensorSetShape(self, rt, rt_ragged_rank, shape):
  """_set_shape refines the static shape to be compatible with `shape`."""
  rt1 = ragged_factory_ops.constant(rt, ragged_rank=rt_ragged_rank)
  rt1._set_shape(shape)
  rt1.shape.assert_is_compatible_with(shape)
  if shape is not None:
    self.assertIsNot(rt1.shape.rank, None)
    # Every dimension that was specified must now be known exactly.
    for a, b in zip(rt1.shape, shape):
      if b is not None:
        self.assertEqual(a, b)
@parameterized.parameters([
    # Same cases as testRaggedTensorSetShape, but with the tensor's
    # components replaced by placeholders so static shapes start unknown.
    ([[[1, 2], [3, 4, 5]], [[6]]], 2, None),
    ([[[1, 2], [3, 4, 5]], [[6]]], 2, [None, None, None]),
    ([[[1, 2], [3, 4, 5]], [[6]]], 2, [2, None, None]),
    ([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9]]], 1, None),
    ([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9]]], 1, [None, None, None]),
    ([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9]]], 1, [2, None, None]),
    ([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9]]], 1, [2, None, 3]),
    ([[[1, 2, 3]]], 1, [1, 1, None]),
    ([[[1, 2, 3]]], 1, [1, 1, 3]),
])
def testRaggedTensorSetShapeWithPlaceholders(self, rt, rt_ragged_rank, shape):
  """_set_shape works when the component tensors have unknown static shapes."""
  rt2 = nest.map_structure(
      lambda x: array_ops.placeholder_with_default(x, None),
      ragged_factory_ops.constant(rt, ragged_rank=rt_ragged_rank),
      expand_composites=True)
  rt2._set_shape(shape)
  rt2.shape.assert_is_compatible_with(shape)
  if shape is not None:
    self.assertIsNot(rt2.shape.rank, None)
    # Every dimension that was specified must now be known exactly.
    for a, b in zip(rt2.shape, shape):
      if b is not None:
        self.assertEqual(a, b)
def testRaggedTensorSetShapeUniformRowLength(self):
  """_set_shape accepts a fully-specified shape with uniform row lengths."""
  rt = [[[1], [2], [3]], [[4], [5], [6]]]
  rt1 = RaggedTensor.from_tensor(rt, ragged_rank=1)
  rt1._set_shape([2, 3, 1])
  # Repeat with placeholder components, so static shapes start unknown.
  rt2 = nest.map_structure(
      lambda x: array_ops.placeholder_with_default(x, None),
      rt1,
      expand_composites=True)
  rt2._set_shape([2, 3, 1])
def testRaggedTensorSetShapeInconsistentShapeError(self):
  """_set_shape raises when the requested shape contradicts actual sizes."""
  rt = RaggedTensor.from_tensor([[[1], [2], [3]], [[4], [5], [6]]],
                                ragged_rank=1)
  self.assertEqual(rt.shape.as_list(), [2, 3, 1])
  # Innermost dimension mismatch (actual 1 vs requested 5).
  with self.assertRaises(ValueError):
    rt._set_shape([None, None, 5])
  # Row-length mismatch (actual 3 vs requested 5).
  with self.assertRaisesRegex(ValueError, 'Inconsistent size'):
    rt._set_shape([None, 5, None])
  # Outermost dimension mismatch (actual 2 vs requested 5).
  with self.assertRaises(ValueError):
    rt._set_shape([5, None, None])
@test_util.run_all_in_graph_and_eager_modes
| RaggedTensorTest |
python | joke2k__faker | tests/providers/test_automotive.py | {
"start": 14380,
"end": 14734
} | class ____(_SimpleAutomotiveTestMixin):
"""Test zh_TW automotive provider methods"""
license_plate_pattern: Pattern = re.compile(
r"([A-Z]{2}-\d{4})|" # prior 2012 v1
r"(\d{4}-[A-Z]{2})|" # prior 2012 v2
r"([A-Z]{3}-\d{4})|" # new format since 2014
r"([A-Z]{3}-\d{3})", # commercial cars since 2012
)
| TestZhTw |
python | google__pytype | pytype/tests/test_typing_namedtuple2.py | {
"start": 5925,
"end": 18408
} | class ____(test_base.BaseTest):
"""Tests for the typing.NamedTuple overlay in Python 3."""
def test_basic_namedtuple(self):
ty = self.Infer("""
import typing
X = typing.NamedTuple("X", [("a", int), ("b", str)])
x = X(1, "hello")
a = x.a
b = x.b
""")
self.assertTypesMatchPytd(
ty,
"""
import typing
from typing import NamedTuple
a: int
b: str
x: X
class X(NamedTuple):
a: int
b: str
""",
)
def test_union_attribute(self):
ty = self.Infer("""
from typing import NamedTuple, Union
X = NamedTuple("X", [("x", Union[bytes, str])])
def foo(x: X):
return x.x
""")
self.assertMultiLineEqual(
pytd_utils.Print(ty.Lookup("foo")),
"def foo(x: X) -> Union[bytes, str]: ...",
)
def test_bad_call(self):
errorlog = self.CheckWithErrors("""
from typing import NamedTuple
E2 = NamedTuple('Employee2', [('name', str), ('id', int)], # invalid-namedtuple-arg[e1] # wrong-keyword-args[e2]
birth=str, gender=bool)
""")
self.assertErrorRegexes(
errorlog,
{
"e1": r"Either list of fields or keywords.*",
"e2": r".*(birth, gender).*NamedTuple",
},
)
def test_bad_attribute(self):
errorlog = self.CheckWithErrors("""
from typing import NamedTuple
class SubCls(NamedTuple): # not-writable[e]
def __init__(self):
pass
""")
self.assertErrorRegexes(errorlog, {"e": r".*'__init__'.*[SubCls]"})
def test_bad_arg_count(self):
errorlog = self.CheckWithErrors("""
from typing import NamedTuple
class SubCls(NamedTuple):
a: int
b: int
cls1 = SubCls(5) # missing-parameter[e]
""")
self.assertErrorRegexes(errorlog, {"e": r"Missing.*'b'.*__new__"})
def test_bad_arg_name(self):
self.InferWithErrors("""
from typing import NamedTuple
class SubCls(NamedTuple): # invalid-namedtuple-arg
_a: int
b: int
cls1 = SubCls(5)
""")
def test_namedtuple_class(self):
self.Check("""
from typing import NamedTuple
class SubNamedTuple(NamedTuple):
a: int
b: str ="123"
c: int = 123
def __repr__(self) -> str:
return "__repr__"
def func():
pass
tuple1 = SubNamedTuple(5)
tuple2 = SubNamedTuple(5, "123")
tuple3 = SubNamedTuple(5, "123", 123)
E1 = NamedTuple('Employee1', name=str, id=int)
E2 = NamedTuple('Employee2', [('name', str), ('id', int)])
""")
def test_baseclass(self):
ty = self.Infer("""
from typing import NamedTuple
class baseClass:
x=5
y=6
class SubNamedTuple(baseClass, NamedTuple):
a: int
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import NamedTuple
class SubNamedTuple(baseClass, NamedTuple):
a: int
class baseClass:
x = ... # type: int
y = ... # type: int
""",
)
def test_fields(self):
self.Check("""
from typing import NamedTuple
class X(NamedTuple):
a: str
b: int
v = X("answer", 42)
a = v.a # type: str
b = v.b # type: int
""")
def test_field_wrong_type(self):
self.CheckWithErrors("""
from typing import NamedTuple
class X(NamedTuple):
a: str
b: int
v = X("answer", 42)
a_int = v.a # type: int # annotation-type-mismatch
""")
def test_unpacking(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import NamedTuple
class X(NamedTuple):
a: str
b: int
""",
)
ty = self.Infer(
"""
import foo
v = None # type: foo.X
a, b = v
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Union
v = ... # type: foo.X
a = ... # type: str
b = ... # type: int
""",
)
def test_bad_unpacking(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import NamedTuple
class X(NamedTuple):
a: str
b: int
""",
)
self.CheckWithErrors(
"""
import foo
v = None # type: foo.X
_, _, too_many = v # bad-unpacking
too_few, = v # bad-unpacking
a: float
b: str
a, b = v # annotation-type-mismatch # annotation-type-mismatch
""",
pythonpath=[d.path],
)
def test_is_tuple_type_and_superclasses(self):
"""Test that a NamedTuple (function syntax) behaves like a tuple."""
self.Check("""
from typing import MutableSequence, NamedTuple, Sequence, Tuple, Union
class X(NamedTuple):
a: int
b: str
a = X(1, "2")
a_tuple = a # type: tuple
a_typing_tuple = a # type: Tuple[int, str]
a_typing_tuple_elipses = a # type: Tuple[Union[int, str], ...]
a_sequence = a # type: Sequence[Union[int, str]]
a_iter = iter(a) # type: tupleiterator[Union[int, str]]
a_first = a[0] # type: int
a_second = a[1] # type: str
a_first_next = next(iter(a)) # We don't know the type through the iter() function
""")
def test_is_not_incorrect_types(self):
self.CheckWithErrors("""
from typing import MutableSequence, NamedTuple, Sequence, Tuple, Union
class X(NamedTuple):
a: int
b: str
x = X(1, "2")
x_wrong_tuple_types = x # type: Tuple[str, str] # annotation-type-mismatch
x_not_a_list = x # type: list # annotation-type-mismatch
x_not_a_mutable_seq = x # type: MutableSequence[Union[int, str]] # annotation-type-mismatch
x_first_wrong_element_type = x[0] # type: str # annotation-type-mismatch
""")
def test_meets_protocol(self):
self.Check("""
from typing import NamedTuple, Protocol
class X(NamedTuple):
a: int
b: str
class IntAndStrHolderVars(Protocol):
a: int
b: str
class IntAndStrHolderProperty(Protocol):
@property
def a(self) -> int:
...
@property
def b(self) -> str:
...
a = X(1, "2")
a_vars_protocol: IntAndStrHolderVars = a
a_property_protocol: IntAndStrHolderProperty = a
""")
def test_does_not_meet_mismatching_protocol(self):
self.CheckWithErrors("""
from typing import NamedTuple, Protocol
class X(NamedTuple):
a: int
b: str
class DualStrHolder(Protocol):
a: str
b: str
class IntAndStrHolderVars_Alt(Protocol):
the_number: int
the_string: str
class IntStrIntHolder(Protocol):
a: int
b: str
c: int
a = X(1, "2")
a_wrong_types: DualStrHolder = a # annotation-type-mismatch
a_wrong_names: IntAndStrHolderVars_Alt = a # annotation-type-mismatch
a_too_many: IntStrIntHolder = a # annotation-type-mismatch
""")
def test_generated_members(self):
ty = self.Infer("""
from typing import NamedTuple
class X(NamedTuple):
a: int
b: str
""")
self.assertTypesMatchPytd(
ty,
("""
from typing import NamedTuple
class X(NamedTuple):
a: int
b: str
"""),
)
def test_namedtuple_with_defaults(self):
ty = self.Infer("""
from typing import NamedTuple
class SubNamedTuple(NamedTuple):
a: int
b: str ="123"
c: int = 123
def __repr__(self) -> str:
return "__repr__"
def func():
pass
X = SubNamedTuple(1, "aaa", 222)
a = X.a
b = X.b
c = X.c
f = X.func
Y = SubNamedTuple(1)
a2 = Y.a
b2 = Y.b
c2 = Y.c
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import NamedTuple
X: SubNamedTuple
a: int
b: str
c: int
Y: SubNamedTuple
a2: int
b2: str
c2: int
class SubNamedTuple(NamedTuple):
a: int
b: str = ...
c: int = ...
def __repr__(self) -> str: ...
def func() -> None: ...
def f() -> None: ...
""",
)
def test_bad_default(self):
errors = self.CheckWithErrors("""
from typing import NamedTuple
class Foo(NamedTuple):
x: str = 0 # annotation-type-mismatch[e]
""")
self.assertErrorRegexes(errors, {"e": r"Annotation: str.*Assignment: int"})
def test_nested_namedtuple(self):
# Guard against a crash when hitting max depth (b/162619036)
self.assertNoCrash(
self.Check,
"""
from typing import NamedTuple
def foo() -> None:
class A(NamedTuple):
x: int
def bar():
foo()
""",
)
def test_generic_namedtuple(self):
ty = self.Infer("""
from typing import Callable, Generic, NamedTuple, TypeVar
def _int_identity(x: int) -> int:
return x
T = TypeVar('T')
class Foo(NamedTuple, Generic[T]):
x: T
y: Callable[[T], T]
foo_int = Foo(x=0, y=_int_identity)
x_out = foo_int.x
y_out = foo_int.y
y_call_out = foo_int.y(2)
foo_str: Foo[str] = Foo(x="hi", y=__any_object__)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Callable, Generic, NamedTuple, TypeVar
def _int_identity(x: int) -> int: ...
T = TypeVar("T")
foo_int = ... # type: Foo[int]
x_out = ... # type: int
y_out = ... # type: Callable[[int], int]
y_call_out = ... # type: int
foo_str = ... # type: Foo[str]
class Foo(NamedTuple, Generic[T]):
x: T
y: Callable[[T], T]
""",
)
def test_bad_typevar(self):
self.CheckWithErrors("""
from typing import Generic, NamedTuple, TypeVar
T = TypeVar('T')
class Foo(NamedTuple):
x: T # invalid-annotation
""")
def test_generic_callable(self):
self.Check("""
from typing import Callable, NamedTuple, TypeVar
T = TypeVar('T')
class Foo(NamedTuple):
f: Callable[[T], T]
assert_type(Foo(f=__any_object__).f(''), str)
""")
def test_reingest(self):
foo_ty = self.Infer("""
from typing import Callable, Generic, NamedTuple, TypeVar
T = TypeVar('T')
class Foo(NamedTuple, Generic[T]):
x: T
class Bar(NamedTuple):
x: Callable[[T], T]
""")
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", pytd_utils.Print(foo_ty))
self.Check(
"""
import foo
assert_type(foo.Foo(x=0).x, int)
assert_type(foo.Bar(x=__any_object__).x(0), int)
""",
pythonpath=[d.path],
)
def test_recursive_tuple(self):
"""Regression test for a recursive tuple containing a namedtuple."""
# See b/227506303 for details
self.Check("""
from typing import Any, NamedTuple
A = NamedTuple("A", [("x", Any), ("y", Any)])
def decorator(fn):
def wrapper(*args, **kwargs):
return fn(*args, **kwargs)
return wrapper
@decorator
def g(x, y):
nt = A(1, 2)
x = x, nt
y = y, nt
def h():
max(x, y)
""")
def test_override_method(self):
foo_pyi = """
from typing import NamedTuple
class Foo(NamedTuple):
a: float
b: str
def __repr__(self) -> str: ...
"""
with self.DepTree([("foo.pyi", foo_pyi)]):
self.Check("""
import foo
class Bar(foo.Foo):
def __repr__(self):
return super().__repr__()
x = Bar(1, '2')
y = x.__repr__()
""")
def test_empty_namedtuple_iterable(self):
self.Check("""
from typing import NamedTuple, Iterable
class Empty(NamedTuple):
pass
def f(x: Iterable[int]) -> None:
pass
f(Empty())
""")
| NamedTupleTestPy3 |
python | pypa__warehouse | warehouse/utils/security_policy.py | {
"start": 561,
"end": 712
} | class ____(enum.Enum):
BASIC_AUTH = "basic-auth"
SESSION = "session"
MACAROON = "macaroon"
@implementer(ISecurityPolicy)
| AuthenticationMethod |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/take_test.py | {
"start": 3259,
"end": 4612
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(index=[-1, 3, 4])))
def testInvalidIndex(self, index):
dataset = dataset_ops.Dataset.range(10).take(3)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(random_access.at(dataset, index=index))
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(index=[-2, 0, 1])))
def testEmptyDataset(self, index):
dataset = dataset_ops.Dataset.from_tensor_slices([]).take(5)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(random_access.at(dataset, index=index))
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(count=[-1, 0, 4, 10, 25])))
def testBasic(self, count):
dataset = dataset_ops.Dataset.range(10).take(count)
num_output = min(count, 10) if count != -1 else 10
for i in range(num_output):
self.assertEqual(
self.evaluate(random_access.at(dataset, index=i)), i)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(random_access.at(dataset, index=num_output))
| TakeRandomAccessTest |
python | sympy__sympy | sympy/physics/units/dimensions.py | {
"start": 4649,
"end": 10002
} | class ____(Expr):
"""
This class represent the dimension of a physical quantities.
The ``Dimension`` constructor takes as parameters a name and an optional
symbol.
For example, in classical mechanics we know that time is different from
temperature and dimensions make this difference (but they do not provide
any measure of these quantities.
>>> from sympy.physics.units import Dimension
>>> length = Dimension('length')
>>> length
Dimension(length)
>>> time = Dimension('time')
>>> time
Dimension(time)
Dimensions can be composed using multiplication, division and
exponentiation (by a number) to give new dimensions. Addition and
subtraction is defined only when the two objects are the same dimension.
>>> velocity = length / time
>>> velocity
Dimension(length/time)
It is possible to use a dimension system object to get the dimensionsal
dependencies of a dimension, for example the dimension system used by the
SI units convention can be used:
>>> from sympy.physics.units.systems.si import dimsys_SI
>>> dimsys_SI.get_dimensional_dependencies(velocity)
{Dimension(length, L): 1, Dimension(time, T): -1}
>>> length + length
Dimension(length)
>>> l2 = length**2
>>> l2
Dimension(length**2)
>>> dimsys_SI.get_dimensional_dependencies(l2)
{Dimension(length, L): 2}
"""
_op_priority = 13.0
# XXX: This doesn't seem to be used anywhere...
_dimensional_dependencies = {} # type: ignore
is_commutative = True
is_number = False
# make sqrt(M**2) --> M
is_positive = True
is_real = True
def __new__(cls, name, symbol=None):
if isinstance(name, str):
name = Symbol(name)
else:
name = sympify(name)
if not isinstance(name, Expr):
raise TypeError("Dimension name needs to be a valid math expression")
if isinstance(symbol, str):
symbol = Symbol(symbol)
elif symbol is not None:
assert isinstance(symbol, Symbol)
obj = Expr.__new__(cls, name)
obj._name = name
obj._symbol = symbol
return obj
@property
def name(self):
return self._name
@property
def symbol(self):
return self._symbol
def __str__(self):
"""
Display the string representation of the dimension.
"""
if self.symbol is None:
return "Dimension(%s)" % (self.name)
else:
return "Dimension(%s, %s)" % (self.name, self.symbol)
def __repr__(self):
return self.__str__()
def __neg__(self):
return self
def __add__(self, other):
from sympy.physics.units.quantities import Quantity
other = sympify(other)
if isinstance(other, Basic):
if other.has(Quantity):
raise TypeError("cannot sum dimension and quantity")
if isinstance(other, Dimension) and self == other:
return self
return super().__add__(other)
return self
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
# there is no notion of ordering (or magnitude) among dimension,
# subtraction is equivalent to addition when the operation is legal
return self + other
def __rsub__(self, other):
# there is no notion of ordering (or magnitude) among dimension,
# subtraction is equivalent to addition when the operation is legal
return self + other
def __pow__(self, other):
return self._eval_power(other)
def _eval_power(self, other):
other = sympify(other)
return Dimension(self.name**other)
def __mul__(self, other):
from sympy.physics.units.quantities import Quantity
if isinstance(other, Basic):
if other.has(Quantity):
raise TypeError("cannot sum dimension and quantity")
if isinstance(other, Dimension):
return Dimension(self.name*other.name)
if not other.free_symbols: # other.is_number cannot be used
return self
return super().__mul__(other)
return self
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
return self*Pow(other, -1)
def __rtruediv__(self, other):
return other * pow(self, -1)
@classmethod
def _from_dimensional_dependencies(cls, dependencies):
return reduce(lambda x, y: x * y, (
d**e for d, e in dependencies.items()
), 1)
def has_integer_powers(self, dim_sys):
"""
Check if the dimension object has only integer powers.
All the dimension powers should be integers, but rational powers may
appear in intermediate steps. This method may be used to check that the
final result is well-defined.
"""
return all(dpow.is_Integer for dpow in dim_sys.get_dimensional_dependencies(self).values())
# Create dimensions according to the base units in MKSA.
# For other unit systems, they can be derived by transforming the base
# dimensional dependency dictionary.
| Dimension |
python | pytorch__pytorch | torch/_higher_order_ops/schema.py | {
"start": 9329,
"end": 11223
} | class ____(torch._C.FunctionSchema):
def __init__(
self,
name: str,
overload_name: str,
arguments: list[torch._C.Argument],
returns: list[torch._C.Argument],
is_vararg: bool,
is_varret: bool,
schema_tree_spec: Optional[pytree.TreeSpec],
):
self.tree_spec = schema_tree_spec
self.is_vararg = is_vararg
self.is_varret = is_varret
super().__init__(
name,
overload_name,
arguments,
returns,
self.is_vararg,
self.is_varret,
)
def __deepcopy__(self, memo: Any) -> "HopSchema":
# Need to additionally copy the tree_spec since
# it's not a member of torch._C.FunctionSchema
return HopSchema(
self.name,
self.overload_name,
self.arguments,
self.returns,
self.is_vararg,
self.is_varret,
copy.deepcopy(self.tree_spec),
)
def find_hop_schema(
gm: torch.fx.GraphModule, target: Target
) -> list[torch._C.FunctionSchema]:
schemas = []
for node in gm.graph.find_nodes(op="call_function", target=target):
def _get_example_value(node: torch.fx.Node) -> Any:
if node.op == "get_attr":
assert isinstance(node.target, str)
return getattr(gm, node.target)
else:
return (
node.meta["example_value"]
if "example_value" in node.meta
else node.meta["val"]
)
fake_args, fake_kwargs = pytree.tree_map_only(
torch.fx.Node,
_get_example_value,
(node.args, node.kwargs),
)
schema = node.target.gen_schema(*fake_args, **fake_kwargs)
schemas.append(schema)
return schemas
| HopSchema |
python | tensorflow__tensorflow | tensorflow/core/function/capture/by_ref_capture_test.py | {
"start": 1090,
"end": 3306
} | class ____(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(val_type=[int, constant_op.constant]))
def test_direct_capture_mutation(self, val_type):
x = val_type(1)
@def_function.function
def f():
graph = ops.get_default_graph()
cap_x = graph._experimental_capture_side_input_by_ref("x", lambda: x)
return cap_x + 1
self.assertEqual(f(), 2)
x = val_type(2)
self.assertEqual(f(), 3)
@unittest.skip("By ref capture API does not work for nested tf.function.")
def test_capture_in_nested_function(self):
x = constant_op.constant(1)
@def_function.function
def f():
graph = ops.get_default_graph()
# Capture the same x for the outer tf.function
graph._experimental_capture_side_input_by_ref("x", lambda: x)
@def_function.function
def g():
graph = ops.get_default_graph()
cap_x = graph._experimental_capture_side_input_by_ref("xx", lambda: x)
return cap_x + 100
return g()
self.assertEqual(f(), 2)
x = constant_op.constant(2)
self.assertEqual(f(), 102)
def test_capture_in_outer_function(self):
x = 1
def g():
graph = ops.get_default_graph()
cap_x = graph._experimental_capture_side_input_by_ref("x", lambda: x)
return cap_x + 1
@def_function.function
def f():
return g()
self.assertEqual(f(), 2)
x = 2
self.assertEqual(f(), 3)
@unittest.skip("By ref capture API does not work for nested tf.function.")
def test_capture_in_outer_tf_function(self):
x = 1
@def_function.function
def g():
graph = ops.get_default_graph()
cap_x = graph._experimental_capture_side_input_by_ref("x", lambda: x)
return cap_x + 1
@def_function.function
def f():
# Call `_experimental_capture_side_input_by_ref` so that the outer
# tf.function will retrace when needed.
graph = ops.get_default_graph()
graph._experimental_capture_side_input_by_ref("x", lambda: x)
return g()
self.assertEqual(f(), 2)
x = 2
self.assertEqual(f(), 3)
if __name__ == "__main__":
v2_compat.enable_v2_behavior()
test.main()
| ByRefCaptureTest |
python | xlwings__xlwings | xlwings/_xlmac.py | {
"start": 18158,
"end": 19198
} | class ____(base_classes.Sheets):
def __init__(self, workbook):
self.workbook = workbook
@property
def api(self):
return None
@property
def active(self):
return Sheet(self.workbook, self.workbook.xl.active_sheet.name.get())
def __call__(self, name_or_index):
return Sheet(self.workbook, name_or_index)
def __len__(self):
return self.workbook.xl.count(each=kw.worksheet)
def __iter__(self):
for i in range(len(self)):
yield self(i + 1)
def add(self, before=None, after=None, name=None):
if before is None and after is None:
before = self.workbook.app.books.active.sheets.active
if before:
position = before.xl.before
else:
position = after.xl.after
xl = self.workbook.xl.make(new=kw.worksheet, at=position)
if name is not None:
xl.name.set(name)
xl = self.workbook.xl.worksheets[name]
return Sheet(self.workbook, xl.name.get())
| Sheets |
python | kamyu104__LeetCode-Solutions | Python/count-increasing-quadruplets.py | {
"start": 1313,
"end": 2072
} | class ____(object):
def countQuadruplets(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
left = [[0]*(len(nums)+1) for _ in xrange(len(nums))]
for j in xrange(len(nums)):
for i in xrange(j):
left[j][i+1] = left[j][i] + int(nums[i] < nums[j])
right = [[0]*(len(nums)+1) for _ in xrange(len(nums))]
for j in xrange(len(nums)):
for i in reversed(xrange(j+1, len(nums))):
right[j][i] = right[j][i+1] + int(nums[i] > nums[j])
result = 0
for k in xrange(len(nums)):
for j in xrange(k):
if nums[k] < nums[j]:
result += left[k][j]*right[j][k+1]
return result
| Solution3 |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/override_context_templates/package.py | {
"start": 217,
"end": 673
} | class ____(Package):
"""This package updates the context for Tcl modulefiles.
And additional lines that shouldn't be in the short description.
"""
homepage = "http://www.fake-spack-example.org"
url = "http://www.fake-spack-example.org/downloads/fake-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
tcl_template = "extension.tcl"
tcl_context = {"sentence": "sentence from package"}
| OverrideContextTemplates |
python | pydantic__pydantic | pydantic/types.py | {
"start": 61890,
"end": 73121
} | class ____(int):
"""Converts a string representing a number of bytes with units (such as `'1KB'` or `'11.5MiB'`) into an integer.
You can use the `ByteSize` data type to (case-insensitively) convert a string representation of a number of bytes into
an integer, and also to print out human-readable strings representing a number of bytes.
In conformance with [IEC 80000-13 Standard](https://en.wikipedia.org/wiki/ISO/IEC_80000) we interpret `'1KB'` to mean 1000 bytes,
and `'1KiB'` to mean 1024 bytes. In general, including a middle `'i'` will cause the unit to be interpreted as a power of 2,
rather than a power of 10 (so, for example, `'1 MB'` is treated as `1_000_000` bytes, whereas `'1 MiB'` is treated as `1_048_576` bytes).
!!! info
Note that `1b` will be parsed as "1 byte" and not "1 bit".
```python
from pydantic import BaseModel, ByteSize
class MyModel(BaseModel):
size: ByteSize
print(MyModel(size=52000).size)
#> 52000
print(MyModel(size='3000 KiB').size)
#> 3072000
m = MyModel(size='50 PB')
print(m.size.human_readable())
#> 44.4PiB
print(m.size.human_readable(decimal=True))
#> 50.0PB
print(m.size.human_readable(separator=' '))
#> 44.4 PiB
print(m.size.to('TiB'))
#> 45474.73508864641
```
"""
byte_sizes = {
'b': 1,
'kb': 10**3,
'mb': 10**6,
'gb': 10**9,
'tb': 10**12,
'pb': 10**15,
'eb': 10**18,
'kib': 2**10,
'mib': 2**20,
'gib': 2**30,
'tib': 2**40,
'pib': 2**50,
'eib': 2**60,
'bit': 1 / 8,
'kbit': 10**3 / 8,
'mbit': 10**6 / 8,
'gbit': 10**9 / 8,
'tbit': 10**12 / 8,
'pbit': 10**15 / 8,
'ebit': 10**18 / 8,
'kibit': 2**10 / 8,
'mibit': 2**20 / 8,
'gibit': 2**30 / 8,
'tibit': 2**40 / 8,
'pibit': 2**50 / 8,
'eibit': 2**60 / 8,
}
byte_sizes.update({k.lower()[0]: v for k, v in byte_sizes.items() if 'i' not in k})
byte_string_pattern = r'^\s*(\d*\.?\d+)\s*(\w+)?'
byte_string_re = re.compile(byte_string_pattern, re.IGNORECASE)
@classmethod
def __get_pydantic_core_schema__(cls, source: type[Any], handler: GetCoreSchemaHandler) -> core_schema.CoreSchema:
return core_schema.with_info_after_validator_function(
function=cls._validate,
schema=core_schema.union_schema(
[
core_schema.str_schema(pattern=cls.byte_string_pattern),
core_schema.int_schema(ge=0),
],
custom_error_type='byte_size',
custom_error_message='could not parse value and unit from byte string',
),
serialization=core_schema.plain_serializer_function_ser_schema(
int, return_schema=core_schema.int_schema(ge=0)
),
)
@classmethod
def _validate(cls, input_value: Any, /, _: core_schema.ValidationInfo) -> ByteSize:
try:
return cls(int(input_value))
except ValueError:
pass
str_match = cls.byte_string_re.match(str(input_value))
if str_match is None:
raise PydanticCustomError('byte_size', 'could not parse value and unit from byte string')
scalar, unit = str_match.groups()
if unit is None:
unit = 'b'
try:
unit_mult = cls.byte_sizes[unit.lower()]
except KeyError:
raise PydanticCustomError('byte_size_unit', 'could not interpret byte unit: {unit}', {'unit': unit})
return cls(int(float(scalar) * unit_mult))
def human_readable(self, decimal: bool = False, separator: str = '') -> str:
"""Converts a byte size to a human readable string.
Args:
decimal: If True, use decimal units (e.g. 1000 bytes per KB). If False, use binary units
(e.g. 1024 bytes per KiB).
separator: A string used to split the value and unit. Defaults to an empty string ('').
Returns:
A human readable string representation of the byte size.
"""
if decimal:
divisor = 1000
units = 'B', 'KB', 'MB', 'GB', 'TB', 'PB'
final_unit = 'EB'
else:
divisor = 1024
units = 'B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB'
final_unit = 'EiB'
num = float(self)
for unit in units:
if abs(num) < divisor:
if unit == 'B':
return f'{num:0.0f}{separator}{unit}'
else:
return f'{num:0.1f}{separator}{unit}'
num /= divisor
return f'{num:0.1f}{separator}{final_unit}'
def to(self, unit: str) -> float:
"""Converts a byte size to another unit, including both byte and bit units.
Args:
unit: The unit to convert to. Must be one of the following: B, KB, MB, GB, TB, PB, EB,
KiB, MiB, GiB, TiB, PiB, EiB (byte units) and
bit, kbit, mbit, gbit, tbit, pbit, ebit,
kibit, mibit, gibit, tibit, pibit, eibit (bit units).
Returns:
The byte size in the new unit.
"""
try:
unit_div = self.byte_sizes[unit.lower()]
except KeyError:
raise PydanticCustomError('byte_size_unit', 'Could not interpret byte unit: {unit}', {'unit': unit})
return self / unit_div
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DATE TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _check_annotated_type(annotated_type: str, expected_type: str, annotation: str) -> None:
if annotated_type != expected_type:
raise PydanticUserError(f"'{annotation}' cannot annotate '{annotated_type}'.", code='invalid-annotated-type')
if TYPE_CHECKING:
PastDate = Annotated[date, ...]
FutureDate = Annotated[date, ...]
else:
class PastDate:
"""A date in the past."""
@classmethod
def __get_pydantic_core_schema__(
cls, source: type[Any], handler: GetCoreSchemaHandler
) -> core_schema.CoreSchema:
if cls is source:
# used directly as a type
return core_schema.date_schema(now_op='past')
else:
schema = handler(source)
_check_annotated_type(schema['type'], 'date', cls.__name__)
schema['now_op'] = 'past'
return schema
def __repr__(self) -> str:
return 'PastDate'
class FutureDate:
"""A date in the future."""
@classmethod
def __get_pydantic_core_schema__(
cls, source: type[Any], handler: GetCoreSchemaHandler
) -> core_schema.CoreSchema:
if cls is source:
# used directly as a type
return core_schema.date_schema(now_op='future')
else:
schema = handler(source)
_check_annotated_type(schema['type'], 'date', cls.__name__)
schema['now_op'] = 'future'
return schema
def __repr__(self) -> str:
return 'FutureDate'
def condate(
*,
strict: bool | None = None,
gt: date | None = None,
ge: date | None = None,
lt: date | None = None,
le: date | None = None,
) -> type[date]:
"""A wrapper for date that adds constraints.
Args:
strict: Whether to validate the date value in strict mode. Defaults to `None`.
gt: The value must be greater than this. Defaults to `None`.
ge: The value must be greater than or equal to this. Defaults to `None`.
lt: The value must be less than this. Defaults to `None`.
le: The value must be less than or equal to this. Defaults to `None`.
Returns:
A date type with the specified constraints.
"""
return Annotated[ # pyright: ignore[reportReturnType]
date,
Strict(strict) if strict is not None else None,
annotated_types.Interval(gt=gt, ge=ge, lt=lt, le=le),
]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DATETIME TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if TYPE_CHECKING:
AwareDatetime = Annotated[datetime, ...]
NaiveDatetime = Annotated[datetime, ...]
PastDatetime = Annotated[datetime, ...]
FutureDatetime = Annotated[datetime, ...]
else:
class AwareDatetime:
"""A datetime that requires timezone info."""
@classmethod
def __get_pydantic_core_schema__(
cls, source: type[Any], handler: GetCoreSchemaHandler
) -> core_schema.CoreSchema:
if cls is source:
# used directly as a type
return core_schema.datetime_schema(tz_constraint='aware')
else:
schema = handler(source)
_check_annotated_type(schema['type'], 'datetime', cls.__name__)
schema['tz_constraint'] = 'aware'
return schema
def __repr__(self) -> str:
return 'AwareDatetime'
class NaiveDatetime:
"""A datetime that doesn't require timezone info."""
@classmethod
def __get_pydantic_core_schema__(
cls, source: type[Any], handler: GetCoreSchemaHandler
) -> core_schema.CoreSchema:
if cls is source:
# used directly as a type
return core_schema.datetime_schema(tz_constraint='naive')
else:
schema = handler(source)
_check_annotated_type(schema['type'], 'datetime', cls.__name__)
schema['tz_constraint'] = 'naive'
return schema
def __repr__(self) -> str:
return 'NaiveDatetime'
class PastDatetime:
"""A datetime that must be in the past."""
@classmethod
def __get_pydantic_core_schema__(
cls, source: type[Any], handler: GetCoreSchemaHandler
) -> core_schema.CoreSchema:
if cls is source:
# used directly as a type
return core_schema.datetime_schema(now_op='past')
else:
schema = handler(source)
_check_annotated_type(schema['type'], 'datetime', cls.__name__)
schema['now_op'] = 'past'
return schema
def __repr__(self) -> str:
return 'PastDatetime'
class FutureDatetime:
"""A datetime that must be in the future."""
@classmethod
def __get_pydantic_core_schema__(
cls, source: type[Any], handler: GetCoreSchemaHandler
) -> core_schema.CoreSchema:
if cls is source:
# used directly as a type
return core_schema.datetime_schema(now_op='future')
else:
schema = handler(source)
_check_annotated_type(schema['type'], 'datetime', cls.__name__)
schema['now_op'] = 'future'
return schema
def __repr__(self) -> str:
return 'FutureDatetime'
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Encoded TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| ByteSize |
python | huggingface__transformers | src/transformers/models/ibert/quant_modules.py | {
"start": 24068,
"end": 25588
} | class ____(Function):
"""
Class to quantize the given floating-point values using symmetric quantization with given range and bitwidth.
"""
@staticmethod
def forward(ctx, x, k, percentile_mode, scale):
"""
Args:
x (`torch.Tensor`):
Floating point tensor to be quantized.
k (`int`):
Quantization bitwidth.
percentile_mode (`bool`):
Whether or not to use percentile calibration.
scale (`torch.Tensor`):
Pre-calculated scaling factor for *x*. Note that the current implementation of SymmetricQuantFunction
requires pre-calculated scaling factor.
Returns:
`torch.Tensor`: Symmetric-quantized value of *input*.
"""
zero_point = torch.tensor(0.0, device=scale.device)
n = 2 ** (k - 1) - 1
new_quant_x = linear_quantize(x, scale, zero_point, inplace=False)
new_quant_x = torch.clamp(new_quant_x, -n, n - 1)
ctx.scale = scale
return new_quant_x
@staticmethod
def backward(ctx, grad_output):
scale = ctx.scale
if len(grad_output.shape) == 4:
scale = scale.view(-1, 1, 1, 1)
# reshape scale and zeropoint for linear weights
elif len(grad_output.shape) == 2:
scale = scale.view(-1, 1)
else:
scale = scale.view(-1)
return grad_output.clone() / scale, None, None, None, None
| SymmetricQuantFunction |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_gradient06.py | {
"start": 315,
"end": 1553
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_gradient06.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [61363328, 61364864]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$5",
"gradient": {
"colors": ["#DDEBCF", "#9CB86E", "#156B13"],
"type": "path",
},
}
)
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | apache__airflow | providers/openlineage/tests/unit/openlineage/utils/custom_facet_fixture.py | {
"start": 1059,
"end": 2185
} | class ____(RunFacet):
"""Define a custom run facet."""
name: str
cluster: str
def get_additional_test_facet(
task_instance: TaskInstance, ti_state: TaskInstanceState
) -> dict[str, RunFacet] | None:
operator_name = task_instance.task.operator_name if task_instance.task else ""
if operator_name == "BashOperator":
return None
return {
"additional_run_facet": MyCustomRunFacet(
name=f"test-lineage-namespace-{ti_state}",
cluster=f"TEST_{task_instance.dag_id}.{task_instance.task_id}",
)
}
def get_duplicate_test_facet_key(
task_instance: TaskInstance, ti_state: TaskInstanceState
) -> dict[str, RunFacet] | None:
return get_additional_test_facet(task_instance, ti_state)
def get_another_test_facet(task_instance, ti_state):
return {"another_run_facet": {"name": "another-lineage-namespace"}}
def return_type_is_not_dict(task_instance, ti_state):
return "return type is not dict"
def get_custom_facet_throws_exception(task_instance, ti_state):
raise Exception("fake exception from custom facet function")
| MyCustomRunFacet |
python | scikit-learn__scikit-learn | sklearn/linear_model/_least_angle.py | {
"start": 42591,
"end": 53445
} | class ____(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars.
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Read more in the :ref:`User Guide <least_angle_regression>`.
Parameters
----------
alpha : float, default=1.0
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
verbose : bool or int, default=False
Sets the verbosity amount.
precompute : bool, 'auto' or array-like, default='auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : int, default=500
Maximum number of iterations to perform.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
copy_X : bool, default=True
If True, X will be copied; else, it may be overwritten.
fit_path : bool, default=True
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
positive : bool, default=False
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
Under the positive restriction the model coefficients will not converge
to the ordinary-least-squares solution for small values of alpha.
Only coefficients up to the smallest alpha value (``alphas_[alphas_ >
0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso
algorithm are typically in congruence with the solution of the
coordinate descent Lasso estimator.
jitter : float, default=None
Upper bound on a uniform noise parameter to be added to the
`y` values, to satisfy the model's assumption of
one-at-a-time computations. Might help with stability.
.. versionadded:: 0.23
random_state : int, RandomState instance or None, default=None
Determines random number generation for jittering. Pass an int
for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`. Ignored if `jitter` is None.
.. versionadded:: 0.23
Attributes
----------
alphas_ : array-like of shape (n_alphas + 1,) or list of such arrays
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller. If this is a list of array-like, the length of the outer
list is `n_targets`.
active_ : list of length n_alphas or list of such lists
Indices of active variables at the end of the path.
If this is a list of list, the length of the outer list is `n_targets`.
coef_path_ : array-like of shape (n_features, n_alphas + 1) or list \
of such arrays
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``. If this is a list
of array-like, the length of the outer list is `n_targets`.
coef_ : array-like of shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float or array-like of shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
lars_path : Compute Least Angle Regression or Lasso
path using LARS algorithm.
lasso_path : Compute Lasso path with coordinate descent.
Lasso : Linear Model trained with L1 prior as
regularizer (aka the Lasso).
LassoCV : Lasso linear model with iterative fitting
along a regularization path.
LassoLarsCV: Cross-validated Lasso, using the LARS algorithm.
LassoLarsIC : Lasso model fit with Lars using BIC
or AIC for model selection.
sklearn.decomposition.sparse_encode : Sparse coding.
Examples
--------
>>> from sklearn import linear_model
>>> reg = linear_model.LassoLars(alpha=0.01)
>>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
LassoLars(alpha=0.01)
>>> print(reg.coef_)
[ 0. -0.955]
"""
_parameter_constraints: dict = {
**Lars._parameter_constraints,
"alpha": [Interval(Real, 0, None, closed="left")],
"max_iter": [Interval(Integral, 0, None, closed="left")],
"positive": ["boolean"],
}
_parameter_constraints.pop("n_nonzero_coefs")
method = "lasso"
def __init__(
self,
alpha=1.0,
*,
fit_intercept=True,
verbose=False,
precompute="auto",
max_iter=500,
eps=np.finfo(float).eps,
copy_X=True,
fit_path=True,
positive=False,
jitter=None,
random_state=None,
):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.positive = positive
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
self.jitter = jitter
self.random_state = random_state
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(
X_train,
y_train,
X_test,
y_test,
Gram=None,
copy=True,
method="lar",
verbose=False,
fit_intercept=True,
max_iter=500,
eps=np.finfo(float).eps,
positive=False,
):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array-like of shape (n_samples, n_features)
The data to fit the LARS on
y_train : array-like of shape (n_samples,)
The target variable to fit LARS on
X_test : array-like of shape (n_samples, n_features)
The data to compute the residues on
y_test : array-like of shape (n_samples,)
The target variable to compute the residues on
Gram : None, 'auto' or array-like of shape (n_features, n_features), \
default=None
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : bool, default=True
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : {'lar' , 'lasso'}, default='lar'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : bool or int, default=False
Sets the amount of verbosity
fit_intercept : bool, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
positive : bool, default=False
Restrict coefficients to be >= 0. Be aware that you might want to
remove fit_intercept which is set True by default.
See reservations for using this option in combination with method
'lasso' for expected small values of alpha in the doc of LassoLarsCV
and LassoLarsIC.
max_iter : int, default=500
Maximum number of iterations to perform.
eps : float, default=np.finfo(float).eps
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array-like of shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array-like of shape (n_features, n_alphas)
Coefficients along the path
residues : array-like of shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
X_train = _check_copy_and_writeable(X_train, copy)
y_train = _check_copy_and_writeable(y_train, copy)
X_test = _check_copy_and_writeable(X_test, copy)
y_test = _check_copy_and_writeable(y_test, copy)
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
alphas, active, coefs = lars_path(
X_train,
y_train,
Gram=Gram,
copy_X=False,
copy_Gram=False,
method=method,
verbose=max(0, verbose - 1),
max_iter=max_iter,
eps=eps,
positive=positive,
)
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
| LassoLars |
python | google__jax | tests/pgle_test.py | {
"start": 1326,
"end": 19389
} | class ____(jtu.JaxTestCase):
def setUp(self):
super().setUp()
if not jtu.test_device_matches(["gpu"]):
self.skipTest('Profile-guideded latency estimation only supported on GPU')
cc.set_cache_dir(None)
cc.reset_cache()
def tearDown(self):
cc.set_cache_dir(None)
cc.reset_cache()
super().tearDown()
def testPGLEProfilerGetFDOProfile(self):
mesh = jtu.create_mesh((2,), ('x',))
@partial(
jax.jit,
in_shardings=NamedSharding(mesh, PartitionSpec('x')),
out_shardings=NamedSharding(mesh, PartitionSpec('x')),
compiler_options={
'xla_gpu_enable_latency_hiding_scheduler': 'True',
# Make sure that matmul is not emitted as Triton GEMM.
'xla_gpu_enable_triton_gemm': 'False',
},
)
def f(x, y):
return x @ y
shape = (16, 16)
x = jnp.arange(math.prod(shape)).reshape(shape).astype(np.float32)
y = x + 1
with config.pgle_profiling_runs(0):
f_lowered = f.lower(x, y)
compiled = f_lowered.compile()
pgle_profiler = profiler.PGLEProfiler(1, 90)
with config.enable_pgle(False):
with profiler.PGLEProfiler.trace(pgle_profiler):
jax.block_until_ready(compiled(x, y))
fdo_profile = pgle_profiler.consume_fdo_profile()
self.assertIsNotNone(fdo_profile)
self.assertIn(b'custom', fdo_profile)
def testPGLEProfilerGetFDOProfileLarge(self):
mesh = jtu.create_mesh((2,), ('x',))
its = 500
compiler_options = {
'xla_gpu_enable_latency_hiding_scheduler': 'True',
# Make sure that matmul is not emitted as Triton GEMM.
'xla_gpu_enable_triton_gemm': 'False',
}
# TODO(b/37664749): Remove this flag once the bug is fixed.
compiler_options['xla_gpu_enable_command_buffer'] = ''
@partial(
jax.jit,
in_shardings=NamedSharding(mesh, PartitionSpec('x')),
out_shardings=NamedSharding(mesh, PartitionSpec('x')),
compiler_options=compiler_options,
)
def f(x):
agg = x
for _ in range(its):
agg = agg @ x
return agg
shape = (16, 16)
x = jnp.arange(math.prod(shape)).reshape(shape).astype(np.float32)
pgle_profiler = profiler.PGLEProfiler(1, 90)
with config.enable_pgle(False):
with profiler.PGLEProfiler.trace(pgle_profiler):
f(x)
fdo_profile = pgle_profiler.consume_fdo_profile()
self.assertEqual(fdo_profile.count(b'custom'), its)
def get_fdo_profiles(self, dump_dir):
jit_f_fdo_profiles = [
x
for x in os.listdir(dump_dir)
if 'jit_f' in x and x.endswith('.fdo_profile')
]
return jit_f_fdo_profiles
def testAutoPgle(self):
mesh = jtu.create_mesh((2,), ('x',))
with tempfile.TemporaryDirectory() as dump_dir:
compile_options = {
'xla_gpu_enable_latency_hiding_scheduler': 'True',
'xla_dump_to': dump_dir,
'xla_gpu_experimental_dump_fdo_profiles': 'True',
}
# TODO(b/376647494): Remove this flag once the bug is fixed.
@partial(
jax.jit,
in_shardings=NamedSharding(mesh, PartitionSpec('x')),
out_shardings=NamedSharding(mesh, PartitionSpec('x')),
compiler_options=compile_options,
)
def f(x):
return x * 2
shape = (16, 16)
x = jnp.arange(math.prod(shape)).reshape(shape).astype(np.float32)
expected = x * 2
with config.pgle_profiling_runs(2), config.enable_pgle(True):
# Run 1: Module should be compiled without FDO. Two modules are expected
# One is the function f, the other one is multi slice module
with jtu.count_pjit_cpp_cache_miss() as cache_miss_count:
self.assertArraysEqual(f(x), expected)
self.assertEqual(cache_miss_count(), 2)
# Run 2: Second PGLE run. Profile should be empty.
with jtu.count_pjit_cpp_cache_miss() as cache_miss_count:
self.assertArraysEqual(f(x), expected)
self.assertEqual(cache_miss_count(), 2)
fdo_profiles_before_pgle = self.get_fdo_profiles(dump_dir)
# One for before optimizatiom, one after SPMD partitioning, and one
# after optimization.
self.assertLen(fdo_profiles_before_pgle, 3)
# The FDO profile file should be empty.
self.assertEqual(
os.path.getsize(os.path.join(dump_dir, fdo_profiles_before_pgle[0])), 0)
# Run 3: The module should be recompiled with FDO profiles
with jtu.count_pjit_cpp_cache_miss() as cache_miss_count:
self.assertArraysEqual(f(x), expected)
self.assertEqual(cache_miss_count(), 2)
fdo_profiles_after_pgle = self.get_fdo_profiles(dump_dir)
# One more before optimizatiom, one more after SPMD partitioning, and
# one more after optimization.
self.assertLen(fdo_profiles_after_pgle, 6)
for fdo_profile in fdo_profiles_after_pgle:
if fdo_profile not in fdo_profiles_before_pgle:
self.assertGreater(
os.path.getsize(os.path.join(dump_dir, fdo_profile)), 0
)
# Run 4: Fast-path should be used after PGLE is done
with jtu.count_pjit_cpp_cache_miss() as cache_miss_count:
self.assertArraysEqual(f(x), expected)
self.assertLess(cache_miss_count(), 2)
def testAutoPgleWithAot(self):
@jax.jit
def f(x):
return x * 2
x = jnp.arange(1)
expected = x * 2
f_lowered = f.lower(x)
serialized, in_tree, out_tree = serialize(f_lowered.compile())
compiled = deserialize_and_load(
serialized, in_tree, out_tree, execution_devices=jax.devices()[:1])
with config.pgle_profiling_runs(1), config.enable_pgle(True):
# Run 1
with jtu.count_jit_compilation_cache_miss() as cache_miss_count:
self.assertArraysEqual(compiled(x), expected)
self.assertEqual(cache_miss_count(), 0)
# Run 2
with jtu.count_jit_compilation_cache_miss() as cache_miss_count:
self.assertArraysEqual(compiled(x), expected)
self.assertEqual(cache_miss_count(), 0)
def testAutoPgleWithPersistentCache(self):
its = 50
mesh = jtu.create_mesh((2,), ('x',))
with tempfile.TemporaryDirectory() as dump_dir:
compiler_options = {
'xla_gpu_enable_latency_hiding_scheduler': 'True',
'xla_dump_to': dump_dir,
'xla_gpu_experimental_dump_fdo_profiles': 'True',
}
# TODO(b/376647494): Remove this flag once the bug is fixed.
@partial(
jax.jit,
in_shardings=NamedSharding(mesh, PartitionSpec('x')),
out_shardings=NamedSharding(mesh, PartitionSpec('x')),
compiler_options=compiler_options,
)
def f(x):
agg = x
for _ in range(its):
agg = agg @ x
return agg
shape = (16, 16)
x = jnp.arange(math.prod(shape)).reshape(shape).astype(np.float32)
with (config.enable_compilation_cache(True),
config.enable_pgle(True),
config.raise_persistent_cache_errors(True),
config.raise_persistent_cache_errors(True),
config.persistent_cache_min_entry_size_bytes(0),
config.persistent_cache_min_compile_time_secs(0),
config.pgle_profiling_runs(2),
tempfile.TemporaryDirectory() as cache_dir):
cc.reset_cache()
cc.set_cache_dir(cache_dir)
# Run 1: Module should be compiled without FDO
with jtu.count_jit_compilation_cache_miss() as cache_miss_count:
f(x)
self.assertGreater(cache_miss_count(), 0)
# Non-pgle profiled version of module should be saved
non_pgle_profiled_files = os.listdir(cache_dir)
self.assertNotEmpty(non_pgle_profiled_files)
# Run 2: Compilation should not be called
with jtu.count_jit_compilation_cache_miss() as cache_miss_count:
f(x)
self.assertGreater(cache_miss_count(), 0)
fdo_profiles_before_pgle = self.get_fdo_profiles(dump_dir)
# Run 3: Module should be compiled with FDO and stored to persistent cache
with jtu.count_jit_compilation_cache_miss() as cache_miss_count:
f(x)
self.assertGreater(cache_miss_count(), 0)
# Check if FDO profile file of the biggest module is not empty
fdo_profiles_after_pgle = [
x
for x in self.get_fdo_profiles(dump_dir)
if x not in fdo_profiles_before_pgle
]
self.assertNotEmpty(fdo_profiles_after_pgle)
# Check if FDO profile file in dump directory is not empty
for fdo_profile in fdo_profiles_after_pgle:
self.assertGreater(
os.path.getsize(os.path.join(dump_dir, fdo_profile)), 0
)
for pgle_profiler in pjit._pgle_profiler_dict.values():
self.assertTrue(pgle_profiler.is_enabled())
self.assertTrue(pgle_profiler.is_fdo_consumed())
files_after_pgle_profile = os.listdir(cache_dir)
self.assertGreater(
len(files_after_pgle_profile), len(non_pgle_profiled_files)
)
# Removing non-pgle profiled module from cache to check that later pgle
# profiled version will be used.
for non_pgle_file in non_pgle_profiled_files:
path = os.path.join(cache_dir, non_pgle_file)
if os.path.isfile(path):
os.remove(path)
elif os.path.isdir(path):
shutil.rmtree(path)
api.clear_caches()
pjit._pgle_profiler_dict.clear()
# Run 4: Persistent compilation cache should be hit PGLE profiler should
# be disabled
cache_hit = 0
def check_if_cache_hit(event):
nonlocal cache_hit
if event == '/jax/compilation_cache/cache_hits':
cache_hit += 1
monitoring.register_event_listener(check_if_cache_hit)
f(x)
monitoring.unregister_event_listener(check_if_cache_hit)
self.assertGreater(cache_hit, 0)
def testPassingFDOProfile(self):
mesh = jtu.create_mesh((2,), ('x',))
@partial(
jax.jit,
in_shardings=NamedSharding(mesh, PartitionSpec('x')),
out_shardings=NamedSharding(mesh, PartitionSpec('x')),
compiler_options={
'xla_gpu_enable_latency_hiding_scheduler': 'True',
# Make sure that matmul is not emitted as Triton GEMM.
'xla_gpu_enable_triton_gemm': 'False',
},
)
def f(x, y):
return x @ y
shape = (16, 16)
x = jnp.arange(math.prod(shape)).reshape(shape).astype(np.float32)
y = x + 1
with config.pgle_profiling_runs(0):
f_lowered = f.lower(x, y)
compiled = f_lowered.compile()
with tempfile.TemporaryDirectory() as cache_dir:
jax.profiler.start_trace(cache_dir)
compiled(x, y)
jax.profiler.stop_trace()
directories = glob.glob(os.path.join(cache_dir, 'plugins/profile/**/'))
directories = [d for d in directories if os.path.isdir(d)]
rundir = directories[-1]
logging.info('rundir: %s', rundir)
fdo_profile = exp_profiler.get_profiled_instructions_proto(rundir)
if jtu.test_device_matches(['gpu']) and jtu.is_device_cuda():
self.assertIn(b'custom', fdo_profile)
logging.info('fdo_profile: %s', fdo_profile)
# Test pass fdo_profile as compiler_options API works.
f_lowered.compile(compiler_options={'fdo_profile': fdo_profile})
def testPersistentCachePopulatedWithAutoPgle(self):
self.skipTest('Test does not cleanly reset the compilation cache')
its = 50
mesh = jtu.create_mesh((2,), ('x',))
@partial(
jax.jit,
in_shardings=NamedSharding(mesh, PartitionSpec('x')),
out_shardings=NamedSharding(mesh, PartitionSpec('x')),
)
def f(x):
agg = x
for _ in range(its):
agg = agg @ x
return agg
@jax.jit
def g(x):
return x + 4
@jax.jit
def h(x):
return x * 42
shape = (16, 16)
x = jnp.arange(math.prod(shape)).reshape(shape).astype(np.float32)
with tempfile.TemporaryDirectory() as cache_dir:
# 1. populate a persistent cache with PGLE enabled
with (config.enable_compilation_cache(True),
config.enable_pgle(True),
config.raise_persistent_cache_errors(True),
config.persistent_cache_min_entry_size_bytes(0),
config.persistent_cache_min_compile_time_secs(0),
config.pgle_profiling_runs(1)):
cc.reset_cache()
cc.set_cache_dir(cache_dir)
# Run 1: Module should miss the cache and be compiled without PGLE
with jtu.count_jit_compilation_cache_miss() as cache_miss_count:
f(x)
self.assertGreater(cache_miss_count(), 0)
# Non-pgle profiled version of module should be saved
non_pgle_f_files = set(os.listdir(cache_dir))
self.assertNotEmpty(non_pgle_f_files)
# Run 2: Module should be re-compiled with PGLE, miss the cache again
with jtu.count_jit_compilation_cache_miss() as cache_miss_count:
f(x)
self.assertGreater(cache_miss_count(), 0)
# PGLE version of the module should now be saved
pgle_and_non_pgle_f_files = set(os.listdir(cache_dir))
self.assertNotEqual(non_pgle_f_files, pgle_and_non_pgle_f_files)
# Remove non-PGLE version of `f` from the cache so a hit in run 3 is
# definitely the PGLE version
for non_pgle_file in non_pgle_f_files:
os.remove(os.path.join(cache_dir, non_pgle_file))
# Run 3: put a non-PGLE version of `g` in the cache
with jtu.count_jit_compilation_cache_miss() as cache_miss_count:
g(x)
self.assertGreater(cache_miss_count(), 0)
api.clear_caches()
pjit._pgle_profiler_dict.clear()
# 2. read from the persistent cache with PGLE disabled-but-expected
with (config.enable_compilation_cache(True),
config.raise_persistent_cache_errors(True),
config.persistent_cache_min_entry_size_bytes(0),
config.persistent_cache_min_compile_time_secs(0),
config.compilation_cache_expect_pgle(True)):
# Run 4 (simulating run 1 in a new process) should pick up the PGLE-optimised
# cache entry, even though PGLE is not enabled
cache_hit = 0
def check_if_cache_hit(event):
nonlocal cache_hit
if event == '/jax/compilation_cache/cache_hits':
cache_hit += 1
monitoring.register_event_listener(check_if_cache_hit)
f(x)
monitoring.unregister_event_listener(check_if_cache_hit)
self.assertGreater(cache_hit, 0)
# Run 5: `g` was only executed once and did not get re-compiled with PGLE, so
# executing it with compilation_cache_expect_pgle will raise a warning and a
# cache *hit*, because the non-PGLE version will be loaded
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
cache_hit = 0
monitoring.register_event_listener(check_if_cache_hit)
g(x)
monitoring.unregister_event_listener(check_if_cache_hit)
self.assertEqual(cache_hit, 1)
if len(w) != 1:
print("Warnings:", [str(w_) for w_ in w], flush=True)
self.assertLen(w, 1)
self.assertIn(
"PERSISTENT CACHE MISS for PGLE-optimized jit_g despite non-PGLE hit",
str(w[0].message)
)
# Run 6: `h` was not executed during step 1, which populated the cache, so
# executing it now and triggering a cache write will emit a warning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with jtu.count_jit_compilation_cache_miss() as cache_miss_count:
h(x)
self.assertGreater(cache_miss_count(), 0)
if len(w) != 1:
print("Warnings:", [str(w_) for w_ in w], flush=True)
self.assertLen(w, 1)
self.assertIn("PERSISTENT CACHE WRITE with key jit_h-", str(w[0].message))
@parameterized.parameters([True, False])
@jtu.thread_unsafe_test()
def testAutoPgleWithCommandBuffers(self, enable_compilation_cache):
with (config.pgle_profiling_runs(1),
config.enable_compilation_cache(enable_compilation_cache),
config.enable_pgle(True),
tempfile.TemporaryDirectory() as dump_dir,
tempfile.TemporaryDirectory() as cache_dir):
if enable_compilation_cache:
cc.reset_cache()
cc.set_cache_dir(cache_dir)
compiler_options = {
'xla_dump_to': dump_dir,
# FUSION, see https://github.com/openxla/xla/issues/22459
'xla_gpu_enable_command_buffer': 1,
'xla_gpu_graph_min_graph_size': 1,
}
@partial(
jax.jit,
compiler_options=compiler_options,
)
def f(x):
return x * 2
x = jnp.arange(1)
expected = x * 2
# This is ugly, but it does not seem possible to get the AutoPGLE-recompiled
# executable text (.lower(x).compile().as_text() or similar).
def get_new_files():
additions = set(os.listdir(dump_dir)) - get_new_files.seen_files
get_new_files.seen_files |= additions
new_files = list(filter(lambda f: f.endswith('debug_options'), additions))
assert len(new_files) == 1
with open(os.path.join(dump_dir, new_files[0])) as ifile:
return ifile.read()
get_new_files.seen_files = set()
# Run 1
self.assertArraysEqual(f(x), expected)
self.assertNotIn(
'xla_gpu_enable_command_buffer: 1', get_new_files()
) # b/376647494 workaround
# Run 2
self.assertArraysEqual(f(x), expected)
self.assertIn(
'xla_gpu_enable_command_buffer', get_new_files()
) # workaround disabled
api.clear_caches()
pjit._pgle_profiler_dict.clear()
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| PgleTest |
python | doocs__leetcode | solution/0800-0899/0878.Nth Magical Number/Solution.py | {
"start": 0,
"end": 237
} | class ____:
def nthMagicalNumber(self, n: int, a: int, b: int) -> int:
mod = 10**9 + 7
c = lcm(a, b)
r = (a + b) * n
return bisect_left(range(r), x=n, key=lambda x: x // a + x // b - x // c) % mod
| Solution |
python | gevent__gevent | src/greentest/3.10/test_ssl.py | {
"start": 99860,
"end": 113174
} | class ____(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(True)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ConnectionResetError, BrokenPipeError, ConnectionAbortedError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# BrokenPipeError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake.
# https://github.com/openssl/openssl/issues/6342
#
# ConnectionAbortedError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake when using WinSock.
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.close()
return False
except (ssl.SSLError, OSError) as e:
# OSError may occur with wrong protocols, e.g. both
# sides use PROTOCOL_TLS_SERVER.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
#
# bpo-31323: Store the exception as string to prevent
# a reference leak: server -> conn_errors -> exception
# -> traceback -> self (ConnectionHandler) -> server
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
# bpo-44229, bpo-43855, bpo-44237, and bpo-33450:
# Ignore spurious EPROTOTYPE returned by write() on macOS.
# See also http://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
if e.errno != errno.EPROTOTYPE and sys.platform != "darwin":
self.running = False
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
if cert_binary is None:
sys.stdout.write(" client did not provide a cert\n")
else:
sys.stdout.write(f" cert binary is {len(cert_binary)}b\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
elif stripped == b'PHA':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: initiating post handshake auth\n")
try:
self.sslconn.verify_client_post_handshake()
except ssl.SSLError as e:
self.write(repr(e).encode("us-ascii") + b"\n")
else:
self.write(b"OK\n")
elif stripped == b'HASCERT':
if self.sslconn.getpeercert() is not None:
self.write(b'TRUE\n')
else:
self.write(b'FALSE\n')
elif stripped == b'GETCERT':
cert = self.sslconn.getpeercert()
self.write(repr(cert).encode("us-ascii") + b"\n")
elif stripped == b'VERIFIEDCHAIN':
certs = self.sslconn._sslobj.get_verified_chain()
self.write(len(certs).to_bytes(1, "big") + b"\n")
elif stripped == b'UNVERIFIEDCHAIN':
certs = self.sslconn._sslobj.get_unverified_chain()
self.write(len(certs).to_bytes(1, "big") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except OSError as e:
# handles SSLError and socket errors
if self.server.chatty and support.verbose:
if isinstance(e, ConnectionError):
# OpenSSL 1.1.1 sometimes raises
# ConnectionResetError when connection is not
# shut down gracefully.
print(
f" Connection reset by peer: {self.addr}"
)
else:
handle_error("Test server failure:\n")
try:
self.write(b"ERROR\n")
except OSError:
pass
self.close()
self.running = False
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLS_SERVER)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = socket_helper.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
self._in_context = False
def __enter__(self):
if self._in_context:
raise ValueError('Re-entering ThreadedEchoServer context')
self._in_context = True
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
assert self._in_context
self._in_context = False
self.stop()
self.join()
def start(self, flag=None):
if not self._in_context:
raise ValueError(
'ThreadedEchoServer must be used as a context manager')
self.flag = flag
threading.Thread.start(self)
def run(self):
if not self._in_context:
raise ValueError(
'ThreadedEchoServer must be used as a context manager')
self.sock.settimeout(1.0)
self.sock.listen(5)
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except TimeoutError as e:
if support.verbose:
sys.stdout.write(f' connection timeout {e!r}\n')
except KeyboardInterrupt:
self.stop()
except BaseException as e:
if support.verbose and self.chatty:
sys.stdout.write(
' connection handling failed: ' + repr(e) + '\n')
self.close()
def close(self):
if self.sock is not None:
self.sock.close()
self.sock = None
def stop(self):
self.active = False
| ThreadedEchoServer |
python | weaviate__weaviate-python-client | weaviate/exceptions.py | {
"start": 11313,
"end": 11591
} | class ____(WeaviateBaseError):
"""Is raised when a client is closed and a method is called on it."""
def __init__(self) -> None:
msg = "The `WeaviateClient` is closed. Run `client.connect()` to (re)connect!"
super().__init__(msg)
| WeaviateClosedClientError |
python | falconry__falcon | examples/recipes/output_csv_stream_asgi.py | {
"start": 28,
"end": 909
} | class ____:
class PseudoTextStream:
def __init__(self):
self.clear()
def clear(self):
self.result = []
def write(self, data):
self.result.append(data.encode())
async def fibonacci_generator(self, n=1000):
stream = self.PseudoTextStream()
writer = csv.writer(stream, quoting=csv.QUOTE_NONNUMERIC)
writer.writerow(('n', 'Fibonacci Fn'))
previous = 1
current = 0
for i in range(n + 1):
writer.writerow((i, current))
previous, current = current, current + previous
for chunk in stream.result:
yield chunk
stream.clear()
async def on_get(self, req, resp):
resp.content_type = falcon.MEDIA_CSV
resp.downloadable_as = 'report.csv'
resp.stream = self.fibonacci_generator()
| Report |
python | huggingface__transformers | src/transformers/models/mpt/modeling_mpt.py | {
"start": 25377,
"end": 29236
} | class ____(MptPreTrainedModel):
def __init__(self, config: MptConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = MptModel(config)
if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
classifier_dropout = config.classifier_dropout
elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
classifier_dropout = config.hidden_dropout
else:
classifier_dropout = 0.1
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**deprecated_arguments,
) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
hidden_states = self.dropout(hidden_states)
logits = self.classifier(hidden_states)
loss = None
if labels is not None:
# move labels to correct device
labels = labels.to(logits.device)
batch_size, seq_length = labels.shape
loss_fct = CrossEntropyLoss()
loss = loss_fct(
logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length)
)
if not return_dict:
output = (logits,) + transformer_outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@auto_docstring
| MptForTokenClassification |
python | streamlit__streamlit | lib/tests/streamlit/runtime/caching/storage/local_disk_cache_storage_test.py | {
"start": 5876,
"end": 10484
} | class ____(unittest.TestCase):
def setUp(self):
super().setUp()
self.context = CacheStorageContext(
function_key="func-key",
function_display_name="func-display-name",
persist="disk",
)
self.storage = LocalDiskCacheStorage(self.context)
self.tempdir = TempDirectory(create=True)
self.patch_get_cache_folder_path = patch(
"streamlit.runtime.caching.storage.local_disk_cache_storage.get_cache_folder_path",
return_value=self.tempdir.path,
)
self.patch_get_cache_folder_path.start()
def tearDown(self):
super().tearDown()
self.storage.clear()
self.patch_get_cache_folder_path.stop()
self.tempdir.cleanup()
def test_storage_get_not_found(self):
"""Test that storage.get() returns the correct value."""
with pytest.raises(CacheStorageKeyNotFoundError):
self.storage.get("some-key")
def test_storage_get_found(self):
"""Test that storage.get() returns the correct value."""
self.storage.set("some-key", b"some-value")
assert self.storage.get("some-key") == b"some-value"
def test_storage_set(self):
"""Test that storage.set() writes the correct value to disk."""
self.storage.set("new-key", b"new-value")
assert os.path.exists(self.tempdir.path + "/func-key-new-key.memo")
with open(self.tempdir.path + "/func-key-new-key.memo", "rb") as f:
assert f.read() == b"new-value"
@patch(
"streamlit.runtime.caching.storage.local_disk_cache_storage.streamlit_write",
MagicMock(side_effect=errors.Error("mock exception")),
)
def test_storage_set_error(self):
"""Test that storage.set() raises an exception when it fails to write to disk."""
with pytest.raises(CacheStorageError) as e:
self.storage.set("uniqueKey", b"new-value")
assert str(e.value) == "Unable to write to cache"
def test_storage_set_override(self):
"""Test that storage.set() overrides the value of an existing key."""
self.storage.set("another_key", b"another_value")
self.storage.set("another_key", b"new_value")
assert self.storage.get("another_key") == b"new_value"
def test_storage_delete(self):
"""Test that storage.delete() removes the correct file from disk."""
self.storage.set("new-key", b"new-value")
assert os.path.exists(self.tempdir.path + "/func-key-new-key.memo")
self.storage.delete("new-key")
assert not os.path.exists(self.tempdir.path + "/func-key-new-key.memo")
with pytest.raises(CacheStorageKeyNotFoundError):
self.storage.get("new-key")
def test_storage_clear(self):
"""Test that storage.clear() removes all storage files from disk."""
self.storage.set("some-key", b"some-value")
self.storage.set("another-key", b"another-value")
assert os.path.exists(self.tempdir.path + "/func-key-some-key.memo")
assert os.path.exists(self.tempdir.path + "/func-key-another-key.memo")
self.storage.clear()
assert not os.path.exists(self.tempdir.path + "/func-key-some-key.memo")
assert not os.path.exists(self.tempdir.path + "/func-key-another-key.memo")
with pytest.raises(CacheStorageKeyNotFoundError):
self.storage.get("some-key")
with pytest.raises(CacheStorageKeyNotFoundError):
self.storage.get("another-key")
# test that cache folder is empty
assert os.listdir(self.tempdir.path) == []
def test_storage_clear_not_existing_cache_directory(self):
"""Test that clear() is not crashing if the cache directory does not exist."""
self.tempdir.cleanup()
self.storage.clear()
def test_storage_clear_call_listdir_existing_cache_directory(self):
"""Test that clear() call os.listdir if cache folder does not exist."""
with patch("os.listdir") as mock_listdir:
self.storage.clear()
mock_listdir.assert_called_once()
def test_storage_clear_not_call_listdir_not_existing_cache_directory(self):
"""Test that clear() doesn't call os.listdir if cache folder does not exist."""
self.tempdir.cleanup()
with patch("os.listdir") as mock_listdir:
self.storage.clear()
mock_listdir.assert_not_called()
def test_storage_close(self):
"""Test that storage.close() does not raise any exception."""
self.storage.close()
| LocalDiskPersistCacheStorageTest |
python | doocs__leetcode | solution/1700-1799/1748.Sum of Unique Elements/Solution.py | {
"start": 0,
"end": 151
} | class ____:
def sumOfUnique(self, nums: List[int]) -> int:
cnt = Counter(nums)
return sum(x for x, v in cnt.items() if v == 1)
| Solution |
python | tensorflow__tensorflow | tensorflow/python/distribute/multi_worker_test_base.py | {
"start": 19767,
"end": 20149
} | class ____(test.TestCase):
"""Base class for testing remote single worker strategy eager and dataset."""
def setUp(self):
super(SingleWorkerTestBaseEager, self).setUp()
workers, _ = test_util.create_local_cluster(num_workers=1, num_ps=0)
remote.connect_to_remote_host(workers[0].target)
def cached_session(self):
return DummySession()
| SingleWorkerTestBaseEager |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol53.py | {
"start": 4195,
"end": 4412
} | class ____(Proto_ContraGeneric):
# This should not generate a reportIncompatibleMethodOverride error
# but does currently.
def m(self, x: "Impl_ContraRecursExplicit3") -> None: ...
| Impl_ContraRecursExplicit3 |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0139_addons_filetreediff_field.py | {
"start": 592,
"end": 1214
} | class ____(migrations.Migration):
safe = Safe.before_deploy()
dependencies = [
("projects", "0138_remove_old_fields"),
]
operations = [
migrations.AddField(
model_name="addonsconfig",
name="filetreediff_enabled",
field=models.BooleanField(blank=True, default=False, null=True),
),
migrations.AddField(
model_name="historicaladdonsconfig",
name="filetreediff_enabled",
field=models.BooleanField(blank=True, default=False, null=True),
),
migrations.RunPython(forwards_func),
]
| Migration |
python | tensorflow__tensorflow | tensorflow/python/distribute/values.py | {
"start": 45369,
"end": 48918
} | class ____(DistributedVariable, Mirrored):
"""Holds a map from replica to variables whose values are kept in sync."""
def _is_mirrored(self):
return Mirrored._is_mirrored(self) # Use correct parent class.
def _update_replica(self, update_fn, value, **kwargs):
return _on_write_update_replica(self, update_fn, value, **kwargs)
def scatter_min(self, *args, **kwargs):
if values_util.is_saving_non_distributed():
return self._primary.scatter_min(*args, **kwargs)
if (self._aggregation != vs.VariableAggregation.ONLY_FIRST_REPLICA and
self._aggregation != vs.VariableAggregation.NONE):
raise NotImplementedError(
values_util.scatter_error_msg.format(
op_name="scatter_min", aggregation=self._aggregation))
return super(MirroredVariable, self).scatter_min(*args, **kwargs)
def scatter_max(self, *args, **kwargs):
if values_util.is_saving_non_distributed():
return self._primary.scatter_max(*args, **kwargs)
if (self._aggregation != vs.VariableAggregation.ONLY_FIRST_REPLICA and
self._aggregation != vs.VariableAggregation.NONE):
raise NotImplementedError(
values_util.scatter_error_msg.format(
op_name="scatter_max", aggregation=self._aggregation))
return super(MirroredVariable, self).scatter_max(*args, **kwargs)
def scatter_update(self, *args, **kwargs):
if values_util.is_saving_non_distributed():
return self._primary.scatter_update(*args, **kwargs)
if (self._aggregation != vs.VariableAggregation.ONLY_FIRST_REPLICA and
self._aggregation != vs.VariableAggregation.NONE):
raise NotImplementedError(
values_util.scatter_error_msg.format(
op_name="scatter_update", aggregation=self._aggregation))
return super(MirroredVariable, self).scatter_update(*args, **kwargs)
def _get_cross_replica(self):
# Return identity, to avoid directly exposing the variable to the user and
# allowing it to be modified by mistake.
return array_ops.identity(Mirrored._get_cross_replica(self))
def _as_graph_element(self):
return self._get_on_device_or_primary()._as_graph_element() # pylint: disable=protected-access
def _gather_saveables_for_checkpoint(self):
"""Overrides Trackable method.
This allows both name-based and object-based save and restore of
MirroredVariables.
Returns:
A dictionary mapping attribute names to `SaveableObject` factories.
"""
def _saveable_factory(name=self._common_name):
return _MirroredSaveable(self, self._primary, name)
return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}
def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
"""Converts a variable to a tensor."""
# TODO(b/154017756): Make _dense_var_to_tensor consistent between ON_READ
# and ON_WRITE.
# Try to avoid assignments to and other mutations of MirroredVariable
# state except through a DistributionStrategy.extended.update() or any of
# the `assign*` and `scatter*` calls.
if as_ref:
# A TF 1.x case where the variable is a boolean variable and used like:
# tf.cond(v, true_fn, false_fn).
raise ValueError(
"You may be using variable created under distribute strategy in TF "
"1.x control flows. Try explicitly converting the variable to Tensor "
"using variable.read_value(), or switch to TF 2.x.")
return ops.convert_to_tensor(
self._get(), dtype=dtype, name=name, as_ref=as_ref)
| MirroredVariable |
python | joerick__pyinstrument | pyinstrument/profiler.py | {
"start": 648,
"end": 1259
} | class ____:
frame_records: list[tuple[list[str], float]]
def __init__(
self,
start_time: float,
start_process_time: float,
start_call_stack: list[str],
target_description: str,
interval: float,
) -> None:
self.start_time = start_time
self.start_process_time = start_process_time
self.start_call_stack = start_call_stack
self.frame_records = []
self.target_description = target_description
self.interval = interval
AsyncMode: TypeAlias = LiteralStr["enabled", "disabled", "strict"]
| ActiveProfilerSession |
python | django__django | tests/requests_tests/test_data_upload_settings.py | {
"start": 1481,
"end": 3503
} | class ____(SimpleTestCase):
def setUp(self):
payload = FakePayload(
"\r\n".join(
[
"--boundary",
'Content-Disposition: form-data; name="name"',
"",
"value",
"--boundary--",
]
)
)
self.request = WSGIRequest(
{
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": "multipart/form-data; boundary=boundary",
"CONTENT_LENGTH": len(payload),
"wsgi.input": payload,
}
)
def test_size_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=10):
with self.assertRaisesMessage(RequestDataTooBig, TOO_MUCH_DATA_MSG):
self.request._load_post_and_files()
def test_size_not_exceeded(self):
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=11):
self.request._load_post_and_files()
def test_no_limit(self):
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=None):
self.request._load_post_and_files()
def test_file_passes(self):
payload = FakePayload(
"\r\n".join(
[
"--boundary",
'Content-Disposition: form-data; name="file1"; '
'filename="test.file"',
"",
"value",
"--boundary--",
]
)
)
request = WSGIRequest(
{
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": "multipart/form-data; boundary=boundary",
"CONTENT_LENGTH": len(payload),
"wsgi.input": payload,
}
)
with self.settings(DATA_UPLOAD_MAX_MEMORY_SIZE=1):
request._load_post_and_files()
self.assertIn("file1", request.FILES, "Upload file not present")
| DataUploadMaxMemorySizeMultipartPostTests |
python | doocs__leetcode | solution/2700-2799/2709.Greatest Common Divisor Traversal/Solution.py | {
"start": 0,
"end": 825
} | class ____:
def __init__(self, n):
self.p = list(range(n))
self.size = [1] * n
def find(self, x):
if self.p[x] != x:
self.p[x] = self.find(self.p[x])
return self.p[x]
def union(self, a, b):
pa, pb = self.find(a), self.find(b)
if pa == pb:
return False
if self.size[pa] > self.size[pb]:
self.p[pb] = pa
self.size[pa] += self.size[pb]
else:
self.p[pa] = pb
self.size[pb] += self.size[pa]
return True
mx = 100010
p = defaultdict(list)
for x in range(1, mx + 1):
v = x
i = 2
while i <= v // i:
if v % i == 0:
p[x].append(i)
while v % i == 0:
v //= i
i += 1
if v > 1:
p[x].append(v)
| UnionFind |
python | django__django | tests/validation/models.py | {
"start": 4616,
"end": 4792
} | class ____(models.Model):
generic_v4unpack_ip = models.GenericIPAddressField(
null=True, blank=True, unique=True, unpack_ipv4=True
)
| GenericIPAddrUnpackUniqueTest |
python | tensorflow__tensorflow | tensorflow/python/distribute/cross_device_ops.py | {
"start": 40467,
"end": 42169
} | class ____(AllReduceCrossDeviceOps):
"""Hierarchical copy all-reduce implementation of CrossDeviceOps.
It reduces to one GPU along edges in some hierarchy and broadcasts back to
each GPU along the same path. For the batch API, tensors will be repacked or
aggregated for more efficient cross-device transportation.
This is a reduction created for Nvidia DGX-1 which assumes GPUs connects like
that on DGX-1 machine. If you have different GPU inter-connections, it is
likely that it would be slower than `tf.distribute.ReductionToOneDevice`.
For reduces that are not all-reduce, it falls back to
`tf.distribute.ReductionToOneDevice`.
Here is how you can use `HierarchicalCopyAllReduce` in
`tf.distribute.MirroredStrategy`:
```
strategy = tf.distribute.MirroredStrategy(
cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
```
"""
def __init__(self, num_packs=1):
"""Initializes the object.
Args:
num_packs: a non-negative integer. The number of packs to split values
into. If zero, no packing will be done.
Raises:
ValueError if `num_packs` is negative.
"""
if num_packs < 0:
raise ValueError(
"HierarchicalCopy requires num_packs >= 0, but {} is specified"
.format(num_packs))
super(HierarchicalCopyAllReduce, self).__init__(
all_reduce_alg="hierarchical_copy",
num_packs=num_packs)
# TODO(crccw): remove after migrating all callers.
CollectiveCommunication = collective_util.CommunicationImplementation
CommunicationImplementation = collective_util.CommunicationImplementation
# TODO(yuefengz): support in-graph collective all-reduce.
| HierarchicalCopyAllReduce |
python | getsentry__sentry | src/sentry/middleware/devtoolbar.py | {
"start": 388,
"end": 2507
} | class ____:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
try:
# Note ordering of conditions to reduce option queries. GET contains the query params, regardless of method.
if request.GET.get("queryReferrer") == "devtoolbar" and options.get(
"devtoolbar.analytics.enabled"
):
_record_api_request(request, response)
except Exception:
logger.exception("devtoolbar: exception while recording api analytics event.")
return response
def _record_api_request(request: HttpRequest, response: HttpResponse) -> None:
resolver_match = request.resolver_match
if resolver_match is None:
raise ValueError(f"Request URL not resolved: {request.path_info}")
kwargs, route, view_name = (
resolver_match.kwargs,
resolver_match.route,
resolver_match.view_name,
)
org_id_or_slug = kwargs.get("organization_id_or_slug", kwargs.get("organization_slug"))
org_id, org_slug = parse_id_or_slug_param(org_id_or_slug)
project_id_or_slug = kwargs.get("project_id_or_slug")
project_id, project_slug = parse_id_or_slug_param(project_id_or_slug)
origin = origin_from_request(request)
query_string: str = get_query_string(request) # starts with ? if non-empty
try:
analytics.record(
DevToolbarApiRequestEvent(
view_name=view_name,
route=route,
query_string=query_string,
origin=origin,
method=request.method,
status_code=response.status_code,
organization_id=org_id or None,
organization_slug=org_slug,
project_id=project_id or None,
project_slug=project_slug,
user_id=request.user.id if hasattr(request, "user") and request.user else None,
)
)
except Exception as e:
sentry_sdk.capture_exception(e)
| DevToolbarAnalyticsMiddleware |
python | apache__avro | lang/py/avro/test/test_bench.py | {
"start": 1821,
"end": 4151
} | class ____(unittest.TestCase):
def test_minimum_speed(self) -> None:
with tempfile.NamedTemporaryFile(suffix="avr") as temp_:
pass
temp = Path(temp_.name)
self.assertLess(
time_writes(temp, NUMBER_OF_TESTS),
MAX_WRITE_SECONDS,
f"Took longer than {MAX_WRITE_SECONDS} second(s) to write the test file with {NUMBER_OF_TESTS} values.",
)
self.assertLess(
time_read(temp),
MAX_READ_SECONDS,
f"Took longer than {MAX_READ_SECONDS} second(s) to read the test file with {NUMBER_OF_TESTS} values.",
)
def rand_name() -> str:
return "".join(random.sample(string.ascii_lowercase, 15))
def rand_ip() -> str:
return ".".join(map(str, randbytes(4)))
def picks(n) -> Sequence[Mapping[str, str]]:
return [{"query": rand_name(), "response": rand_ip(), "type": random.choice(TYPES)} for _ in range(n)]
def time_writes(path: Path, number: int) -> float:
with avro.datafile.DataFileWriter(path.open("wb"), WRITER, SCHEMA) as dw:
globals_ = {"dw": dw, "picks": picks(number)}
return timeit.timeit("dw.append(next(p))", number=number, setup="p=iter(picks)", globals=globals_)
def time_read(path: Path) -> float:
"""
Time how long it takes to read the file written in the `write` function.
We only do this once, because the size of the file is defined by the number sent to `write`.
"""
with avro.datafile.DataFileReader(path.open("rb"), READER) as dr:
return timeit.timeit("tuple(dr)", number=1, globals={"dr": dr})
def parse_args() -> argparse.Namespace: # pragma: no cover
parser = argparse.ArgumentParser(description="Benchmark writing some random avro.")
parser.add_argument(
"--number",
"-n",
type=int,
default=getattr(timeit, "default_number", 1000000),
help="how many times to run",
)
return parser.parse_args()
def main() -> None: # pragma: no cover
args = parse_args()
with tempfile.NamedTemporaryFile(suffix=".avr") as temp_:
pass
temp = Path(temp_.name)
print(f"Using file {temp.name}")
print(f"Writing: {time_writes(temp, args.number)}")
print(f"Reading: {time_read(temp)}")
if __name__ == "__main__": # pragma: no cover
main()
| TestBench |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/execution_api/versions/head/test_dag_runs.py | {
"start": 6479,
"end": 7371
} | class ____:
def setup_method(self):
clear_db_runs()
def teardown_method(self):
clear_db_runs()
def test_get_state(self, client, session, dag_maker):
dag_id = "test_get_state"
run_id = "test_run_id"
with dag_maker(dag_id=dag_id, session=session, serialized=True):
EmptyOperator(task_id="test_task")
dag_maker.create_dagrun(run_id=run_id, state=DagRunState.SUCCESS)
session.commit()
response = client.get(f"/execution/dag-runs/{dag_id}/{run_id}/state")
assert response.status_code == 200
assert response.json() == {"state": "success"}
def test_dag_run_not_found(self, client):
dag_id = "dag_not_found"
run_id = "test_run_id"
response = client.post(f"/execution/dag-runs/{dag_id}/{run_id}/clear")
assert response.status_code == 404
| TestDagRunState |
python | pytorch__pytorch | torch/utils/_sympy/value_ranges.py | {
"start": 3674,
"end": 14432
} | class ____(Generic[_T]):
if TYPE_CHECKING:
# ruff doesn't understand circular references but mypy does
# pyrefly: ignore [unbound-name]
ExprVR = ValueRanges[sympy.Expr] # noqa: F821
# pyrefly: ignore [unbound-name]
BoolVR = ValueRanges[SympyBoolean] # noqa: F821
AllVR = Union[ExprVR, BoolVR]
# Although the type signature here suggests you can pass any
# sympy expression, in practice the analysis here only works
# with constant sympy expressions
lower: _T
upper: _T
is_bool: bool
is_int: bool
is_float: bool
def __repr__(self) -> str:
return f"VR[{self.lower}, {self.upper}]"
@overload
def __init__(
self: ValueRanges[sympy.Expr],
lower: ExprIn,
upper: ExprIn,
) -> None: ...
@overload
def __init__( # type: ignore[misc]
self: ValueRanges[SympyBoolean],
lower: BoolIn,
upper: BoolIn,
) -> None: ...
def __init__(self, lower: AllIn, upper: AllIn) -> None:
lower = simple_sympify(lower)
upper = simple_sympify(upper)
# TODO: when the bounds have free variables, this may be
# nontrivial to actually verify
try:
if not sympy_generic_le(lower, upper):
raise ValueRangeError(f"Invalid ranges [{lower}:{upper}]")
except TypeError as e:
raise TypeError(f"Could not compare {lower} <= {upper}") from e
is_bool_lower = isinstance(lower, SympyBoolean)
is_bool_upper = isinstance(upper, SympyBoolean)
if is_bool_lower != is_bool_upper:
raise AssertionError((lower, upper))
# Warning: is_int/is_float is best effort. We do pretty well in
# Dynamo, but in Inductor these attributes are often wrong because we
# are not very rigorous in dtype analysis. This is also why we need
# the flexible analysis for is_int: sometimes a sympy.oo pops in for
# an integer bound. I would /like/ for us not to do this, but it's
# too hard to push the invariant through right now.
if isinstance(lower, sympy.Integer) and upper == sympy.oo:
upper = int_oo
if isinstance(upper, sympy.Integer) and lower == -sympy.oo:
lower = -int_oo
# NB: [-int_oo, -int_oo] and [int_oo, int_oo] are allowed
integer_types = (sympy.Integer, NegativeIntInfinity, IntInfinity)
is_int_lower = isinstance(lower, integer_types)
is_int_upper = isinstance(upper, integer_types)
# Because this is a frozen class
object.__setattr__(self, "lower", lower)
object.__setattr__(self, "upper", upper)
# Unlike bool/int in Python, we don't report bools are ints
#
# NB: is_bool_lower == is_bool_upper, so we only need to check one
object.__setattr__(self, "is_bool", is_bool_lower)
object.__setattr__(
self,
"is_int",
not self.is_bool and is_int_lower and is_int_upper,
)
"""
# This assert is just impossible right now, too many sympy bugs
if self.is_int:
# NB: sympy will sometimes randomly lose the float-ness of zero,
# so we also need to account for that in the assertion here.
# See also https://github.com/sympy/sympy/issues/26620
assert isinstance(lower, sympy.Integer) or lower in [-sympy.oo, 0], (
lower,
upper,
)
assert isinstance(upper, sympy.Integer) or upper in [sympy.oo, 0], (lower, upper)
"""
# NB: [-oo, oo] always advertises as float!
object.__setattr__(self, "is_float", not self.is_bool and not self.is_int)
if not self.is_bool and not self.is_int and not self.is_float:
raise AssertionError((lower, upper))
def boolify(self) -> ValueRanges[SympyBoolean]:
if vr_is_bool(self):
return self
elif self == ValueRanges.unknown():
return ValueRanges.unknown_bool()
else:
raise AssertionError(f"not bool like {self}")
def __contains__(self, x: AllIn) -> bool:
return ValueRanges.wrap(x).issubset(self)
def issubset(self, other):
if other is self.unknown_int():
return True
return sympy_generic_le(other.lower, self.lower) and sympy_generic_le(
self.upper, other.upper
)
def tighten(self, other) -> ValueRanges:
"""Given two ValueRanges, returns their intersection"""
return self & other
# Intersection
@overload
def __and__(
self: ValueRanges[sympy.Expr],
other: ValueRanges[sympy.Expr],
) -> ValueRanges[sympy.Expr]: ...
@overload
def __and__( # type: ignore[misc]
self: ValueRanges[SympyBoolean],
other: ValueRanges[SympyBoolean],
) -> ValueRanges[SympyBoolean]: ...
def __and__(self: AllVR, other: AllVR) -> AllVR:
if other in (ValueRanges.unknown(), ValueRanges.unknown_int()):
return self
if self in (ValueRanges.unknown(), ValueRanges.unknown_int()):
return other
if self.is_bool != other.is_bool:
raise AssertionError((self, other))
if self.is_int != other.is_int:
raise AssertionError((self, other))
if self.is_float != other.is_float:
raise AssertionError((self, other))
if self.is_bool:
return ValueRanges(
sympy.Or(self.lower, other.lower), sympy.And(self.upper, other.upper)
)
else:
return ValueRanges(
sympy.Max(self.lower, other.lower), sympy.Min(self.upper, other.upper)
)
# Union
@overload
def __or__(
self: ValueRanges[sympy.Expr],
other: ValueRanges[sympy.Expr],
) -> ValueRanges[sympy.Expr]: ...
@overload
def __or__( # type: ignore[misc]
self: ValueRanges[SympyBoolean],
other: ValueRanges[SympyBoolean],
) -> ValueRanges[SympyBoolean]: ...
def __or__(self: AllVR, other: AllVR) -> AllVR:
if ValueRanges.unknown() in (self, other):
return ValueRanges.unknown()
if self.is_bool != other.is_bool:
raise AssertionError((self, other))
if self.is_int != other.is_int:
raise AssertionError((self, other))
if self.is_float != other.is_float:
raise AssertionError((self, other))
if self.is_bool:
return ValueRanges(
sympy.And(self.lower, other.lower), sympy.Or(self.upper, other.upper)
)
else:
return ValueRanges(
sympy.Min(self.lower, other.lower), sympy.Max(self.upper, other.upper)
)
def is_singleton(self) -> bool:
return self.lower == self.upper
@staticmethod
@functools.cache
def unknown() -> ValueRanges[sympy.Expr]:
return ValueRanges(-sympy.oo, sympy.oo)
@staticmethod
@functools.cache
def unknown_int() -> ValueRanges[sympy.Expr]:
return ValueRanges(-int_oo, int_oo)
@staticmethod
@functools.cache
def unknown_bool() -> ValueRanges[SympyBoolean]:
return ValueRanges(sympy.false, sympy.true)
@overload
@staticmethod
# work around the fact that bool and int overlap
def wrap(arg: ExprIn | ExprVR) -> ExprVR: # type: ignore[overload-overlap]
...
@overload
@staticmethod
def wrap(arg: BoolIn | BoolVR) -> BoolVR: # type: ignore[misc]
...
@staticmethod
def wrap(arg: AllIn | AllVR) -> AllVR:
if isinstance(arg, ValueRanges):
return arg
if isinstance(arg, float) and math.isnan(arg):
return ValueRanges.unknown()
# arg is either ExprIn or BoolIn, but we don't know it here
return ValueRanges(arg, arg) # type: ignore[arg-type]
@staticmethod
def increasing_map(x: ExprIn | ExprVR, fn: ExprFn) -> ExprVR:
"""Increasing: x <= y => f(x) <= f(y)."""
x = ValueRanges.wrap(x)
return ValueRanges(fn(x.lower), fn(x.upper))
@overload
@staticmethod
def decreasing_map(x: ExprIn | ExprVR, fn: ExprFn) -> ExprVR: ...
@overload
@staticmethod
def decreasing_map(x: BoolIn | BoolVR, fn: BoolFn) -> BoolVR: # type: ignore[misc]
...
@staticmethod
def decreasing_map(x: AllIn | AllVR, fn: AllFn) -> AllVR:
"""Decreasing: x <= y => f(x) >= f(y)."""
x = ValueRanges.wrap(x)
# consistently either Expr or Bool, but we don't know it here
return ValueRanges(fn(x.upper), fn(x.lower)) # type: ignore[arg-type]
@staticmethod
def monotone_map(x: ExprIn | ExprVR, fn: ExprFn) -> ExprVR:
"""It's increasing or decreasing."""
x = ValueRanges.wrap(x)
l = fn(x.lower)
u = fn(x.upper)
return ValueRanges(min(l, u), max(l, u))
@staticmethod
def convex_min_zero_map(x: ExprIn | ExprVR, fn: ExprFn) -> ExprVR:
"""Fn is convex and has a minimum at 0."""
x = ValueRanges.wrap(x)
if 0 in x:
upper = max(fn(x.lower), fn(x.upper))
upper = simple_sympify(upper)
if isinstance(upper, sympy.Float) or upper == sympy.oo:
return ValueRanges(0.0, upper)
return ValueRanges(0, upper)
return ValueRanges.monotone_map(x, fn)
@overload
@staticmethod
def coordinatewise_increasing_map(
x: ExprIn | ExprVR,
y: ExprIn | ExprVR,
fn: ExprFn2,
) -> ExprVR: ...
@overload
@staticmethod
def coordinatewise_increasing_map( # type: ignore[misc]
x: BoolIn | BoolVR,
y: BoolIn | BoolVR,
fn: BoolFn2,
) -> BoolVR: ...
@staticmethod
def coordinatewise_increasing_map(
x: AllIn | AllVR,
y: AllIn | AllVR,
fn: AllFn2,
) -> AllVR:
"""
It's increasing on each coordinate.
Mathematically:
For every 1 <= i <= n and x_i <= y_i we have that
f(x1, .., xn) <= f(x1, , yi, ..., xn)
"""
x, y = ValueRanges.wrap(x), ValueRanges.wrap(y)
return ValueRanges(
fn(x.lower, y.lower), # type: ignore[arg-type]
fn(x.upper, y.upper), # type: ignore[arg-type]
)
@classmethod
def coordinatewise_monotone_map(cls, x, y, fn):
"""It's increasing or decreasing on each coordinate."""
x, y = cls.wrap(x), cls.wrap(y)
products = [
fn(a, b)
for a, b in itertools.product([x.lower, x.upper], [y.lower, y.upper])
]
return ValueRanges(min(products), max(products))
| ValueRanges |
python | mkdocs__mkdocs | mkdocs/tests/search_tests.py | {
"start": 2330,
"end": 10571
} | class ____(unittest.TestCase):
def test_plugin_config_defaults(self):
expected = {
'lang': None,
'separator': r'[\s\-]+',
'min_search_length': 3,
'prebuild_index': False,
'indexing': 'full',
}
plugin = search.SearchPlugin()
errors, warnings = plugin.load_config({})
self.assertEqual(plugin.config, expected)
self.assertEqual(errors, [])
self.assertEqual(warnings, [])
def test_plugin_config_lang(self):
expected = {
'lang': ['es'],
'separator': r'[\s\-]+',
'min_search_length': 3,
'prebuild_index': False,
'indexing': 'full',
}
plugin = search.SearchPlugin()
errors, warnings = plugin.load_config({'lang': 'es'})
self.assertEqual(plugin.config, expected)
self.assertEqual(errors, [])
self.assertEqual(warnings, [])
def test_plugin_config_separator(self):
expected = {
'lang': None,
'separator': r'[\s\-\.]+',
'min_search_length': 3,
'prebuild_index': False,
'indexing': 'full',
}
plugin = search.SearchPlugin()
errors, warnings = plugin.load_config({'separator': r'[\s\-\.]+'})
self.assertEqual(plugin.config, expected)
self.assertEqual(errors, [])
self.assertEqual(warnings, [])
def test_plugin_config_min_search_length(self):
expected = {
'lang': None,
'separator': r'[\s\-]+',
'min_search_length': 2,
'prebuild_index': False,
'indexing': 'full',
}
plugin = search.SearchPlugin()
errors, warnings = plugin.load_config({'min_search_length': 2})
self.assertEqual(plugin.config, expected)
self.assertEqual(errors, [])
self.assertEqual(warnings, [])
def test_plugin_config_prebuild_index(self):
expected = {
'lang': None,
'separator': r'[\s\-]+',
'min_search_length': 3,
'prebuild_index': True,
'indexing': 'full',
}
plugin = search.SearchPlugin()
errors, warnings = plugin.load_config({'prebuild_index': True})
self.assertEqual(plugin.config, expected)
self.assertEqual(errors, [])
self.assertEqual(warnings, [])
def test_plugin_config_indexing(self):
expected = {
'lang': None,
'separator': r'[\s\-]+',
'min_search_length': 3,
'prebuild_index': False,
'indexing': 'titles',
}
plugin = search.SearchPlugin()
errors, warnings = plugin.load_config({'indexing': 'titles'})
self.assertEqual(plugin.config, expected)
self.assertEqual(errors, [])
self.assertEqual(warnings, [])
def test_event_on_config_defaults(self):
plugin = search.SearchPlugin()
plugin.load_config({})
result = plugin.on_config(load_config(theme='mkdocs', extra_javascript=[]))
self.assertFalse(result['theme']['search_index_only'])
self.assertFalse(result['theme']['include_search_page'])
self.assertEqual(result['theme'].static_templates, {'404.html', 'sitemap.xml'})
self.assertEqual(len(result['theme'].dirs), 3)
self.assertEqual(result['extra_javascript'], ['search/main.js'])
self.assertEqual(plugin.config.lang, [result['theme']['locale'].language])
def test_event_on_config_lang(self):
plugin = search.SearchPlugin()
plugin.load_config({'lang': 'es'})
result = plugin.on_config(load_config(theme='mkdocs', extra_javascript=[]))
self.assertFalse(result['theme']['search_index_only'])
self.assertFalse(result['theme']['include_search_page'])
self.assertEqual(result['theme'].static_templates, {'404.html', 'sitemap.xml'})
self.assertEqual(len(result['theme'].dirs), 3)
self.assertEqual(result['extra_javascript'], ['search/main.js'])
self.assertEqual(plugin.config.lang, ['es'])
def test_event_on_config_theme_locale(self):
plugin = search.SearchPlugin()
plugin.load_config({})
result = plugin.on_config(
load_config(theme={'name': 'mkdocs', 'locale': 'fr'}, extra_javascript=[])
)
self.assertFalse(result['theme']['search_index_only'])
self.assertFalse(result['theme']['include_search_page'])
self.assertEqual(result['theme'].static_templates, {'404.html', 'sitemap.xml'})
self.assertEqual(len(result['theme'].dirs), 3)
self.assertEqual(result['extra_javascript'], ['search/main.js'])
self.assertEqual(plugin.config.lang, [result['theme']['locale'].language])
def test_event_on_config_include_search_page(self):
plugin = search.SearchPlugin()
plugin.load_config({})
config = load_config(
theme={'name': 'mkdocs', 'include_search_page': True}, extra_javascript=[]
)
result = plugin.on_config(config)
self.assertFalse(result['theme']['search_index_only'])
self.assertTrue(result['theme']['include_search_page'])
self.assertEqual(
result['theme'].static_templates, {'404.html', 'sitemap.xml', 'search.html'}
)
self.assertEqual(len(result['theme'].dirs), 3)
self.assertEqual(result['extra_javascript'], ['search/main.js'])
def test_event_on_config_search_index_only(self):
plugin = search.SearchPlugin()
plugin.load_config({})
config = load_config(
theme={'name': 'mkdocs', 'search_index_only': True}, extra_javascript=[]
)
result = plugin.on_config(config)
self.assertTrue(result['theme']['search_index_only'])
self.assertFalse(result['theme']['include_search_page'])
self.assertEqual(result['theme'].static_templates, {'404.html', 'sitemap.xml'})
self.assertEqual(len(result['theme'].dirs), 2)
self.assertEqual(len(result['extra_javascript']), 0)
@mock.patch('mkdocs.utils.write_file', autospec=True)
@mock.patch('mkdocs.utils.copy_file', autospec=True)
def test_event_on_post_build_defaults(self, mock_copy_file, mock_write_file):
plugin = search.SearchPlugin()
plugin.load_config({})
config = load_config(theme='mkdocs')
plugin.on_config(config)
plugin.on_pre_build(config)
plugin.on_post_build(config)
self.assertEqual(mock_copy_file.call_count, 0)
self.assertEqual(mock_write_file.call_count, 1)
@mock.patch('mkdocs.utils.write_file', autospec=True)
@mock.patch('mkdocs.utils.copy_file', autospec=True)
def test_event_on_post_build_single_lang(self, mock_copy_file, mock_write_file):
plugin = search.SearchPlugin()
plugin.load_config({'lang': ['es']})
config = load_config(theme='mkdocs')
plugin.on_pre_build(config)
plugin.on_post_build(config)
self.assertEqual(mock_copy_file.call_count, 2)
self.assertEqual(mock_write_file.call_count, 1)
@mock.patch('mkdocs.utils.write_file', autospec=True)
@mock.patch('mkdocs.utils.copy_file', autospec=True)
def test_event_on_post_build_multi_lang(self, mock_copy_file, mock_write_file):
plugin = search.SearchPlugin()
plugin.load_config({'lang': ['es', 'fr']})
config = load_config(theme='mkdocs')
plugin.on_pre_build(config)
plugin.on_post_build(config)
self.assertEqual(mock_copy_file.call_count, 4)
self.assertEqual(mock_write_file.call_count, 1)
@mock.patch('mkdocs.utils.write_file', autospec=True)
@mock.patch('mkdocs.utils.copy_file', autospec=True)
def test_event_on_post_build_search_index_only(self, mock_copy_file, mock_write_file):
plugin = search.SearchPlugin()
plugin.load_config({'lang': ['es']})
config = load_config(theme={'name': 'mkdocs', 'search_index_only': True})
plugin.on_pre_build(config)
plugin.on_post_build(config)
self.assertEqual(mock_copy_file.call_count, 0)
self.assertEqual(mock_write_file.call_count, 1)
| SearchPluginTests |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/core_tests/instance_tests/test_instance_lazy_load.py | {
"start": 1436,
"end": 2318
} | class ____(NoOpComputeLogManager, ConfigurableClass):
def __init__(self, inst_data: Optional[ConfigurableClassData] = None):
super().__init__(inst_data)
raise Exception("Expected init fail")
@classmethod
def from_config_value(
cls, inst_data: ConfigurableClassData, config_value: Mapping[str, Any]
) -> Self:
return cls(inst_data=inst_data)
def test_lazy_compute_log_manager():
with dg.instance_for_test(
overrides={
"compute_logs": {
"module": "dagster_tests.core_tests.instance_tests.test_instance_lazy_load",
"class": "InitFailComputeLogManager",
"config": {},
}
}
) as instance:
with pytest.raises(Exception, match="Expected init fail"):
print(instance.compute_log_manager) # noqa: T201
| InitFailComputeLogManager |
python | pypa__pip | tests/unit/test_exceptions.py | {
"start": 14084,
"end": 19421
} | class ____:
default_text = (
f"The Python environment under {sys.prefix} is managed externally, "
f"and may not be\nmanipulated by the user. Please use specific "
f"tooling from the distributor of\nthe Python installation to "
f"interact with this environment instead.\n"
)
@pytest.fixture(autouse=True)
def patch_locale(self, monkeypatch: pytest.MonkeyPatch) -> None:
orig_getlocal = locale.getlocale
def fake_getlocale(category: int) -> tuple[str | None, str | None]:
"""Fake getlocale() that always reports zh_Hant for LC_MESSASGES."""
result = orig_getlocal(category)
if category == getattr(locale, "LC_MESSAGES", None):
return "zh_Hant", result[1]
return result
monkeypatch.setattr(locale, "getlocale", fake_getlocale)
@pytest.fixture
def marker(self, tmp_path: pathlib.Path) -> pathlib.Path:
marker = tmp_path.joinpath("EXTERNALLY-MANAGED")
marker.touch()
return marker
def test_invalid_config_format(
self,
caplog: pytest.LogCaptureFixture,
marker: pathlib.Path,
) -> None:
marker.write_text("invalid", encoding="utf8")
with caplog.at_level(logging.WARNING, "pip._internal.exceptions"):
exc = ExternallyManagedEnvironment.from_config(marker)
assert len(caplog.records) == 1
assert caplog.records[-1].getMessage() == f"Failed to read {marker}"
assert str(exc.context) == self.default_text
@pytest.mark.parametrize(
"config",
[
pytest.param("", id="empty"),
pytest.param("[foo]\nblah = blah", id="no-section"),
pytest.param("[externally-managed]\nblah = blah", id="no-key"),
],
)
def test_config_without_key(
self,
caplog: pytest.LogCaptureFixture,
marker: pathlib.Path,
config: str,
) -> None:
marker.write_text(config, encoding="utf8")
with caplog.at_level(logging.WARNING, "pip._internal.exceptions"):
exc = ExternallyManagedEnvironment.from_config(marker)
assert not caplog.records
assert str(exc.context) == self.default_text
@pytest.mark.skipif(
sys.platform == "win32",
reason="Localization disabled on Windows",
)
@pytest.mark.parametrize(
"config, expected",
[
pytest.param(
"""\
[externally-managed]
Error = 最後
Error-en = English
Error-zh = 中文
Error-zh_Hant = 繁體
Error-zh_Hans = 简体
""",
"繁體",
id="full",
),
pytest.param(
"""\
[externally-managed]
Error = 最後
Error-en = English
Error-zh = 中文
Error-zh_Hans = 简体
""",
"中文",
id="no-variant",
),
pytest.param(
"""\
[externally-managed]
Error = 最後
Error-en = English
""",
"最後",
id="fallback",
),
],
)
def test_config_canonical(
self,
caplog: pytest.LogCaptureFixture,
marker: pathlib.Path,
config: str,
expected: str,
) -> None:
marker.write_text(
textwrap.dedent(config),
encoding="utf8",
)
with caplog.at_level(logging.WARNING, "pip._internal.exceptions"):
exc = ExternallyManagedEnvironment.from_config(marker)
assert not caplog.records
assert str(exc.context) == expected
@pytest.mark.skipif(
sys.platform != "win32",
reason="Non-Windows should implement localization",
)
@pytest.mark.parametrize(
"config",
[
pytest.param(
"""\
[externally-managed]
Error = 最後
Error-en = English
Error-zh = 中文
Error-zh_Hant = 繁體
Error-zh_Hans = 简体
""",
id="full",
),
pytest.param(
"""\
[externally-managed]
Error = 最後
Error-en = English
Error-zh = 中文
Error-zh_Hans = 简体
""",
id="no-variant",
),
pytest.param(
"""\
[externally-managed]
Error = 最後
Error-en = English
""",
id="fallback",
),
],
)
def test_config_canonical_no_localization(
self,
caplog: pytest.LogCaptureFixture,
marker: pathlib.Path,
config: str,
) -> None:
marker.write_text(
textwrap.dedent(config),
encoding="utf8",
)
with caplog.at_level(logging.WARNING, "pip._internal.exceptions"):
exc = ExternallyManagedEnvironment.from_config(marker)
assert not caplog.records
assert str(exc.context) == "最後"
| TestExternallyManagedEnvironment |
python | pytorch__pytorch | torch/_dynamo/variables/iter.py | {
"start": 10958,
"end": 11976
} | class ____(IteratorVariable):
"""
VariableTracker for iter(obj) that implements the iterator protocol (i.e.,
has a `__next__` method).
We use this class to track the state of the iterator and handle the case
when the iterator is exhausted:
Example usage:
> b = iter(obj)
> list(b) # exhaust the iterator
> list(b) # empty list
"""
def __init__(self, obj: VariableTracker, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.obj = obj
self.generator_exhausted = False
def next_variable(self, tx: "InstructionTranslator") -> VariableTracker:
if self.generator_exhausted:
raise_observed_exception(StopIteration, tx)
try:
return self.obj.next_variable(tx)
except ObservedUserStopIteration:
# Do not rely on the object to always return StopIteration once it
# is exhausted.
self.generator_exhausted = True
raise
| ObjectIteratorVariable |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 11273,
"end": 11355
} | class ____(AbstractProject):
artist = models.CharField(max_length=30)
| ArtProject |
python | mwaskom__seaborn | seaborn/categorical.py | {
"start": 1228,
"end": 54629
} | class ____(VectorPlotter):
wide_structure = {"x": "@columns", "y": "@values", "hue": "@columns"}
flat_structure = {"y": "@values"}
_legend_attributes = ["color"]
def __init__(
self,
data=None,
variables={},
order=None,
orient=None,
require_numeric=False,
color=None,
legend="auto",
):
super().__init__(data=data, variables=variables)
# This method takes care of some bookkeeping that is necessary because the
# original categorical plots (prior to the 2021 refactor) had some rules that
# don't fit exactly into VectorPlotter logic. It may be wise to have a second
# round of refactoring that moves the logic deeper, but this will keep things
# relatively sensible for now.
# For wide data, orient determines assignment to x/y differently from the
# default VectorPlotter rules. If we do decide to make orient part of the
# _base variable assignment, we'll want to figure out how to express that.
if self.input_format == "wide" and orient in ["h", "y"]:
self.plot_data = self.plot_data.rename(columns={"x": "y", "y": "x"})
orig_variables = set(self.variables)
orig_x = self.variables.pop("x", None)
orig_y = self.variables.pop("y", None)
orig_x_type = self.var_types.pop("x", None)
orig_y_type = self.var_types.pop("y", None)
if "x" in orig_variables:
self.variables["y"] = orig_x
self.var_types["y"] = orig_x_type
if "y" in orig_variables:
self.variables["x"] = orig_y
self.var_types["x"] = orig_y_type
# Initially there was more special code for wide-form data where plots were
# multi-colored by default and then either palette or color could be used.
# We want to provide backwards compatibility for this behavior in a relatively
# simply way, so we delete the hue information when color is specified.
if (
self.input_format == "wide"
and "hue" in self.variables
and color is not None
):
self.plot_data.drop("hue", axis=1)
self.variables.pop("hue")
# The concept of an "orientation" is important to the original categorical
# plots, but there's no provision for it in VectorPlotter, so we need it here.
# Note that it could be useful for the other functions in at least two ways
# (orienting a univariate distribution plot from long-form data and selecting
# the aggregation axis in lineplot), so we may want to eventually refactor it.
self.orient = infer_orient(
x=self.plot_data.get("x", None),
y=self.plot_data.get("y", None),
orient=orient,
require_numeric=False,
)
self.legend = legend
# Short-circuit in the case of an empty plot
if not self.has_xy_data:
return
# Categorical plots can be "univariate" in which case they get an anonymous
# category label on the opposite axis. Note: this duplicates code in the core
# scale_categorical function. We need to do it here because of the next line.
if self.orient not in self.variables:
self.variables[self.orient] = None
self.var_types[self.orient] = "categorical"
self.plot_data[self.orient] = ""
# Categorical variables have discrete levels that we need to track
cat_levels = categorical_order(self.plot_data[self.orient], order)
self.var_levels[self.orient] = cat_levels
def _hue_backcompat(self, color, palette, hue_order, force_hue=False):
"""Implement backwards compatibility for hue parametrization.
Note: the force_hue parameter is used so that functions can be shown to
pass existing tests during refactoring and then tested for new behavior.
It can be removed after completion of the work.
"""
# The original categorical functions applied a palette to the categorical axis
# by default. We want to require an explicit hue mapping, to be more consistent
# with how things work elsewhere now. I don't think there's any good way to
# do this gently -- because it's triggered by the default value of hue=None,
# users would always get a warning, unless we introduce some sentinel "default"
# argument for this change. That's possible, but asking users to set `hue=None`
# on every call is annoying.
# We are keeping the logic for implementing the old behavior in with the current
# system so that (a) we can punt on that decision and (b) we can ensure that
# refactored code passes old tests.
default_behavior = color is None or palette is not None
if force_hue and "hue" not in self.variables and default_behavior:
self._redundant_hue = True
self.plot_data["hue"] = self.plot_data[self.orient]
self.variables["hue"] = self.variables[self.orient]
self.var_types["hue"] = "categorical"
hue_order = self.var_levels[self.orient]
# Because we convert the categorical axis variable to string,
# we need to update a dictionary palette too
if isinstance(palette, dict):
palette = {str(k): v for k, v in palette.items()}
else:
if "hue" in self.variables:
redundant = (self.plot_data["hue"] == self.plot_data[self.orient]).all()
else:
redundant = False
self._redundant_hue = redundant
# Previously, categorical plots had a trick where color= could seed the palette.
# Because that's an explicit parameterization, we are going to give it one
# release cycle with a warning before removing.
if "hue" in self.variables and palette is None and color is not None:
if not isinstance(color, str):
color = mpl.colors.to_hex(color)
palette = f"dark:{color}"
msg = (
"\n\nSetting a gradient palette using color= is deprecated and will be "
f"removed in v0.14.0. Set `palette='{palette}'` for the same effect.\n"
)
warnings.warn(msg, FutureWarning, stacklevel=3)
return palette, hue_order
def _palette_without_hue_backcompat(self, palette, hue_order):
"""Provide one cycle where palette= implies hue= when not provided"""
if "hue" not in self.variables and palette is not None:
msg = (
"\n\nPassing `palette` without assigning `hue` is deprecated "
f"and will be removed in v0.14.0. Assign the `{self.orient}` variable "
"to `hue` and set `legend=False` for the same effect.\n"
)
warnings.warn(msg, FutureWarning, stacklevel=3)
self.legend = False
self.plot_data["hue"] = self.plot_data[self.orient]
self.variables["hue"] = self.variables.get(self.orient)
self.var_types["hue"] = self.var_types.get(self.orient)
hue_order = self.var_levels.get(self.orient)
self._var_levels.pop("hue", None)
return hue_order
def _point_kwargs_backcompat(self, scale, join, kwargs):
"""Provide two cycles where scale= and join= work, but redirect to kwargs."""
if scale is not deprecated:
lw = mpl.rcParams["lines.linewidth"] * 1.8 * scale
mew = lw * .75
ms = lw * 2
msg = (
"\n\n"
"The `scale` parameter is deprecated and will be removed in v0.15.0. "
"You can now control the size of each plot element using matplotlib "
"`Line2D` parameters (e.g., `linewidth`, `markersize`, etc.)."
"\n"
)
warnings.warn(msg, stacklevel=3)
kwargs.update(linewidth=lw, markeredgewidth=mew, markersize=ms)
if join is not deprecated:
msg = (
"\n\n"
"The `join` parameter is deprecated and will be removed in v0.15.0."
)
if not join:
msg += (
" You can remove the line between points with `linestyle='none'`."
)
kwargs.update(linestyle="")
msg += "\n"
warnings.warn(msg, stacklevel=3)
def _err_kws_backcompat(self, err_kws, errcolor, errwidth, capsize):
"""Provide two cycles where existing signature-level err_kws are handled."""
def deprecate_err_param(name, key, val):
if val is deprecated:
return
suggest = f"err_kws={{'{key}': {val!r}}}"
msg = (
f"\n\nThe `{name}` parameter is deprecated. And will be removed "
f"in v0.15.0. Pass `{suggest}` instead.\n"
)
warnings.warn(msg, FutureWarning, stacklevel=4)
err_kws[key] = val
if errcolor is not None:
deprecate_err_param("errcolor", "color", errcolor)
deprecate_err_param("errwidth", "linewidth", errwidth)
if capsize is None:
capsize = 0
msg = (
"\n\nPassing `capsize=None` is deprecated and will be removed "
"in v0.15.0. Pass `capsize=0` to disable caps.\n"
)
warnings.warn(msg, FutureWarning, stacklevel=3)
return err_kws, capsize
def _violin_scale_backcompat(self, scale, scale_hue, density_norm, common_norm):
"""Provide two cycles of backcompat for scale kwargs"""
if scale is not deprecated:
density_norm = scale
msg = (
"\n\nThe `scale` parameter has been renamed and will be removed "
f"in v0.15.0. Pass `density_norm={scale!r}` for the same effect."
)
warnings.warn(msg, FutureWarning, stacklevel=3)
if scale_hue is not deprecated:
common_norm = scale_hue
msg = (
"\n\nThe `scale_hue` parameter has been replaced and will be removed "
f"in v0.15.0. Pass `common_norm={not scale_hue}` for the same effect."
)
warnings.warn(msg, FutureWarning, stacklevel=3)
return density_norm, common_norm
def _violin_bw_backcompat(self, bw, bw_method):
"""Provide two cycles of backcompat for violin bandwidth parameterization."""
if bw is not deprecated:
bw_method = bw
msg = dedent(f"""\n
The `bw` parameter is deprecated in favor of `bw_method`/`bw_adjust`.
Setting `bw_method={bw!r}`, but please see docs for the new parameters
and update your code. This will become an error in seaborn v0.15.0.
""")
warnings.warn(msg, FutureWarning, stacklevel=3)
return bw_method
def _boxen_scale_backcompat(self, scale, width_method):
"""Provide two cycles of backcompat for scale kwargs"""
if scale is not deprecated:
width_method = scale
msg = (
"\n\nThe `scale` parameter has been renamed to `width_method` and "
f"will be removed in v0.15. Pass `width_method={scale!r}"
)
if scale == "area":
msg += ", but note that the result for 'area' will appear different."
else:
msg += " for the same effect."
warnings.warn(msg, FutureWarning, stacklevel=3)
return width_method
def _complement_color(self, color, base_color, hue_map):
"""Allow a color to be set automatically using a basis of comparison."""
if color == "gray":
msg = (
'Use "auto" to set automatic grayscale colors. From v0.14.0, '
'"gray" will default to matplotlib\'s definition.'
)
warnings.warn(msg, FutureWarning, stacklevel=3)
color = "auto"
elif color is None or color is default:
color = "auto"
if color != "auto":
return color
if hue_map.lookup_table is None:
if base_color is None:
return None
basis = [mpl.colors.to_rgb(base_color)]
else:
basis = [mpl.colors.to_rgb(c) for c in hue_map.lookup_table.values()]
unique_colors = np.unique(basis, axis=0)
light_vals = [rgb_to_hls(*rgb[:3])[1] for rgb in unique_colors]
lum = min(light_vals) * .6
return (lum, lum, lum)
def _map_prop_with_hue(self, name, value, fallback, plot_kws):
"""Support pointplot behavior of modifying the marker/linestyle with hue."""
if value is default:
value = plot_kws.pop(name, fallback)
if "hue" in self.variables:
levels = self._hue_map.levels
if isinstance(value, list):
mapping = {k: v for k, v in zip(levels, value)}
else:
mapping = {k: value for k in levels}
else:
mapping = {None: value}
return mapping
def _adjust_cat_axis(self, ax, axis):
"""Set ticks and limits for a categorical variable."""
# Note: in theory, this could happen in _attach for all categorical axes
# But two reasons not to do that:
# - If it happens before plotting, autoscaling messes up the plot limits
# - It would change existing plots from other seaborn functions
if self.var_types[axis] != "categorical":
return
# If both x/y data are empty, the correct way to set up the plot is
# somewhat undefined; because we don't add null category data to the plot in
# this case we don't *have* a categorical axis (yet), so best to just bail.
if self.plot_data[axis].empty:
return
# We can infer the total number of categories (including those from previous
# plots that are not part of the plot we are currently making) from the number
# of ticks, which matplotlib sets up while doing unit conversion. This feels
# slightly risky, as if we are relying on something that may be a matplotlib
# implementation detail. But I cannot think of a better way to keep track of
# the state from previous categorical calls (see GH2516 for context)
n = len(getattr(ax, f"get_{axis}ticks")())
if axis == "x":
ax.xaxis.grid(False)
ax.set_xlim(-.5, n - .5, auto=None)
else:
ax.yaxis.grid(False)
# Note limits that correspond to previously-inverted y axis
ax.set_ylim(n - .5, -.5, auto=None)
def _dodge_needed(self):
"""Return True when use of `hue` would cause overlaps."""
groupers = list({self.orient, "col", "row"} & set(self.variables))
if "hue" in self.variables:
orient = self.plot_data[groupers].value_counts()
paired = self.plot_data[[*groupers, "hue"]].value_counts()
return orient.size != paired.size
return False
def _dodge(self, keys, data):
"""Apply a dodge transform to coordinates in place."""
if "hue" not in self.variables:
# Short-circuit if hue variable was not assigned
# We could potentially warn when hue=None, dodge=True, user may be confused
# But I think it's fine to just treat it as a no-op.
return
hue_idx = self._hue_map.levels.index(keys["hue"])
n = len(self._hue_map.levels)
data["width"] /= n
full_width = data["width"] * n
offset = data["width"] * hue_idx + data["width"] / 2 - full_width / 2
data[self.orient] += offset
def _invert_scale(self, ax, data, vars=("x", "y")):
"""Undo scaling after computation so data are plotted correctly."""
for var in vars:
_, inv = _get_transform_functions(ax, var[0])
if var == self.orient and "width" in data:
hw = data["width"] / 2
data["edge"] = inv(data[var] - hw)
data["width"] = inv(data[var] + hw) - data["edge"].to_numpy()
for suf in ["", "min", "max"]:
if (col := f"{var}{suf}") in data:
data[col] = inv(data[col])
def _configure_legend(self, ax, func, common_kws=None, semantic_kws=None):
if self.legend == "auto":
show_legend = not self._redundant_hue and self.input_format != "wide"
else:
show_legend = bool(self.legend)
if show_legend:
self.add_legend_data(ax, func, common_kws, semantic_kws=semantic_kws)
handles, _ = ax.get_legend_handles_labels()
if handles:
ax.legend(title=self.legend_title)
@property
def _native_width(self):
"""Return unit of width separating categories on native numeric scale."""
# Categorical data always have a unit width
if self.var_types[self.orient] == "categorical":
return 1
# Otherwise, define the width as the smallest space between observations
unique_values = np.unique(self.comp_data[self.orient])
if len(unique_values) > 1:
native_width = np.nanmin(np.diff(unique_values))
else:
native_width = 1
return native_width
def _nested_offsets(self, width, dodge):
"""Return offsets for each hue level for dodged plots."""
offsets = None
if "hue" in self.variables and self._hue_map.levels is not None:
n_levels = len(self._hue_map.levels)
if dodge:
each_width = width / n_levels
offsets = np.linspace(0, width - each_width, n_levels)
offsets -= offsets.mean()
else:
offsets = np.zeros(n_levels)
return offsets
# Note that the plotting methods here aim (in most cases) to produce the
# exact same artists as the original (pre 0.12) version of the code, so
# there is some weirdness that might not otherwise be clean or make sense in
# this context, such as adding empty artists for combinations of variables
# with no observations
def plot_strips(
self,
jitter,
dodge,
color,
plot_kws,
):
width = .8 * self._native_width
offsets = self._nested_offsets(width, dodge)
if jitter is True:
jlim = 0.1
else:
jlim = float(jitter)
if "hue" in self.variables and dodge and self._hue_map.levels is not None:
jlim /= len(self._hue_map.levels)
jlim *= self._native_width
jitterer = partial(np.random.uniform, low=-jlim, high=+jlim)
iter_vars = [self.orient]
if dodge:
iter_vars.append("hue")
ax = self.ax
dodge_move = jitter_move = 0
if "marker" in plot_kws and not MarkerStyle(plot_kws["marker"]).is_filled():
plot_kws.pop("edgecolor", None)
for sub_vars, sub_data in self.iter_data(iter_vars,
from_comp_data=True,
allow_empty=True):
ax = self._get_axes(sub_vars)
if offsets is not None and (offsets != 0).any():
dodge_move = offsets[sub_data["hue"].map(self._hue_map.levels.index)]
jitter_move = jitterer(size=len(sub_data)) if len(sub_data) > 1 else 0
adjusted_data = sub_data[self.orient] + dodge_move + jitter_move
sub_data[self.orient] = adjusted_data
self._invert_scale(ax, sub_data)
points = ax.scatter(sub_data["x"], sub_data["y"], color=color, **plot_kws)
if "hue" in self.variables:
points.set_facecolors(self._hue_map(sub_data["hue"]))
self._configure_legend(ax, _scatter_legend_artist, common_kws=plot_kws)
def plot_swarms(
self,
dodge,
color,
warn_thresh,
plot_kws,
):
width = .8 * self._native_width
offsets = self._nested_offsets(width, dodge)
iter_vars = [self.orient]
if dodge:
iter_vars.append("hue")
ax = self.ax
point_collections = {}
dodge_move = 0
if "marker" in plot_kws and not MarkerStyle(plot_kws["marker"]).is_filled():
plot_kws.pop("edgecolor", None)
for sub_vars, sub_data in self.iter_data(iter_vars,
from_comp_data=True,
allow_empty=True):
ax = self._get_axes(sub_vars)
if offsets is not None:
dodge_move = offsets[sub_data["hue"].map(self._hue_map.levels.index)]
if not sub_data.empty:
sub_data[self.orient] = sub_data[self.orient] + dodge_move
self._invert_scale(ax, sub_data)
points = ax.scatter(sub_data["x"], sub_data["y"], color=color, **plot_kws)
if "hue" in self.variables:
points.set_facecolors(self._hue_map(sub_data["hue"]))
if not sub_data.empty:
point_collections[(ax, sub_data[self.orient].iloc[0])] = points
beeswarm = Beeswarm(width=width, orient=self.orient, warn_thresh=warn_thresh)
for (ax, center), points in point_collections.items():
if points.get_offsets().shape[0] > 1:
def draw(points, renderer, *, center=center):
beeswarm(points, center)
if self.orient == "y":
scalex = False
scaley = ax.get_autoscaley_on()
else:
scalex = ax.get_autoscalex_on()
scaley = False
# This prevents us from undoing the nice categorical axis limits
# set in _adjust_cat_axis, because that method currently leave
# the autoscale flag in its original setting. It may be better
# to disable autoscaling there to avoid needing to do this.
fixed_scale = self.var_types[self.orient] == "categorical"
ax.update_datalim(points.get_datalim(ax.transData))
if not fixed_scale and (scalex or scaley):
ax.autoscale_view(scalex=scalex, scaley=scaley)
super(points.__class__, points).draw(renderer)
points.draw = draw.__get__(points)
_draw_figure(ax.figure)
self._configure_legend(ax, _scatter_legend_artist, plot_kws)
def plot_boxes(
self,
width,
dodge,
gap,
fill,
whis,
color,
linecolor,
linewidth,
fliersize,
plot_kws, # TODO rename user_kws?
):
iter_vars = ["hue"]
value_var = {"x": "y", "y": "x"}[self.orient]
def get_props(element, artist=mpl.lines.Line2D):
return normalize_kwargs(plot_kws.pop(f"{element}props", {}), artist)
if not fill and linewidth is None:
linewidth = mpl.rcParams["lines.linewidth"]
bootstrap = plot_kws.pop("bootstrap", mpl.rcParams["boxplot.bootstrap"])
plot_kws.setdefault("shownotches", plot_kws.pop("notch", False))
box_artist = mpl.patches.Rectangle if fill else mpl.lines.Line2D
props = {
"box": get_props("box", box_artist),
"median": get_props("median"),
"whisker": get_props("whisker"),
"flier": get_props("flier"),
"cap": get_props("cap"),
}
props["median"].setdefault("solid_capstyle", "butt")
props["whisker"].setdefault("solid_capstyle", "butt")
props["flier"].setdefault("markersize", fliersize)
orientation = {"x": "vertical", "y": "horizontal"}[self.orient]
ax = self.ax
for sub_vars, sub_data in self.iter_data(iter_vars,
from_comp_data=True,
allow_empty=False):
ax = self._get_axes(sub_vars)
grouped = sub_data.groupby(self.orient)[value_var]
positions = sorted(sub_data[self.orient].unique().astype(float))
value_data = [x.to_numpy() for _, x in grouped]
stats = pd.DataFrame(mpl.cbook.boxplot_stats(value_data, whis=whis,
bootstrap=bootstrap))
orig_width = width * self._native_width
data = pd.DataFrame({self.orient: positions, "width": orig_width})
if dodge:
self._dodge(sub_vars, data)
if gap:
data["width"] *= 1 - gap
capwidth = plot_kws.get("capwidths", 0.5 * data["width"])
self._invert_scale(ax, data)
_, inv = _get_transform_functions(ax, value_var)
for stat in ["mean", "med", "q1", "q3", "cilo", "cihi", "whislo", "whishi"]:
stats[stat] = inv(stats[stat])
stats["fliers"] = stats["fliers"].map(inv)
linear_orient_scale = getattr(ax, f"get_{self.orient}scale")() == "linear"
maincolor = self._hue_map(sub_vars["hue"]) if "hue" in sub_vars else color
if fill:
boxprops = {
"facecolor": maincolor, "edgecolor": linecolor, **props["box"]
}
medianprops = {"color": linecolor, **props["median"]}
whiskerprops = {"color": linecolor, **props["whisker"]}
flierprops = {"markeredgecolor": linecolor, **props["flier"]}
capprops = {"color": linecolor, **props["cap"]}
else:
boxprops = {"color": maincolor, **props["box"]}
medianprops = {"color": maincolor, **props["median"]}
whiskerprops = {"color": maincolor, **props["whisker"]}
flierprops = {"markeredgecolor": maincolor, **props["flier"]}
capprops = {"color": maincolor, **props["cap"]}
if linewidth is not None:
for prop_dict in [boxprops, medianprops, whiskerprops, capprops]:
prop_dict.setdefault("linewidth", linewidth)
default_kws = dict(
bxpstats=stats.to_dict("records"),
positions=data[self.orient],
# Set width to 0 to avoid going out of domain
widths=data["width"] if linear_orient_scale else 0,
patch_artist=fill,
manage_ticks=False,
boxprops=boxprops,
medianprops=medianprops,
whiskerprops=whiskerprops,
flierprops=flierprops,
capprops=capprops,
# Added in matplotlib 3.10; see below
# orientation=orientation
**(
{"vert": orientation == "vertical"}
if _version_predates(mpl, "3.10.0")
else {"orientation": orientation}
),
# added in matplotlib 3.6.0; see below
# capwidths=capwidth,
**(
{} if _version_predates(mpl, "3.6.0")
else {"capwidths": capwidth}
)
)
boxplot_kws = {**default_kws, **plot_kws}
artists = ax.bxp(**boxplot_kws)
# Reset artist widths after adding so everything stays positive
ori_idx = ["x", "y"].index(self.orient)
if not linear_orient_scale:
for i, box in enumerate(data.to_dict("records")):
p0 = box["edge"]
p1 = box["edge"] + box["width"]
if artists["boxes"]:
box_artist = artists["boxes"][i]
if fill:
box_verts = box_artist.get_path().vertices.T
else:
box_verts = box_artist.get_data()
box_verts[ori_idx][0] = p0
box_verts[ori_idx][3:] = p0
box_verts[ori_idx][1:3] = p1
if not fill:
# When fill is True, the data get changed in place
box_artist.set_data(box_verts)
ax.update_datalim(
np.transpose(box_verts),
updatex=self.orient == "x",
updatey=self.orient == "y",
)
if artists["medians"]:
verts = artists["medians"][i].get_xydata().T
verts[ori_idx][:] = p0, p1
artists["medians"][i].set_data(verts)
if artists["caps"]:
f_fwd, f_inv = _get_transform_functions(ax, self.orient)
for line in artists["caps"][2 * i:2 * i + 2]:
p0 = f_inv(f_fwd(box[self.orient]) - capwidth[i] / 2)
p1 = f_inv(f_fwd(box[self.orient]) + capwidth[i] / 2)
verts = line.get_xydata().T
verts[ori_idx][:] = p0, p1
line.set_data(verts)
ax.add_container(BoxPlotContainer(artists))
legend_artist = _get_patch_legend_artist(fill)
self._configure_legend(ax, legend_artist, boxprops)
def plot_boxens(
self,
width,
dodge,
gap,
fill,
color,
linecolor,
linewidth,
width_method,
k_depth,
outlier_prop,
trust_alpha,
showfliers,
box_kws,
flier_kws,
line_kws,
plot_kws,
):
iter_vars = [self.orient, "hue"]
value_var = {"x": "y", "y": "x"}[self.orient]
estimator = LetterValues(k_depth, outlier_prop, trust_alpha)
width_method_options = ["exponential", "linear", "area"]
_check_argument("width_method", width_method_options, width_method)
box_kws = plot_kws if box_kws is None else {**plot_kws, **box_kws}
flier_kws = {} if flier_kws is None else flier_kws.copy()
line_kws = {} if line_kws is None else line_kws.copy()
if linewidth is None:
if fill:
linewidth = 0.5 * mpl.rcParams["lines.linewidth"]
else:
linewidth = mpl.rcParams["lines.linewidth"]
ax = self.ax
for sub_vars, sub_data in self.iter_data(iter_vars,
from_comp_data=True,
allow_empty=False):
ax = self._get_axes(sub_vars)
_, inv_ori = _get_transform_functions(ax, self.orient)
_, inv_val = _get_transform_functions(ax, value_var)
# Statistics
lv_data = estimator(sub_data[value_var])
n = lv_data["k"] * 2 - 1
vals = lv_data["values"]
pos_data = pd.DataFrame({
self.orient: [sub_vars[self.orient]],
"width": [width * self._native_width],
})
if dodge:
self._dodge(sub_vars, pos_data)
if gap:
pos_data["width"] *= 1 - gap
# Letter-value boxes
levels = lv_data["levels"]
exponent = (levels - 1 - lv_data["k"]).astype(float)
if width_method == "linear":
rel_widths = levels + 1
elif width_method == "exponential":
rel_widths = 2 ** exponent
elif width_method == "area":
tails = levels < (lv_data["k"] - 1)
rel_widths = 2 ** (exponent - tails) / np.diff(lv_data["values"])
center = pos_data[self.orient].item()
widths = rel_widths / rel_widths.max() * pos_data["width"].item()
box_vals = inv_val(vals)
box_pos = inv_ori(center - widths / 2)
box_heights = inv_val(vals[1:]) - inv_val(vals[:-1])
box_widths = inv_ori(center + widths / 2) - inv_ori(center - widths / 2)
maincolor = self._hue_map(sub_vars["hue"]) if "hue" in sub_vars else color
flier_colors = {
"facecolor": "none", "edgecolor": ".45" if fill else maincolor
}
if fill:
cmap = light_palette(maincolor, as_cmap=True)
boxcolors = cmap(2 ** ((exponent + 2) / 3))
else:
boxcolors = maincolor
boxen = []
for i in range(n):
if self.orient == "x":
xy = (box_pos[i], box_vals[i])
w, h = (box_widths[i], box_heights[i])
else:
xy = (box_vals[i], box_pos[i])
w, h = (box_heights[i], box_widths[i])
boxen.append(Rectangle(xy, w, h))
if fill:
box_colors = {"facecolors": boxcolors, "edgecolors": linecolor}
else:
box_colors = {"facecolors": "none", "edgecolors": boxcolors}
collection_kws = {**box_colors, "linewidth": linewidth, **box_kws}
ax.add_collection(PatchCollection(boxen, **collection_kws), autolim=False)
ax.update_datalim(
np.column_stack([box_vals, box_vals]),
updatex=self.orient == "y",
updatey=self.orient == "x",
)
# Median line
med = lv_data["median"]
hw = pos_data["width"].item() / 2
if self.orient == "x":
x, y = inv_ori([center - hw, center + hw]), inv_val([med, med])
else:
x, y = inv_val([med, med]), inv_ori([center - hw, center + hw])
default_kws = {
"color": linecolor if fill else maincolor,
"solid_capstyle": "butt",
"linewidth": 1.25 * linewidth,
}
ax.plot(x, y, **{**default_kws, **line_kws})
# Outliers ("fliers")
if showfliers:
vals = inv_val(lv_data["fliers"])
pos = np.full(len(vals), inv_ori(pos_data[self.orient].item()))
x, y = (pos, vals) if self.orient == "x" else (vals, pos)
ax.scatter(x, y, **{**flier_colors, "s": 25, **flier_kws})
ax.autoscale_view(scalex=self.orient == "y", scaley=self.orient == "x")
legend_artist = _get_patch_legend_artist(fill)
common_kws = {**box_kws, "linewidth": linewidth, "edgecolor": linecolor}
self._configure_legend(ax, legend_artist, common_kws)
def plot_violins(
self,
width,
dodge,
gap,
split,
color,
fill,
linecolor,
linewidth,
inner,
density_norm,
common_norm,
kde_kws,
inner_kws,
plot_kws,
):
iter_vars = [self.orient, "hue"]
value_var = {"x": "y", "y": "x"}[self.orient]
inner_options = ["box", "quart", "stick", "point", None]
_check_argument("inner", inner_options, inner, prefix=True)
_check_argument("density_norm", ["area", "count", "width"], density_norm)
if linewidth is None:
if fill:
linewidth = 1.25 * mpl.rcParams["patch.linewidth"]
else:
linewidth = mpl.rcParams["lines.linewidth"]
if inner is not None and inner.startswith("box"):
box_width = inner_kws.pop("box_width", linewidth * 4.5)
whis_width = inner_kws.pop("whis_width", box_width / 3)
marker = inner_kws.pop("marker", "_" if self.orient == "x" else "|")
kde = KDE(**kde_kws)
ax = self.ax
violin_data = []
# Iterate through all the data splits once to compute the KDEs
for sub_vars, sub_data in self.iter_data(iter_vars,
from_comp_data=True,
allow_empty=False):
sub_data["weight"] = sub_data.get("weights", 1)
stat_data = kde._transform(sub_data, value_var, [])
maincolor = self._hue_map(sub_vars["hue"]) if "hue" in sub_vars else color
if not fill:
linecolor = maincolor
maincolor = "none"
default_kws = dict(
facecolor=maincolor,
edgecolor=linecolor,
linewidth=linewidth,
)
violin_data.append({
"position": sub_vars[self.orient],
"observations": sub_data[value_var],
"density": stat_data["density"],
"support": stat_data[value_var],
"kwargs": {**default_kws, **plot_kws},
"sub_vars": sub_vars,
"ax": self._get_axes(sub_vars),
})
# Once we've computed all the KDEs, get statistics for normalization
def vars_to_key(sub_vars):
return tuple((k, v) for k, v in sub_vars.items() if k != self.orient)
norm_keys = [vars_to_key(violin["sub_vars"]) for violin in violin_data]
if common_norm:
common_max_density = np.nanmax([v["density"].max() for v in violin_data])
common_max_count = np.nanmax([len(v["observations"]) for v in violin_data])
max_density = {key: common_max_density for key in norm_keys}
max_count = {key: common_max_count for key in norm_keys}
else:
with warnings.catch_warnings():
# Ignore warning when all violins are singular; it's not important
warnings.filterwarnings('ignore', "All-NaN (slice|axis) encountered")
max_density = {
key: np.nanmax([
v["density"].max() for v in violin_data
if vars_to_key(v["sub_vars"]) == key
]) for key in norm_keys
}
max_count = {
key: np.nanmax([
len(v["observations"]) for v in violin_data
if vars_to_key(v["sub_vars"]) == key
]) for key in norm_keys
}
real_width = width * self._native_width
# Now iterate through the violins again to apply the normalization and plot
for violin in violin_data:
index = pd.RangeIndex(0, max(len(violin["support"]), 1))
data = pd.DataFrame({
self.orient: violin["position"],
value_var: violin["support"],
"density": violin["density"],
"width": real_width,
}, index=index)
if dodge:
self._dodge(violin["sub_vars"], data)
if gap:
data["width"] *= 1 - gap
# Normalize the density across the distribution(s) and relative to the width
norm_key = vars_to_key(violin["sub_vars"])
hw = data["width"] / 2
peak_density = violin["density"].max()
if np.isnan(peak_density):
span = 1
elif density_norm == "area":
span = data["density"] / max_density[norm_key]
elif density_norm == "count":
count = len(violin["observations"])
span = data["density"] / peak_density * (count / max_count[norm_key])
elif density_norm == "width":
span = data["density"] / peak_density
span = span * hw * (2 if split else 1)
# Handle split violins (i.e. asymmetric spans)
right_side = (
0 if "hue" not in self.variables
else self._hue_map.levels.index(violin["sub_vars"]["hue"]) % 2
)
if split:
offsets = (hw, span - hw) if right_side else (span - hw, hw)
else:
offsets = span, span
ax = violin["ax"]
_, invx = _get_transform_functions(ax, "x")
_, invy = _get_transform_functions(ax, "y")
inv_pos = {"x": invx, "y": invy}[self.orient]
inv_val = {"x": invx, "y": invy}[value_var]
linecolor = violin["kwargs"]["edgecolor"]
# Handle singular datasets (one or more observations with no variance
if np.isnan(peak_density):
pos = data[self.orient].iloc[0]
val = violin["observations"].mean()
if self.orient == "x":
x, y = [pos - offsets[0], pos + offsets[1]], [val, val]
else:
x, y = [val, val], [pos - offsets[0], pos + offsets[1]]
ax.plot(invx(x), invy(y), color=linecolor, linewidth=linewidth)
continue
# Plot the main violin body
plot_func = {"x": ax.fill_betweenx, "y": ax.fill_between}[self.orient]
plot_func(
inv_val(data[value_var]),
inv_pos(data[self.orient] - offsets[0]),
inv_pos(data[self.orient] + offsets[1]),
**violin["kwargs"]
)
# Adjust the observation data
obs = violin["observations"]
pos_dict = {self.orient: violin["position"], "width": real_width}
if dodge:
self._dodge(violin["sub_vars"], pos_dict)
if gap:
pos_dict["width"] *= (1 - gap)
# --- Plot the inner components
if inner is None:
continue
elif inner.startswith("point"):
pos = np.array([pos_dict[self.orient]] * len(obs))
if split:
pos += (-1 if right_side else 1) * pos_dict["width"] / 2
x, y = (pos, obs) if self.orient == "x" else (obs, pos)
kws = {
"color": linecolor,
"edgecolor": linecolor,
"s": (linewidth * 2) ** 2,
"zorder": violin["kwargs"].get("zorder", 2) + 1,
**inner_kws,
}
ax.scatter(invx(x), invy(y), **kws)
elif inner.startswith("stick"):
pos0 = np.interp(obs, data[value_var], data[self.orient] - offsets[0])
pos1 = np.interp(obs, data[value_var], data[self.orient] + offsets[1])
pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])
val_pts = np.stack([inv_val(obs), inv_val(obs)])
segments = np.stack([pos_pts, val_pts]).transpose(2, 1, 0)
if self.orient == "y":
segments = segments[:, :, ::-1]
kws = {
"color": linecolor,
"linewidth": linewidth / 2,
**inner_kws,
}
lines = mpl.collections.LineCollection(segments, **kws)
ax.add_collection(lines, autolim=False)
elif inner.startswith("quart"):
stats = np.percentile(obs, [25, 50, 75])
pos0 = np.interp(stats, data[value_var], data[self.orient] - offsets[0])
pos1 = np.interp(stats, data[value_var], data[self.orient] + offsets[1])
pos_pts = np.stack([inv_pos(pos0), inv_pos(pos1)])
val_pts = np.stack([inv_val(stats), inv_val(stats)])
segments = np.stack([pos_pts, val_pts]).transpose(2, 0, 1)
if self.orient == "y":
segments = segments[:, ::-1, :]
dashes = [(1.25, .75), (2.5, 1), (1.25, .75)]
for i, segment in enumerate(segments):
kws = {
"color": linecolor,
"linewidth": linewidth,
"dashes": dashes[i],
**inner_kws,
}
ax.plot(*segment, **kws)
elif inner.startswith("box"):
stats = mpl.cbook.boxplot_stats(obs)[0]
pos = np.array(pos_dict[self.orient])
if split:
pos += (-1 if right_side else 1) * pos_dict["width"] / 2
pos = [pos, pos], [pos, pos], [pos]
val = (
[stats["whislo"], stats["whishi"]],
[stats["q1"], stats["q3"]],
[stats["med"]]
)
if self.orient == "x":
(x0, x1, x2), (y0, y1, y2) = pos, val
else:
(x0, x1, x2), (y0, y1, y2) = val, pos
if split:
offset = (1 if right_side else -1) * box_width / 72 / 2
dx, dy = (offset, 0) if self.orient == "x" else (0, -offset)
trans = ax.transData + mpl.transforms.ScaledTranslation(
dx, dy, ax.figure.dpi_scale_trans,
)
else:
trans = ax.transData
line_kws = {
"color": linecolor,
"transform": trans,
**inner_kws,
"linewidth": whis_width,
}
ax.plot(invx(x0), invy(y0), **line_kws)
line_kws["linewidth"] = box_width
ax.plot(invx(x1), invy(y1), **line_kws)
dot_kws = {
"marker": marker,
"markersize": box_width / 1.2,
"markeredgewidth": box_width / 5,
"transform": trans,
**inner_kws,
"markeredgecolor": "w",
"markerfacecolor": "w",
"color": linecolor, # simplify tests
}
ax.plot(invx(x2), invy(y2), **dot_kws)
legend_artist = _get_patch_legend_artist(fill)
common_kws = {**plot_kws, "linewidth": linewidth, "edgecolor": linecolor}
self._configure_legend(ax, legend_artist, common_kws)
def plot_points(
self,
aggregator,
markers,
linestyles,
dodge,
color,
capsize,
err_kws,
plot_kws,
):
agg_var = {"x": "y", "y": "x"}[self.orient]
iter_vars = ["hue"]
plot_kws = normalize_kwargs(plot_kws, mpl.lines.Line2D)
plot_kws.setdefault("linewidth", mpl.rcParams["lines.linewidth"] * 1.8)
plot_kws.setdefault("markeredgewidth", plot_kws["linewidth"] * 0.75)
plot_kws.setdefault("markersize", plot_kws["linewidth"] * np.sqrt(2 * np.pi))
markers = self._map_prop_with_hue("marker", markers, "o", plot_kws)
linestyles = self._map_prop_with_hue("linestyle", linestyles, "-", plot_kws)
base_positions = self.var_levels[self.orient]
if self.var_types[self.orient] == "categorical":
min_cat_val = int(self.comp_data[self.orient].min())
max_cat_val = int(self.comp_data[self.orient].max())
base_positions = [i for i in range(min_cat_val, max_cat_val + 1)]
n_hue_levels = 0 if self._hue_map.levels is None else len(self._hue_map.levels)
if dodge is True:
dodge = .025 * n_hue_levels
ax = self.ax
for sub_vars, sub_data in self.iter_data(iter_vars,
from_comp_data=True,
allow_empty=True):
ax = self._get_axes(sub_vars)
ori_axis = getattr(ax, f"{self.orient}axis")
transform, _ = _get_transform_functions(ax, self.orient)
positions = transform(ori_axis.convert_units(base_positions))
agg_data = sub_data if sub_data.empty else (
sub_data
.groupby(self.orient)
.apply(aggregator, agg_var, **groupby_apply_include_groups(False))
.reindex(pd.Index(positions, name=self.orient))
.reset_index()
)
if dodge:
hue_idx = self._hue_map.levels.index(sub_vars["hue"])
step_size = dodge / (n_hue_levels - 1)
offset = -dodge / 2 + step_size * hue_idx
agg_data[self.orient] += offset * self._native_width
self._invert_scale(ax, agg_data)
sub_kws = plot_kws.copy()
sub_kws.update(
marker=markers[sub_vars.get("hue")],
linestyle=linestyles[sub_vars.get("hue")],
color=self._hue_map(sub_vars["hue"]) if "hue" in sub_vars else color,
)
line, = ax.plot(agg_data["x"], agg_data["y"], **sub_kws)
sub_err_kws = err_kws.copy()
line_props = line.properties()
for prop in ["color", "linewidth", "alpha", "zorder"]:
sub_err_kws.setdefault(prop, line_props[prop])
if aggregator.error_method is not None:
self.plot_errorbars(ax, agg_data, capsize, sub_err_kws)
legend_artist = partial(mpl.lines.Line2D, [], [])
semantic_kws = {"hue": {"marker": markers, "linestyle": linestyles}}
self._configure_legend(ax, legend_artist, sub_kws, semantic_kws)
def plot_bars(
self,
aggregator,
dodge,
gap,
width,
fill,
color,
capsize,
err_kws,
plot_kws,
):
agg_var = {"x": "y", "y": "x"}[self.orient]
iter_vars = ["hue"]
ax = self.ax
if self._hue_map.levels is None:
dodge = False
if dodge and capsize is not None:
capsize = capsize / len(self._hue_map.levels)
if not fill:
plot_kws.setdefault("linewidth", 1.5 * mpl.rcParams["lines.linewidth"])
err_kws.setdefault("linewidth", 1.5 * mpl.rcParams["lines.linewidth"])
for sub_vars, sub_data in self.iter_data(iter_vars,
from_comp_data=True,
allow_empty=True):
ax = self._get_axes(sub_vars)
agg_data = sub_data if sub_data.empty else (
sub_data
.groupby(self.orient)
.apply(aggregator, agg_var, **groupby_apply_include_groups(False))
.reset_index()
)
agg_data["width"] = width * self._native_width
if dodge:
self._dodge(sub_vars, agg_data)
if gap:
agg_data["width"] *= 1 - gap
agg_data["edge"] = agg_data[self.orient] - agg_data["width"] / 2
self._invert_scale(ax, agg_data)
if self.orient == "x":
bar_func = ax.bar
kws = dict(
x=agg_data["edge"], height=agg_data["y"], width=agg_data["width"]
)
else:
bar_func = ax.barh
kws = dict(
y=agg_data["edge"], width=agg_data["x"], height=agg_data["width"]
)
main_color = self._hue_map(sub_vars["hue"]) if "hue" in sub_vars else color
# Set both color and facecolor for property cycle logic
kws["align"] = "edge"
if fill:
kws.update(color=main_color, facecolor=main_color)
else:
kws.update(color=main_color, edgecolor=main_color, facecolor="none")
bar_func(**{**kws, **plot_kws})
if aggregator.error_method is not None:
self.plot_errorbars(
ax, agg_data, capsize,
{"color": ".26" if fill else main_color, **err_kws}
)
legend_artist = _get_patch_legend_artist(fill)
self._configure_legend(ax, legend_artist, plot_kws)
def plot_errorbars(self, ax, data, capsize, err_kws):
var = {"x": "y", "y": "x"}[self.orient]
for row in data.to_dict("records"):
row = dict(row)
pos = np.array([row[self.orient], row[self.orient]])
val = np.array([row[f"{var}min"], row[f"{var}max"]])
if capsize:
cw = capsize * self._native_width / 2
scl, inv = _get_transform_functions(ax, self.orient)
cap = inv(scl(pos[0]) - cw), inv(scl(pos[1]) + cw)
pos = np.concatenate([
[*cap, np.nan], pos, [np.nan, *cap]
])
val = np.concatenate([
[val[0], val[0], np.nan], val, [np.nan, val[-1], val[-1]],
])
if self.orient == "x":
args = pos, val
else:
args = val, pos
ax.plot(*args, **err_kws)
| _CategoricalPlotter |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 59313,
"end": 59479
} | class ____(themeable):
"""
Legend key background width
Parameters
----------
theme_element : float
Value in points
"""
| legend_key_width |
python | plotly__plotly.py | plotly/graph_objs/choropleth/colorbar/title/_font.py | {
"start": 233,
"end": 9929
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "choropleth.colorbar.title"
_path_str = "choropleth.colorbar.title.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this color bar's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.choropleth.col
orbar.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.choropleth.colorbar.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choropleth.colorbar.title.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/triggers/test_serialization.py | {
"start": 4032,
"end": 14669
} | class ____:
@pytest.mark.parametrize(
"trigger",
[
AthenaTrigger("query_id", 1, 5, "aws connection"),
BatchJobTrigger(
job_id=BATCH_JOB_ID,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
aws_conn_id=AWS_CONN_ID,
region_name=AWS_REGION,
),
BatchCreateComputeEnvironmentTrigger(
compute_env_arn="my_arn",
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
aws_conn_id=AWS_CONN_ID,
region_name=AWS_REGION,
),
ClusterActiveTrigger(
cluster_arn="my_arn",
aws_conn_id="my_conn",
waiter_delay=1,
waiter_max_attempts=2,
region_name="my_region",
),
ClusterInactiveTrigger(
cluster_arn="my_arn",
aws_conn_id="my_conn",
waiter_delay=1,
waiter_max_attempts=2,
region_name="my_region",
),
EksCreateFargateProfileTrigger(
cluster_name=TEST_CLUSTER_IDENTIFIER,
fargate_profile_name=TEST_FARGATE_PROFILE_NAME,
aws_conn_id=AWS_CONN_ID,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
),
EksDeleteFargateProfileTrigger(
cluster_name=TEST_CLUSTER_IDENTIFIER,
fargate_profile_name=TEST_FARGATE_PROFILE_NAME,
aws_conn_id=AWS_CONN_ID,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
),
EksCreateNodegroupTrigger(
cluster_name=TEST_CLUSTER_IDENTIFIER,
nodegroup_name=TEST_NODEGROUP_NAME,
aws_conn_id=AWS_CONN_ID,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
region_name=AWS_REGION,
),
EksDeleteNodegroupTrigger(
cluster_name=TEST_CLUSTER_IDENTIFIER,
nodegroup_name=TEST_NODEGROUP_NAME,
aws_conn_id=AWS_CONN_ID,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
region_name=AWS_REGION,
),
EksCreateClusterTrigger(
cluster_name=TEST_CLUSTER_IDENTIFIER,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=WAITER_DELAY,
aws_conn_id=AWS_CONN_ID,
region_name=AWS_REGION,
),
EksDeleteClusterTrigger(
cluster_name=TEST_CLUSTER_IDENTIFIER,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=WAITER_DELAY,
aws_conn_id=AWS_CONN_ID,
region_name=AWS_REGION,
force_delete_compute=True,
),
EmrAddStepsTrigger(
job_flow_id=TEST_JOB_FLOW_ID,
step_ids=["my_step1", "my_step2"],
aws_conn_id=AWS_CONN_ID,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
),
EmrCreateJobFlowTrigger(
job_flow_id=TEST_JOB_FLOW_ID,
aws_conn_id=AWS_CONN_ID,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
),
EmrTerminateJobFlowTrigger(
job_flow_id=TEST_JOB_FLOW_ID,
aws_conn_id=AWS_CONN_ID,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
),
EmrContainerTrigger(
virtual_cluster_id=VIRTUAL_CLUSTER_ID,
job_id=JOB_ID,
aws_conn_id=AWS_CONN_ID,
waiter_delay=WAITER_DELAY,
),
EmrStepSensorTrigger(
job_flow_id=TEST_JOB_FLOW_ID,
step_id=STEP_ID,
aws_conn_id=AWS_CONN_ID,
waiter_delay=WAITER_DELAY,
),
EmrServerlessCreateApplicationTrigger(
application_id=TEST_APPLICATION_ID,
aws_conn_id=AWS_CONN_ID,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
),
EmrServerlessStartApplicationTrigger(
application_id=TEST_APPLICATION_ID,
aws_conn_id=AWS_CONN_ID,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
),
EmrServerlessStopApplicationTrigger(
application_id=TEST_APPLICATION_ID,
aws_conn_id=AWS_CONN_ID,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
),
EmrServerlessDeleteApplicationTrigger(
application_id=TEST_APPLICATION_ID,
aws_conn_id=AWS_CONN_ID,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
),
EmrServerlessCancelJobsTrigger(
application_id=TEST_APPLICATION_ID,
aws_conn_id=AWS_CONN_ID,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
),
EmrServerlessStartJobTrigger(
application_id=TEST_APPLICATION_ID,
job_id=TEST_JOB_ID,
aws_conn_id=AWS_CONN_ID,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
),
GlueCrawlerCompleteTrigger(crawler_name="my_crawler", waiter_delay=2, aws_conn_id="my_conn_id"),
GlueCatalogPartitionTrigger(
database_name="my_database",
table_name="my_table",
expression="my_expression",
aws_conn_id="my_conn_id",
),
LambdaCreateFunctionCompleteTrigger(
function_name=TEST_FUNCTION_NAME,
function_arn=TEST_ARN,
aws_conn_id=AWS_CONN_ID,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
),
RedshiftCreateClusterTrigger(
cluster_identifier=TEST_CLUSTER_IDENTIFIER,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
aws_conn_id=AWS_CONN_ID,
),
RedshiftPauseClusterTrigger(
cluster_identifier=TEST_CLUSTER_IDENTIFIER,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
aws_conn_id=AWS_CONN_ID,
),
RedshiftCreateClusterSnapshotTrigger(
cluster_identifier=TEST_CLUSTER_IDENTIFIER,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
aws_conn_id=AWS_CONN_ID,
),
RedshiftResumeClusterTrigger(
cluster_identifier=TEST_CLUSTER_IDENTIFIER,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
aws_conn_id=AWS_CONN_ID,
),
RedshiftDeleteClusterTrigger(
cluster_identifier=TEST_CLUSTER_IDENTIFIER,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
aws_conn_id=AWS_CONN_ID,
),
RdsDbAvailableTrigger(
db_identifier=TEST_DB_INSTANCE_IDENTIFIER,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
aws_conn_id=AWS_CONN_ID,
region_name=AWS_REGION,
response=TEST_RESPONSE,
db_type=RdsDbType.INSTANCE,
),
RdsDbDeletedTrigger(
db_identifier=TEST_DB_INSTANCE_IDENTIFIER,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
aws_conn_id=AWS_CONN_ID,
region_name=AWS_REGION,
response=TEST_RESPONSE,
db_type=RdsDbType.INSTANCE,
),
RdsDbStoppedTrigger(
db_identifier=TEST_DB_INSTANCE_IDENTIFIER,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
aws_conn_id=AWS_CONN_ID,
region_name=AWS_REGION,
response=TEST_RESPONSE,
db_type=RdsDbType.INSTANCE,
),
SqsSensorTrigger(
sqs_queue=TEST_SQS_QUEUE,
aws_conn_id=AWS_CONN_ID,
max_messages=TEST_MAX_MESSAGES,
num_batches=TEST_NUM_BATCHES,
wait_time_seconds=TEST_WAIT_TIME_SECONDS,
visibility_timeout=TEST_VISIBILITY_TIMEOUT,
message_filtering="literal",
message_filtering_match_values=TEST_MESSAGE_FILTERING_MATCH_VALUES,
message_filtering_config=TEST_MESSAGE_FILTERING_CONFIG,
delete_message_on_reception=TEST_DELETE_MESSAGE_ON_RECEPTION,
waiter_delay=WAITER_DELAY,
),
StepFunctionsExecutionCompleteTrigger(
execution_arn=TEST_ARN,
aws_conn_id=AWS_CONN_ID,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=MAX_ATTEMPTS,
region_name=AWS_REGION,
),
],
ids=gen_test_name,
)
def test_serialize_recreate(self, trigger):
# generate the DB object from the trigger
trigger_db: Trigger = Trigger.from_object(trigger)
# serialize/deserialize using the same method that is used when inserting in DB
json_params = BaseSerialization.serialize(trigger_db.kwargs)
retrieved_params = BaseSerialization.deserialize(json_params)
# recreate a new trigger object from the data we would have in DB
clazz = TriggerRunner().get_trigger_by_classpath(trigger_db.classpath)
# noinspection PyArgumentList
instance = clazz(**retrieved_params)
# recreate a DB column object from the new trigger so that we can easily compare attributes
trigger_db_2: Trigger = Trigger.from_object(instance)
assert trigger_db.classpath == trigger_db_2.classpath
assert trigger_db.kwargs == trigger_db_2.kwargs
| TestTriggersSerialization |
python | hynek__structlog | tests/test_stdlib.py | {
"start": 5651,
"end": 12511
} | class ____:
@pytest.mark.parametrize(
("method_name"),
["debug", "info", "warning", "error", "exception", "critical"],
)
def test_proxies_to_correct_method(self, method_name):
"""
The basic proxied methods are proxied to the correct counterparts.
"""
bl = BoundLogger(ReturnLogger(), [return_method_name], {})
assert method_name == getattr(bl, method_name)("event")
def test_proxies_to_correct_method_special_cases(self):
"""
Fatal maps to critical and warn to warning.
"""
bl = BoundLogger(ReturnLogger(), [return_method_name], {})
assert "warning" == bl.warn("event")
assert "critical" == bl.fatal("event")
def test_proxies_log(self):
"""
BoundLogger.exception.log() is proxied to the appropriate method.
"""
bl = BoundLogger(ReturnLogger(), [return_method_name], {})
assert "critical" == bl.log(50, "event")
assert "debug" == bl.log(10, "event")
def test_positional_args_proxied(self):
"""
Positional arguments supplied are proxied as kwarg.
"""
bl = BoundLogger(ReturnLogger(), [], {})
_args, kwargs = bl.debug("event", "foo", bar="baz")
assert "baz" == kwargs.get("bar")
assert ("foo",) == kwargs.get("positional_args")
@pytest.mark.parametrize(
"attribute_name",
["name", "level", "parent", "propagate", "handlers", "disabled"],
)
def test_stdlib_passthrough_attributes(self, attribute_name):
"""
stdlib logger attributes are also available in stdlib BoundLogger.
"""
stdlib_logger = logging.getLogger("Test")
stdlib_logger_attribute = getattr(stdlib_logger, attribute_name)
bl = BoundLogger(stdlib_logger, [], {})
bound_logger_attribute = getattr(bl, attribute_name)
assert bound_logger_attribute == stdlib_logger_attribute
@pytest.mark.parametrize(
("method_name", "method_args"),
[
("addHandler", [None]),
("removeHandler", [None]),
("hasHandlers", None),
("callHandlers", [None]),
("handle", [None]),
("setLevel", [None]),
("getEffectiveLevel", None),
("isEnabledFor", [None]),
("findCaller", None),
(
"makeRecord",
[
"name",
"debug",
"test_func",
"1",
"test msg",
["foo"],
False,
],
),
("getChild", [None]),
],
)
def test_stdlib_passthrough_methods(self, method_name, method_args):
"""
stdlib logger methods are also available in stdlib BoundLogger.
"""
called_stdlib_method = [False]
def validate(*args, **kw):
called_stdlib_method[0] = True
stdlib_logger = logging.getLogger("Test")
stdlib_logger_method = getattr(stdlib_logger, method_name, None)
if stdlib_logger_method:
setattr(stdlib_logger, method_name, validate)
bl = BoundLogger(stdlib_logger, [], {})
bound_logger_method = getattr(bl, method_name)
assert bound_logger_method is not None
if method_args:
bound_logger_method(*method_args)
else:
bound_logger_method()
assert called_stdlib_method[0] is True
def test_exception_exc_info(self):
"""
BoundLogger.exception sets exc_info=True.
"""
bl = BoundLogger(ReturnLogger(), [], {})
assert ((), {"exc_info": True, "event": "event"}) == bl.exception(
"event"
)
def test_exception_exc_info_override(self):
"""
If *exc_info* is password to exception, it's used.
"""
bl = BoundLogger(ReturnLogger(), [], {})
assert ((), {"exc_info": 42, "event": "event"}) == bl.exception(
"event", exc_info=42
)
def test_proxies_bind(self):
"""
Bind calls the correct bind.
"""
bl = build_bl().bind(a=42)
assert {"a": 42} == get_context(bl)
def test_proxies_new(self):
"""
Newcalls the correct new.
"""
bl = build_bl().bind(a=42).new(b=23)
assert {"b": 23} == get_context(bl)
def test_proxies_unbind(self):
"""
Unbind calls the correct unbind.
"""
bl = build_bl().bind(a=42).unbind("a")
assert {} == get_context(bl)
def test_proxies_try_unbind(self):
"""
try_unbind calls the correct try_unbind.
"""
bl = build_bl().bind(a=42).try_unbind("a", "b")
assert {} == get_context(bl)
@pytest.mark.parametrize(
"meth", ["debug", "info", "warning", "error", "critical"]
)
async def test_async_log_methods(self, meth, cl):
"""
Async methods log async.
"""
bl = build_bl(cl, processors=[])
await getattr(bl, f"a{meth}")("Async!")
assert [
CapturedCall(method_name=meth, args=(), kwargs={"event": "Async!"})
] == cl.calls
async def test_async_log_methods_special_cases(self, cl):
"""
afatal maps to critical.
"""
bl = build_bl(cl, processors=[])
await bl.afatal("Async!")
assert [
CapturedCall(
method_name="critical", args=(), kwargs={"event": "Async!"}
)
] == cl.calls
async def test_alog(self, cl):
"""
Alog logs async at the correct level.
"""
bl = build_bl(cl, processors=[])
await bl.alog(logging.INFO, "foo %s", "bar")
assert [
CapturedCall(
method_name="info",
args=(),
kwargs={"positional_args": ("bar",), "event": "foo %s"},
)
] == cl.calls
async def test_aexception_exc_info_true(self, cl):
"""
aexception passes current exc_info into dispatch.
"""
bl = build_bl(cl, processors=[])
try:
raise ValueError(42)
except ValueError as e:
await bl.aexception("oops")
exc = e
(cc,) = cl.calls
assert isinstance(cc[2]["exc_info"], tuple)
assert exc == cc[2]["exc_info"][1]
async def test_aexception_exc_info_explicit(self, cl):
"""
In aexception, if exc_info isn't missing or True, leave it be.
"""
bl = build_bl(cl, processors=[])
obj = object()
await bl.aexception("ooops", exc_info=obj)
assert obj is cl.calls[0].kwargs["exc_info"]
| TestBoundLogger |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 13718,
"end": 14545
} | class ____(AssetEventDagRunReference):
@classmethod
def from_asset_event_dag_run_reference(
cls,
asset_event_dag_run_reference: AssetEventDagRunReference,
) -> AssetEventDagRunReferenceResult:
return cls(**asset_event_dag_run_reference.model_dump(exclude_defaults=True))
@cached_property
def source_task_instance(self) -> AssetEventSourceTaskInstance | None:
if not (self.source_task_id and self.source_dag_id and self.source_run_id):
return None
if self.source_map_index is None:
return None
return AssetEventSourceTaskInstance(
dag_id=self.source_dag_id,
task_id=self.source_task_id,
run_id=self.source_run_id,
map_index=self.source_map_index,
)
| AssetEventDagRunReferenceResult |
python | django-mptt__django-mptt | tests/myapp/admin.py | {
"start": 162,
"end": 258
} | class ____(MPTTModelAdmin):
pass
admin.site.register(Person, DraggableMPTTAdmin)
| CategoryAdmin |
python | great-expectations__great_expectations | great_expectations/core/config_peer.py | {
"start": 565,
"end": 3153
} | class ____(ABC):
"""
A ConfigPeer is an object, whose subclasses can be instantiated using instantiate_class_from_config() (located in
great_expectations/util.py). Its immediate descendant subclass must use a subclass of BaseYamlConfig as an argument
to its constructor, and the subsequent descendants must use only primitive types as their constructor arguments,
wherever keys correspond to the keys of the "BaseYamlConfig" configuration object counterpart. The name ConfigPeer
means: Every immediate descendant subclass must have Marshmallow Schema validated configuration class as its peer.
# TODO: <Alex>2/11/2022</Alex>
When -- as part of a potential future architecture update -- serialization is decoupled from configuration, the
configuration objects, persistable as YAML files, will no longer inherit from the BaseYamlConfig class. Rather,
any form of serialization (YAML, JSON, SQL Database Tables, Pickle, etc.) will apply as peers, independent of the
configuration classes themselves. Hence, as part of this change, ConfigPeer will cease being the superclass of
business objects (such as BaseDataContext, BaseCheckpoint, and BaseRuleBasedProfiler). Instead, every persistable
business object will contain a reference to its corresponding peer class, supporting the ConfigPeer interfaces.
""" # noqa: E501 # FIXME CoP
@property
@abstractmethod
def config(self) -> BaseYamlConfig:
pass
def get_config(
self,
mode: ConfigOutputModes = ConfigOutputModes.TYPED,
**kwargs,
) -> BaseYamlConfig | dict | str:
if isinstance(mode, str):
mode = ConfigOutputModes(mode.lower())
config: BaseYamlConfig = self.config
if mode == ConfigOutputModes.TYPED:
return config
if mode == ConfigOutputModes.COMMENTED_MAP:
return config.commented_map
if mode == ConfigOutputModes.YAML:
return config.to_yaml_str()
if mode == ConfigOutputModes.DICT:
config_kwargs: dict = config.to_dict()
elif mode == ConfigOutputModes.JSON_DICT:
config_kwargs = config.to_json_dict()
else:
raise ValueError(f'Unknown mode {mode} in "BaseCheckpoint.get_config()".') # noqa: TRY003 # FIXME CoP
kwargs["inplace"] = True
filter_properties_dict(
properties=config_kwargs,
**kwargs,
)
return config_kwargs
@override
def __repr__(self) -> str:
return str(self.get_config())
| ConfigPeer |
python | MongoEngine__mongoengine | tests/test_pymongo_support.py | {
"start": 131,
"end": 563
} | class ____(MongoDBTestCase):
def test_count_documents(self):
class Test(Document):
pass
Test.drop_collection()
Test().save()
Test().save()
assert count_documents(Test._get_collection(), filter={}) == 2
assert count_documents(Test._get_collection(), filter={}, skip=1) == 1
assert count_documents(Test._get_collection(), filter={}, limit=0) == 0
| TestPymongoSupport |
python | google__pytype | pytype/overlays/dataclass_overlay.py | {
"start": 827,
"end": 6443
} | class ____(classgen.Decorator):
"""Implements the @dataclass decorator."""
@classmethod
def make(cls, ctx, module="dataclasses"):
return super().make("dataclass", ctx, module)
@classmethod
def transform(cls, ctx, func):
"""Generate an instance for a func decorated with @dataclass_transform."""
# If an overlay subclasses dataclass_overlay.Dataclass we assume it
# implicitly handles @dataclass_transform itself
if isinstance(func, cls):
return func
ret = cls.make(ctx)
ret.name = func.name
ret.module = func.module
return ret
def _handle_initvar(self, node, cls, name, typ, orig):
"""Unpack or delete an initvar in the class annotations."""
initvar = match_initvar(typ)
if not initvar:
return None
# The InitVar annotation is not retained as a class member, but any default
# value is retained.
if orig is None:
# If an initvar does not have a default, it will not be a class member
# variable, so delete it from the annotated locals. Otherwise, leave the
# annotation as InitVar[...].
del self.ctx.vm.annotated_locals[cls.name][name]
else:
classgen.add_member(node, cls, name, initvar)
return initvar
def get_class_locals(self, node, cls):
del node
return classgen.get_class_locals(
cls.name,
allow_methods=True,
ordering=classgen.Ordering.FIRST_ANNOTATE,
ctx=self.ctx,
)
def decorate(self, node, cls):
"""Processes class members."""
# Collect classvars to convert them to attrs. @dataclass collects vars with
# an explicit type annotation, in order of annotation, so that e.g.
# class A:
# x: int
# y: str = 'hello'
# x = 10
# would have init(x:int = 10, y:str = 'hello')
own_attrs = []
cls_locals = self.get_class_locals(node, cls)
sticky_kwonly = False
for name, local in cls_locals.items():
typ, orig = local.get_type(node, name), local.orig
if (
isinstance(typ, abstract.PyTDClass)
and typ.full_name == "dataclasses.KW_ONLY"
):
if sticky_kwonly:
# TODO(mdemello): If both KW_ONLY tags are named `_` we only get one
# entry in cls_locals
self.ctx.errorlog.dataclass_error(
self.ctx.vm.stack(), "KW_ONLY can only be used once per class"
)
sticky_kwonly = True
continue
kind = ""
init = True
kw_only = sticky_kwonly
assert typ
if match_classvar(typ):
continue
initvar_typ = self._handle_initvar(node, cls, name, typ, orig)
if initvar_typ:
typ = initvar_typ
kind = classgen.AttributeKinds.INITVAR
else:
if not orig:
classgen.add_member(node, cls, name, typ)
if is_field(orig):
field = orig.data[0]
orig = field.default
init = field.init
if field.kw_only is not None:
kw_only = field.kw_only
if orig and orig.data == [self.ctx.convert.none]:
# vm._apply_annotation mostly takes care of checking that the default
# matches the declared type. However, it allows None defaults, and
# dataclasses do not.
self.ctx.check_annotation_type_mismatch(
node, name, typ, orig, local.stack, allow_none=False
)
attr = classgen.Attribute(
name=name,
typ=typ,
init=init,
kw_only=kw_only,
default=orig,
kind=kind,
)
own_attrs.append(attr)
cls.record_attr_ordering(own_attrs)
attrs = cls.compute_attr_metadata(own_attrs, "dataclasses.dataclass")
# Add an __init__ method if one doesn't exist already (dataclasses do not
# overwrite an explicit __init__ method).
if (
"__init__" not in cls.members
and self.args[cls]
and self.args[cls]["init"]
):
init_method = self.make_init(node, cls, attrs)
cls.members["__init__"] = init_method
# Add the __dataclass_fields__ attribute, the presence of which
# dataclasses.is_dataclass uses to determine if an object is a dataclass (or
# an instance of one).
attr_types = self.ctx.convert.merge_values({attr.typ for attr in attrs})
generic_field = abstract.ParameterizedClass(
self.ctx.convert.lookup_value("dataclasses", "Field"),
{abstract_utils.T: attr_types},
self.ctx,
)
dataclass_fields_params = {
abstract_utils.K: self.ctx.convert.str_type,
abstract_utils.V: generic_field,
}
dataclass_fields_typ = abstract.ParameterizedClass(
self.ctx.convert.dict_type, dataclass_fields_params, self.ctx
)
classgen.add_member(node, cls, "__dataclass_fields__", dataclass_fields_typ)
annotations_dict = classgen.get_or_create_annotations_dict(
cls.members, self.ctx
)
annotations_dict.annotated_locals["__dataclass_fields__"] = (
abstract_utils.Local(node, None, dataclass_fields_typ, None, self.ctx)
)
if isinstance(cls, abstract.InterpreterClass):
cls.decorators.append("dataclasses.dataclass")
# Fix up type parameters in methods added by the decorator.
cls.update_method_type_params()
cls.match_args = tuple(attr.name for attr in attrs)
match_args_params = {i: attr.typ for i, attr in enumerate(attrs)}
match_args_params[abstract_utils.T] = attr_types
match_args_typ = abstract.TupleClass(
self.ctx.convert.tuple_type, match_args_params, self.ctx
)
classgen.add_member(node, cls, "__match_args__", match_args_typ)
| Dataclass |
python | joke2k__faker | faker/providers/lorem/en_US/__init__.py | {
"start": 68,
"end": 66081
} | class ____(LoremProvider):
"""Implement lorem provider for ``en_US`` locale.
Word list is based on the source(s) below, and some words have been removed
to make the word list appropriate for public testing.
Sources:
- https://www.educall.com.tr/blog/post/500-most-common-english-verbs
- http://www.ef.edu/english-resources/english-vocabulary/top-1000-words/
- https://www.talkenglish.com/vocabulary/top-1500-nouns.aspx
- https://www.talkenglish.com/vocabulary/top-250-adverbs.aspx
- https://www.talkenglish.com/vocabulary/top-500-adjectives.aspx
"""
word_list = (
"a",
"ability",
"able",
"about",
"above",
"accept",
"according",
"account",
"across",
"act",
"action",
"activity",
"actually",
"add",
"address",
"administration",
"admit",
"adult",
"affect",
"after",
"again",
"against",
"age",
"agency",
"agent",
"ago",
"agree",
"agreement",
"ahead",
"air",
"all",
"allow",
"almost",
"alone",
"along",
"already",
"also",
"although",
"always",
"American",
"among",
"amount",
"analysis",
"and",
"animal",
"another",
"answer",
"any",
"anyone",
"anything",
"appear",
"apply",
"approach",
"area",
"argue",
"arm",
"around",
"arrive",
"art",
"article",
"artist",
"as",
"ask",
"assume",
"at",
"attack",
"attention",
"attorney",
"audience",
"author",
"authority",
"available",
"avoid",
"away",
"baby",
"back",
"bad",
"bag",
"ball",
"bank",
"bar",
"base",
"be",
"beat",
"beautiful",
"because",
"become",
"bed",
"before",
"begin",
"behavior",
"behind",
"believe",
"benefit",
"best",
"better",
"between",
"beyond",
"big",
"bill",
"billion",
"bit",
"black",
"blood",
"blue",
"board",
"body",
"book",
"born",
"both",
"box",
"boy",
"break",
"bring",
"brother",
"budget",
"build",
"building",
"business",
"but",
"buy",
"by",
"call",
"camera",
"campaign",
"can",
"candidate",
"capital",
"car",
"card",
"care",
"career",
"carry",
"case",
"catch",
"cause",
"cell",
"center",
"central",
"century",
"certain",
"certainly",
"chair",
"challenge",
"chance",
"change",
"character",
"charge",
"check",
"child",
"choice",
"choose",
"church",
"citizen",
"city",
"civil",
"claim",
"class",
"clear",
"clearly",
"close",
"coach",
"cold",
"collection",
"college",
"color",
"commercial",
"common",
"community",
"company",
"compare",
"computer",
"concern",
"condition",
"conference",
"Congress",
"consider",
"consumer",
"contain",
"continue",
"control",
"cost",
"could",
"country",
"couple",
"course",
"court",
"cover",
"create",
"crime",
"cultural",
"culture",
"cup",
"current",
"customer",
"cut",
"dark",
"data",
"daughter",
"day",
"deal",
"debate",
"decade",
"decide",
"decision",
"deep",
"defense",
"degree",
"Democrat",
"democratic",
"describe",
"design",
"despite",
"detail",
"determine",
"develop",
"development",
"difference",
"different",
"difficult",
"dinner",
"direction",
"director",
"discover",
"discuss",
"discussion",
"do",
"doctor",
"dog",
"door",
"down",
"draw",
"dream",
"drive",
"drop",
"drug",
"during",
"each",
"early",
"east",
"easy",
"eat",
"economic",
"economy",
"edge",
"education",
"effect",
"effort",
"eight",
"either",
"election",
"else",
"employee",
"end",
"energy",
"enjoy",
"enough",
"enter",
"entire",
"environment",
"environmental",
"especially",
"establish",
"even",
"evening",
"event",
"ever",
"every",
"everybody",
"everyone",
"everything",
"evidence",
"exactly",
"example",
"executive",
"exist",
"expect",
"experience",
"expert",
"explain",
"eye",
"face",
"fact",
"factor",
"fall",
"family",
"far",
"fast",
"father",
"fear",
"federal",
"feel",
"feeling",
"few",
"field",
"fight",
"figure",
"fill",
"film",
"final",
"finally",
"financial",
"find",
"fine",
"finish",
"fire",
"firm",
"first",
"fish",
"five",
"floor",
"fly",
"focus",
"follow",
"food",
"foot",
"for",
"force",
"foreign",
"forget",
"form",
"former",
"forward",
"four",
"free",
"friend",
"from",
"front",
"full",
"fund",
"future",
"game",
"garden",
"gas",
"general",
"generation",
"get",
"girl",
"give",
"glass",
"go",
"goal",
"good",
"government",
"great",
"green",
"ground",
"group",
"grow",
"growth",
"guess",
"gun",
"guy",
"hair",
"half",
"hand",
"happen",
"happy",
"hard",
"have",
"he",
"head",
"health",
"hear",
"heart",
"heavy",
"help",
"her",
"here",
"herself",
"high",
"him",
"himself",
"his",
"history",
"hit",
"hold",
"home",
"hope",
"hospital",
"hot",
"hotel",
"hour",
"house",
"how",
"however",
"huge",
"human",
"hundred",
"husband",
"I",
"idea",
"identify",
"if",
"image",
"imagine",
"impact",
"important",
"improve",
"in",
"include",
"including",
"increase",
"indeed",
"indicate",
"individual",
"industry",
"information",
"inside",
"instead",
"institution",
"interest",
"interesting",
"international",
"interview",
"into",
"investment",
"involve",
"issue",
"it",
"item",
"its",
"itself",
"job",
"join",
"just",
"keep",
"key",
"kid",
"kind",
"kitchen",
"know",
"knowledge",
"land",
"language",
"large",
"last",
"late",
"later",
"laugh",
"law",
"lawyer",
"lay",
"lead",
"leader",
"learn",
"least",
"leave",
"left",
"leg",
"less",
"let",
"letter",
"level",
"life",
"light",
"like",
"likely",
"line",
"list",
"listen",
"little",
"live",
"local",
"long",
"look",
"lose",
"loss",
"lot",
"low",
"machine",
"magazine",
"main",
"maintain",
"major",
"majority",
"make",
"man",
"manage",
"management",
"manager",
"many",
"market",
"marriage",
"material",
"matter",
"may",
"maybe",
"me",
"mean",
"measure",
"media",
"medical",
"meet",
"meeting",
"member",
"memory",
"mention",
"message",
"method",
"middle",
"might",
"military",
"million",
"mind",
"minute",
"miss",
"mission",
"model",
"modern",
"moment",
"money",
"month",
"more",
"morning",
"most",
"mother",
"mouth",
"move",
"movement",
"movie",
"Mr",
"Mrs",
"much",
"music",
"must",
"my",
"myself",
"name",
"nation",
"national",
"natural",
"nature",
"near",
"nearly",
"necessary",
"need",
"network",
"never",
"new",
"news",
"newspaper",
"next",
"nice",
"night",
"no",
"none",
"nor",
"north",
"not",
"note",
"nothing",
"notice",
"now",
"number",
"occur",
"of",
"off",
"offer",
"office",
"officer",
"official",
"often",
"oil",
"ok",
"old",
"on",
"once",
"one",
"only",
"onto",
"open",
"operation",
"opportunity",
"option",
"or",
"order",
"organization",
"other",
"others",
"our",
"out",
"outside",
"over",
"own",
"owner",
"page",
"painting",
"paper",
"parent",
"part",
"participant",
"particular",
"particularly",
"partner",
"party",
"pass",
"past",
"pattern",
"pay",
"peace",
"people",
"per",
"perform",
"performance",
"perhaps",
"person",
"personal",
"phone",
"physical",
"pick",
"picture",
"piece",
"place",
"plan",
"plant",
"play",
"player",
"PM",
"point",
"police",
"policy",
"political",
"politics",
"poor",
"popular",
"population",
"position",
"positive",
"possible",
"power",
"practice",
"prepare",
"present",
"president",
"pressure",
"pretty",
"prevent",
"price",
"probably",
"process",
"produce",
"product",
"production",
"professional",
"professor",
"program",
"project",
"property",
"protect",
"prove",
"provide",
"public",
"pull",
"purpose",
"push",
"put",
"quality",
"question",
"quickly",
"quite",
"race",
"radio",
"raise",
"range",
"rate",
"rather",
"reach",
"read",
"ready",
"real",
"reality",
"realize",
"really",
"reason",
"receive",
"recent",
"recently",
"recognize",
"record",
"red",
"reduce",
"reflect",
"region",
"relate",
"relationship",
"religious",
"remain",
"remember",
"report",
"represent",
"Republican",
"require",
"research",
"resource",
"respond",
"response",
"responsibility",
"rest",
"result",
"return",
"reveal",
"rich",
"right",
"rise",
"risk",
"road",
"rock",
"role",
"room",
"rule",
"run",
"safe",
"same",
"save",
"say",
"scene",
"school",
"science",
"scientist",
"score",
"sea",
"season",
"seat",
"second",
"section",
"security",
"see",
"seek",
"seem",
"sell",
"send",
"senior",
"sense",
"series",
"serious",
"serve",
"service",
"set",
"seven",
"several",
"shake",
"share",
"she",
"short",
"should",
"shoulder",
"show",
"side",
"sign",
"significant",
"similar",
"simple",
"simply",
"since",
"sing",
"single",
"sister",
"sit",
"site",
"situation",
"six",
"size",
"skill",
"skin",
"small",
"smile",
"so",
"social",
"society",
"soldier",
"some",
"somebody",
"someone",
"something",
"sometimes",
"son",
"song",
"soon",
"sort",
"sound",
"source",
"south",
"southern",
"space",
"speak",
"special",
"specific",
"speech",
"spend",
"sport",
"spring",
"staff",
"stage",
"stand",
"standard",
"star",
"start",
"state",
"statement",
"station",
"stay",
"step",
"still",
"stock",
"stop",
"store",
"story",
"strategy",
"street",
"strong",
"structure",
"student",
"study",
"stuff",
"style",
"subject",
"success",
"successful",
"such",
"suddenly",
"suffer",
"suggest",
"summer",
"support",
"sure",
"surface",
"system",
"table",
"take",
"talk",
"task",
"tax",
"teach",
"teacher",
"team",
"technology",
"television",
"tell",
"ten",
"tend",
"term",
"test",
"than",
"thank",
"that",
"the",
"their",
"them",
"themselves",
"then",
"theory",
"there",
"these",
"they",
"thing",
"think",
"third",
"this",
"those",
"though",
"thought",
"thousand",
"threat",
"three",
"through",
"throughout",
"throw",
"thus",
"time",
"to",
"today",
"together",
"tonight",
"too",
"top",
"total",
"tough",
"toward",
"town",
"trade",
"traditional",
"training",
"travel",
"treat",
"treatment",
"tree",
"trial",
"trip",
"trouble",
"true",
"truth",
"try",
"turn",
"TV",
"two",
"type",
"under",
"understand",
"unit",
"until",
"up",
"upon",
"us",
"use",
"usually",
"value",
"various",
"very",
"view",
"visit",
"voice",
"vote",
"wait",
"walk",
"wall",
"want",
"war",
"watch",
"water",
"way",
"we",
"wear",
"week",
"weight",
"well",
"west",
"western",
"what",
"whatever",
"when",
"where",
"whether",
"which",
"while",
"white",
"who",
"whole",
"whom",
"whose",
"why",
"wide",
"wife",
"will",
"win",
"wind",
"window",
"wish",
"with",
"within",
"without",
"woman",
"wonder",
"word",
"work",
"worker",
"world",
"worry",
"would",
"write",
"writer",
"wrong",
"yard",
"yeah",
"year",
"yes",
"yet",
"you",
"young",
"your",
"yourself",
)
parts_of_speech: Dict[str, tuple] = {
"verb": (
"be",
"have",
"do",
"say",
"get",
"make",
"go",
"see",
"know",
"take",
"think",
"come",
"give",
"look",
"use",
"find",
"want",
"tell",
"put",
"mean",
"become",
"leave",
"work",
"need",
"feel",
"seem",
"ask",
"show",
"try",
"call",
"keep",
"provide",
"hold",
"turn",
"follow",
"begin",
"bring",
"like",
"going",
"help",
"start",
"run",
"write",
"set",
"move",
"play",
"pay",
"hear",
"include",
"believe",
"allow",
"meet",
"lead",
"live",
"stand",
"happen",
"carry",
"talk",
"appear",
"produce",
"sit",
"offer",
"consider",
"expect",
"let",
"read",
"require",
"continue",
"lose",
"add",
"change",
"fall",
"remain",
"remember",
"buy",
"speak",
"stop",
"send",
"receive",
"decide",
"win",
"understand",
"describe",
"develop",
"agree",
"open",
"reach",
"build",
"involve",
"spend",
"return",
"draw",
"die",
"hope",
"create",
"walk",
"sell",
"wait",
"cause",
"pass",
"lie",
"accept",
"watch",
"raise",
"base",
"apply",
"break",
"explain",
"learn",
"increase",
"cover",
"grow",
"claim",
"report",
"support",
"cut",
"form",
"stay",
"contain",
"reduce",
"establish",
"join",
"wish",
"seek",
"choose",
"deal",
"face",
"fail",
"serve",
"end",
"kill",
"occur",
"drive",
"represent",
"rise",
"discuss",
"love",
"pick",
"place",
"argue",
"prove",
"wear",
"catch",
"enjoy",
"eat",
"introduce",
"enter",
"present",
"arrive",
"ensure",
"point",
"plan",
"pull",
"refer",
"act",
"relate",
"affect",
"close",
"identify",
"manage",
"thank",
"compare",
"announce",
"obtain",
"note",
"forget",
"indicate",
"wonder",
"maintain",
"publish",
"suffer",
"avoid",
"express",
"suppose",
"finish",
"determine",
"design",
"listen",
"save",
"tend",
"treat",
"control",
"share",
"remove",
"throw",
"visit",
"exist",
"force",
"reflect",
"admit",
"assume",
"smile",
"prepare",
"replace",
"fill",
"improve",
"mention",
"fight",
"intend",
"miss",
"discover",
"drop",
"hit",
"push",
"prevent",
"refuse",
"regard",
"lay",
"reveal",
"teach",
"answer",
"operate",
"state",
"depend",
"enable",
"record",
"check",
"complete",
"cost",
"sound",
"laugh",
"realise",
"extend",
"arise",
"notice",
"define",
"examine",
"fit",
"study",
"bear",
"hang",
"recognise",
"shake",
"sign",
"attend",
"fly",
"gain",
"result",
"travel",
"adopt",
"confirm",
"protect",
"demand",
"stare",
"imagine",
"attempt",
"beat",
"born",
"associate",
"care",
"marry",
"collect",
"voice",
"employ",
"issue",
"release",
"emerge",
"mind",
"aim",
"deny",
"mark",
"shoot",
"appoint",
"order",
"supply",
"drink",
"observe",
"reply",
"ignore",
"link",
"propose",
"ring",
"settle",
"strike",
"press",
"respond",
"arrange",
"survive",
"concentrate",
"lift",
"approach",
"cross",
"test",
"charge",
"experience",
"touch",
"acquire",
"commit",
"demonstrate",
"grant",
"prefer",
"repeat",
"sleep",
"threaten",
"feed",
"insist",
"launch",
"limit",
"promote",
"deliver",
"measure",
"own",
"retain",
"attract",
"belong",
"consist",
"contribute",
"hide",
"promise",
"reject",
"cry",
"impose",
"invite",
"sing",
"vary",
"warn",
"address",
"declare",
"destroy",
"worry",
"divide",
"head",
"name",
"stick",
"nod",
"recognize",
"train",
"attack",
"clear",
"combine",
"handle",
"influence",
"realize",
"recommend",
"shout",
"spread",
"undertake",
"account",
"select",
"climb",
"contact",
"recall",
"secure",
"step",
"transfer",
"welcome",
"conclude",
"disappear",
"display",
"dress",
"illustrate",
"imply",
"organise",
"direct",
"escape",
"generate",
"remind",
"advise",
"afford",
"earn",
"hand",
"inform",
"rely",
"succeed",
"approve",
"burn",
"fear",
"vote",
"conduct",
"cope",
"derive",
"elect",
"gather",
"jump",
"last",
"match",
"matter",
"persuade",
"ride",
"shut",
"blow",
"estimate",
"recover",
"score",
"slip",
"count",
"hate",
"attach",
"exercise",
"house",
"lean",
"roll",
"wash",
"accuse",
"bind",
"judge",
"rest",
"steal",
"comment",
"exclude",
"focus",
"hurt",
"stretch",
"withdraw",
"back",
"fix",
"justify",
"knock",
"pursue",
"switch",
"benefit",
"lack",
"list",
"occupy",
"permit",
"surround",
"abandon",
"blame",
"complain",
"connect",
"construct",
"dominate",
"engage",
"paint",
"quote",
"view",
"incorporate",
"interpret",
"proceed",
"search",
"separate",
"stress",
"alter",
"analyse",
"arrest",
"bother",
"defend",
"expand",
"implement",
"possess",
"review",
"suit",
"tie",
"assist",
"calculate",
"glance",
"mix",
"question",
"resolve",
"rule",
"suspect",
"wake",
"appeal",
"challenge",
"clean",
"damage",
"guess",
"reckon",
"restore",
"restrict",
"specify",
"constitute",
"convert",
"distinguish",
"submit",
"trust",
"urge",
"feature",
"land",
"locate",
"predict",
"preserve",
"solve",
"sort",
"struggle",
"cast",
"cook",
"dance",
"invest",
"lock",
"owe",
"pour",
"shift",
"kick",
"kiss",
"light",
"purchase",
"race",
"retire",
),
"noun": (
"people",
"history",
"way",
"art",
"world",
"information",
"map",
"family",
"government",
"health",
"system",
"computer",
"meat",
"year",
"thanks",
"music",
"person",
"reading",
"method",
"data",
"food",
"understanding",
"theory",
"law",
"bird",
"literature",
"problem",
"software",
"control",
"knowledge",
"power",
"ability",
"economics",
"love",
"internet",
"television",
"science",
"library",
"nature",
"fact",
"product",
"idea",
"temperature",
"investment",
"area",
"society",
"activity",
"story",
"industry",
"media",
"thing",
"oven",
"community",
"definition",
"safety",
"quality",
"development",
"language",
"management",
"player",
"variety",
"video",
"week",
"security",
"country",
"exam",
"movie",
"organization",
"equipment",
"physics",
"analysis",
"policy",
"series",
"thought",
"basis",
"boyfriend",
"direction",
"strategy",
"technology",
"army",
"camera",
"freedom",
"paper",
"environment",
"child",
"instance",
"month",
"truth",
"marketing",
"university",
"writing",
"article",
"department",
"difference",
"goal",
"news",
"audience",
"fishing",
"growth",
"income",
"marriage",
"user",
"combination",
"failure",
"meaning",
"medicine",
"philosophy",
"teacher",
"communication",
"night",
"chemistry",
"disease",
"disk",
"energy",
"nation",
"road",
"role",
"soup",
"advertising",
"location",
"success",
"addition",
"apartment",
"education",
"math",
"moment",
"painting",
"politics",
"attention",
"decision",
"event",
"property",
"shopping",
"student",
"wood",
"competition",
"distribution",
"entertainment",
"office",
"population",
"president",
"unit",
"category",
"cigarette",
"context",
"introduction",
"opportunity",
"performance",
"driver",
"flight",
"length",
"magazine",
"newspaper",
"relationship",
"teaching",
"cell",
"dealer",
"finding",
"lake",
"member",
"message",
"phone",
"scene",
"appearance",
"association",
"concept",
"customer",
"death",
"discussion",
"housing",
"inflation",
"insurance",
"mood",
"woman",
"advice",
"blood",
"effort",
"expression",
"importance",
"opinion",
"payment",
"reality",
"responsibility",
"situation",
"skill",
"statement",
"wealth",
"application",
"city",
"county",
"depth",
"estate",
"foundation",
"grandmother",
"heart",
"perspective",
"photo",
"recipe",
"studio",
"topic",
"collection",
"depression",
"imagination",
"passion",
"percentage",
"resource",
"setting",
"ad",
"agency",
"college",
"connection",
"criticism",
"debt",
"description",
"memory",
"patience",
"secretary",
"solution",
"administration",
"aspect",
"attitude",
"director",
"personality",
"psychology",
"recommendation",
"response",
"selection",
"storage",
"version",
"alcohol",
"argument",
"complaint",
"contract",
"emphasis",
"highway",
"loss",
"membership",
"possession",
"preparation",
"steak",
"union",
"agreement",
"cancer",
"currency",
"employment",
"engineering",
"entry",
"interaction",
"mixture",
"preference",
"region",
"republic",
"tradition",
"virus",
"actor",
"classroom",
"delivery",
"device",
"difficulty",
"drama",
"election",
"engine",
"football",
"guidance",
"hotel",
"owner",
"priority",
"protection",
"suggestion",
"tension",
"variation",
"anxiety",
"atmosphere",
"awareness",
"bath",
"bread",
"candidate",
"climate",
"comparison",
"confusion",
"construction",
"elevator",
"emotion",
"employee",
"employer",
"guest",
"height",
"leadership",
"mall",
"manager",
"operation",
"recording",
"sample",
"transportation",
"charity",
"cousin",
"disaster",
"editor",
"efficiency",
"excitement",
"extent",
"feedback",
"guitar",
"homework",
"leader",
"mom",
"outcome",
"permission",
"presentation",
"promotion",
"reflection",
"refrigerator",
"resolution",
"revenue",
"session",
"singer",
"tennis",
"basket",
"bonus",
"cabinet",
"childhood",
"church",
"clothes",
"coffee",
"dinner",
"drawing",
"hair",
"hearing",
"initiative",
"judgment",
"lab",
"measurement",
"mode",
"mud",
"orange",
"poetry",
"police",
"possibility",
"procedure",
"queen",
"ratio",
"relation",
"restaurant",
"satisfaction",
"sector",
"signature",
"significance",
"song",
"tooth",
"town",
"vehicle",
"volume",
"wife",
"accident",
"airport",
"appointment",
"arrival",
"assumption",
"baseball",
"chapter",
"committee",
"conversation",
"database",
"enthusiasm",
"error",
"explanation",
"farmer",
"gate",
"girl",
"hall",
"historian",
"hospital",
"injury",
"instruction",
"maintenance",
"manufacturer",
"meal",
"perception",
"pie",
"poem",
"presence",
"proposal",
"reception",
"replacement",
"revolution",
"river",
"son",
"speech",
"tea",
"village",
"warning",
"winner",
"worker",
"writer",
"assistance",
"breath",
"buyer",
"chest",
"chocolate",
"conclusion",
"contribution",
"cookie",
"courage",
"desk",
"drawer",
"establishment",
"examination",
"garbage",
"grocery",
"honey",
"impression",
"improvement",
"independence",
"insect",
"inspection",
"inspector",
"king",
"ladder",
"menu",
"penalty",
"piano",
"potato",
"profession",
"professor",
"quantity",
"reaction",
"requirement",
"salad",
"sister",
"supermarket",
"tongue",
"weakness",
"wedding",
"affair",
"ambition",
"analyst",
"apple",
"assignment",
"assistant",
"bathroom",
"bedroom",
"beer",
"birthday",
"celebration",
"championship",
"cheek",
"client",
"consequence",
"departure",
"diamond",
"dirt",
"ear",
"fortune",
"friendship",
"funeral",
"gene",
"girlfriend",
"hat",
"indication",
"intention",
"lady",
"midnight",
"negotiation",
"obligation",
"passenger",
"pizza",
"platform",
"poet",
"pollution",
"recognition",
"reputation",
"shirt",
"sir",
"speaker",
"stranger",
"surgery",
"sympathy",
"tale",
"throat",
"trainer",
"uncle",
"youth",
"time",
"work",
"film",
"water",
"money",
"example",
"while",
"business",
"study",
"game",
"life",
"form",
"air",
"day",
"place",
"number",
"part",
"field",
"fish",
"back",
"process",
"heat",
"hand",
"experience",
"job",
"book",
"end",
"point",
"type",
"home",
"economy",
"value",
"body",
"market",
"guide",
"interest",
"state",
"radio",
"course",
"company",
"price",
"size",
"card",
"list",
"mind",
"trade",
"line",
"care",
"group",
"risk",
"word",
"fat",
"force",
"key",
"light",
"training",
"name",
"school",
"top",
"amount",
"level",
"order",
"practice",
"research",
"sense",
"service",
"piece",
"web",
"boss",
"sport",
"fun",
"house",
"page",
"term",
"test",
"answer",
"sound",
"focus",
"matter",
"kind",
"soil",
"board",
"oil",
"picture",
"access",
"garden",
"range",
"rate",
"reason",
"future",
"site",
"demand",
"exercise",
"image",
"case",
"cause",
"coast",
"action",
"age",
"bad",
"boat",
"record",
"result",
"section",
"building",
"mouse",
"cash",
"class",
"nothing",
"period",
"plan",
"store",
"tax",
"side",
"subject",
"space",
"rule",
"stock",
"weather",
"chance",
"figure",
"man",
"model",
"source",
"beginning",
"earth",
"program",
"chicken",
"design",
"feature",
"head",
"material",
"purpose",
"question",
"rock",
"salt",
"act",
"birth",
"car",
"dog",
"object",
"scale",
"sun",
"note",
"profit",
"rent",
"speed",
"style",
"war",
"bank",
"craft",
"half",
"inside",
"outside",
"standard",
"bus",
"exchange",
"eye",
"fire",
"position",
"pressure",
"stress",
"advantage",
"benefit",
"box",
"frame",
"issue",
"step",
"cycle",
"face",
"item",
"metal",
"paint",
"review",
"room",
"screen",
"structure",
"view",
"account",
"ball",
"discipline",
"medium",
"share",
"balance",
"bit",
"black",
"bottom",
"choice",
"gift",
"impact",
"machine",
"shape",
"tool",
"wind",
"address",
"average",
"career",
"culture",
"morning",
"pot",
"sign",
"table",
"task",
"condition",
"contact",
"credit",
"egg",
"hope",
"ice",
"network",
"north",
"square",
"attempt",
"date",
"effect",
"link",
"post",
"star",
"voice",
"capital",
"challenge",
"friend",
"self",
"shot",
"brush",
"couple",
"debate",
"exit",
"front",
"function",
"lack",
"living",
"plant",
"plastic",
"spot",
"summer",
"taste",
"theme",
"track",
"wing",
"brain",
"button",
"click",
"desire",
"foot",
"gas",
"influence",
"notice",
"rain",
"wall",
"base",
"damage",
"distance",
"feeling",
"pair",
"savings",
"staff",
"sugar",
"target",
"text",
"animal",
"author",
"budget",
"discount",
"file",
"ground",
"lesson",
"minute",
"officer",
"phase",
"reference",
"register",
"sky",
"stage",
"stick",
"title",
"trouble",
"bowl",
"bridge",
"campaign",
"character",
"club",
"edge",
"evidence",
"fan",
"letter",
"lock",
"maximum",
"novel",
"option",
"pack",
"park",
"plenty",
"quarter",
"skin",
"sort",
"weight",
"baby",
"background",
"carry",
"dish",
"factor",
"fruit",
"glass",
"joint",
"master",
"muscle",
"red",
"strength",
"traffic",
"trip",
"vegetable",
"appeal",
"chart",
"gear",
"ideal",
"kitchen",
"land",
"log",
"mother",
"net",
"party",
"principle",
"relative",
"sale",
"season",
"signal",
"spirit",
"street",
"tree",
"wave",
"belt",
"bench",
"commission",
"copy",
"drop",
"minimum",
"path",
"progress",
"project",
"sea",
"south",
"status",
"stuff",
"ticket",
"tour",
"angle",
"blue",
"breakfast",
"confidence",
"daughter",
"degree",
"doctor",
"dot",
"dream",
"duty",
"essay",
"father",
"fee",
"finance",
"hour",
"juice",
"limit",
"luck",
"milk",
"mouth",
"peace",
"pipe",
"seat",
"stable",
"storm",
"substance",
"team",
"trick",
"afternoon",
"bat",
"beach",
"blank",
"catch",
"chain",
"consideration",
"cream",
"crew",
"detail",
"gold",
"interview",
"kid",
"mark",
"match",
"mission",
"pain",
"pleasure",
"score",
"screw",
"sex",
"shop",
"shower",
"suit",
"tone",
"window",
"agent",
"band",
"block",
"bone",
"calendar",
"cap",
"coat",
"contest",
"corner",
"court",
"cup",
"district",
"door",
"east",
"finger",
"garage",
"guarantee",
"hole",
"hook",
"implement",
"layer",
"lecture",
"lie",
"manner",
"meeting",
"nose",
"parking",
"partner",
"profile",
"respect",
"rice",
"routine",
"schedule",
"swimming",
"telephone",
"tip",
"winter",
"airline",
"bag",
"battle",
"bed",
"bill",
"bother",
"cake",
"code",
"curve",
"designer",
"dimension",
"dress",
"ease",
"emergency",
"evening",
"extension",
"farm",
"fight",
"gap",
"grade",
"holiday",
"horror",
"horse",
"host",
"husband",
"loan",
"mistake",
"mountain",
"nail",
"noise",
"occasion",
"package",
"patient",
"pause",
"phrase",
"proof",
"race",
"relief",
"sand",
"sentence",
"shoulder",
"smoke",
"stomach",
"string",
"tourist",
"towel",
"vacation",
"west",
"wheel",
"wine",
"arm",
"aside",
"associate",
"bet",
"blow",
"border",
"branch",
"breast",
"brother",
"buddy",
"bunch",
"chip",
"coach",
"cross",
"document",
"draft",
"dust",
"expert",
"floor",
"god",
"golf",
"habit",
"iron",
"judge",
"knife",
"landscape",
"league",
"mail",
"mess",
"native",
"opening",
"parent",
"pattern",
"pin",
"pool",
"pound",
"request",
"salary",
"shame",
"shelter",
"shoe",
"silver",
"tackle",
"tank",
"trust",
"assist",
"bake",
"bar",
"bell",
"bike",
"blame",
"boy",
"brick",
"chair",
"closet",
"clue",
"collar",
"comment",
"conference",
"devil",
"diet",
"fear",
"fuel",
"glove",
"jacket",
"lunch",
"monitor",
"mortgage",
"nurse",
"pace",
"panic",
"peak",
"plane",
"reward",
"row",
"sandwich",
"shock",
"spite",
"spray",
"surprise",
"till",
"transition",
"weekend",
"welcome",
"yard",
"alarm",
"bend",
"bicycle",
"bite",
"blind",
"bottle",
"cable",
"candle",
"clerk",
"cloud",
"concert",
"counter",
"flower",
"grandfather",
"harm",
"knee",
"lawyer",
"leather",
"load",
"mirror",
"neck",
"pension",
"plate",
"purple",
"ruin",
"ship",
"skirt",
"slice",
"snow",
"specialist",
"stroke",
"switch",
"trash",
"tune",
"zone",
"anger",
"award",
"bid",
"bitter",
"boot",
"bug",
"camp",
"candy",
"carpet",
"cat",
"champion",
"channel",
"clock",
"comfort",
"cow",
"crack",
"engineer",
"entrance",
"fault",
"grass",
"guy",
"hell",
"highlight",
"incident",
"island",
"joke",
"jury",
"leg",
"lip",
"mate",
"motor",
"nerve",
"passage",
"pen",
"pride",
"priest",
"prize",
"promise",
"resident",
"resort",
"ring",
"roof",
"rope",
"sail",
"scheme",
"script",
"sock",
"station",
"toe",
"tower",
"truck",
"witness",
),
"adverb": (
"not",
"also",
"very",
"often",
"however",
"too",
"usually",
"really",
"early",
"never",
"always",
"sometimes",
"together",
"likely",
"simply",
"generally",
"instead",
"actually",
"again",
"rather",
"almost",
"especially",
"ever",
"quickly",
"probably",
"already",
"below",
"directly",
"therefore",
"else",
"thus",
"easily",
"eventually",
"exactly",
"certainly",
"normally",
"currently",
"extremely",
"finally",
"constantly",
"properly",
"soon",
"specifically",
"ahead",
"daily",
"highly",
"immediately",
"relatively",
"slowly",
"fairly",
"primarily",
"completely",
"ultimately",
"widely",
"recently",
"seriously",
"frequently",
"fully",
"mostly",
"naturally",
"nearly",
"occasionally",
"carefully",
"clearly",
"essentially",
"possibly",
"slightly",
"somewhat",
"equally",
"greatly",
"necessarily",
"personally",
"rarely",
"regularly",
"similarly",
"basically",
"closely",
"effectively",
"initially",
"literally",
"mainly",
"merely",
"gently",
"hopefully",
"originally",
"roughly",
"significantly",
"totally",
"twice",
"elsewhere",
"everywhere",
"obviously",
"perfectly",
"physically",
"successfully",
"suddenly",
"truly",
"virtually",
"altogether",
"anyway",
"automatically",
"deeply",
"definitely",
"deliberately",
"hardly",
"readily",
"terribly",
"unfortunately",
"forth",
"briefly",
"moreover",
"strongly",
"honestly",
"previously",
"as",
"there",
"when",
"how",
"so",
"up",
"out",
"only",
"well",
"then",
"first",
"where",
"why",
"now",
"around",
"once",
"down",
"off",
"here",
"tonight",
"away",
"today",
"far",
"quite",
"later",
"above",
"yet",
"maybe",
"otherwise",
"near",
"forward",
"somewhere",
"anywhere",
"please",
"forever",
"somehow",
"absolutely",
"abroad",
"yeah",
"nowhere",
"tomorrow",
"yesterday",
),
"adjective": (
"different",
"used",
"important",
"every",
"large",
"available",
"popular",
"able",
"basic",
"known",
"various",
"difficult",
"several",
"united",
"historical",
"hot",
"useful",
"mental",
"scared",
"additional",
"emotional",
"old",
"political",
"similar",
"healthy",
"financial",
"medical",
"traditional",
"federal",
"entire",
"strong",
"actual",
"significant",
"successful",
"electrical",
"expensive",
"pregnant",
"intelligent",
"interesting",
"poor",
"happy",
"responsible",
"cute",
"helpful",
"recent",
"willing",
"nice",
"wonderful",
"impossible",
"serious",
"huge",
"rare",
"technical",
"typical",
"competitive",
"critical",
"electronic",
"immediate",
"aware",
"educational",
"environmental",
"global",
"legal",
"relevant",
"accurate",
"capable",
"dangerous",
"dramatic",
"efficient",
"powerful",
"foreign",
"hungry",
"practical",
"psychological",
"severe",
"suitable",
"numerous",
"sufficient",
"unusual",
"consistent",
"cultural",
"existing",
"famous",
"pure",
"afraid",
"obvious",
"careful",
"latter",
"unhappy",
"acceptable",
"aggressive",
"boring",
"distinct",
"eastern",
"logical",
"reasonable",
"strict",
"administrative",
"automatic",
"civil",
"former",
"massive",
"southern",
"unfair",
"visible",
"alive",
"angry",
"desperate",
"exciting",
"friendly",
"lucky",
"realistic",
"sorry",
"ugly",
"unlikely",
"anxious",
"comprehensive",
"curious",
"impressive",
"informal",
"inner",
"pleasant",
"sexual",
"sudden",
"terrible",
"unable",
"weak",
"wooden",
"asleep",
"confident",
"conscious",
"decent",
"embarrassed",
"guilty",
"lonely",
"mad",
"nervous",
"odd",
"remarkable",
"substantial",
"suspicious",
"tall",
"tiny",
"more",
"some",
"all",
"many",
"most",
"other",
"such",
"even",
"new",
"just",
"good",
"any",
"each",
"much",
"own",
"great",
"another",
"same",
"few",
"free",
"right",
"still",
"best",
"public",
"human",
"both",
"local",
"sure",
"better",
"general",
"specific",
"enough",
"long",
"small",
"less",
"high",
"certain",
"little",
"common",
"next",
"simple",
"hard",
"past",
"big",
"possible",
"particular",
"real",
"major",
"personal",
"current",
"left",
"national",
"least",
"natural",
"physical",
"short",
"last",
"single",
"individual",
"main",
"potential",
"professional",
"international",
"lower",
"open",
"according",
"alternative",
"special",
"working",
"true",
"whole",
"clear",
"dry",
"easy",
"cold",
"commercial",
"full",
"low",
"primary",
"worth",
"necessary",
"positive",
"present",
"close",
"creative",
"green",
"late",
"fit",
"glad",
"proper",
"complex",
"content",
"due",
"effective",
"middle",
"regular",
"fast",
"independent",
"original",
"wide",
"beautiful",
"complete",
"active",
"negative",
"safe",
"visual",
"wrong",
"ago",
"quick",
"ready",
"straight",
"white",
"direct",
"excellent",
"extra",
"junior",
"pretty",
"unique",
"classic",
"final",
"overall",
"private",
"separate",
"western",
"alone",
"familiar",
"official",
"perfect",
"bright",
"broad",
"comfortable",
"flat",
"rich",
"warm",
"young",
"heavy",
"valuable",
"correct",
"leading",
"slow",
"clean",
"fresh",
"normal",
"secret",
"tough",
"brown",
"cheap",
"deep",
"objective",
"secure",
"thin",
"chemical",
"cool",
"extreme",
"exact",
"fair",
"fine",
"formal",
"opposite",
"remote",
"total",
"vast",
"lost",
"smooth",
"dark",
"double",
"equal",
"firm",
"frequent",
"internal",
"sensitive",
"constant",
"minor",
"previous",
"raw",
"soft",
"solid",
"weird",
"amazing",
"annual",
"busy",
"dead",
"false",
"round",
"sharp",
"thick",
"wise",
"equivalent",
"initial",
"narrow",
"nearby",
"proud",
"spiritual",
"wild",
"adult",
"apart",
"brief",
"crazy",
"prior",
"rough",
"sad",
"sick",
"strange",
"external",
"illegal",
"loud",
"mobile",
"nasty",
"ordinary",
"royal",
"senior",
"super",
"tight",
"upper",
"yellow",
"dependent",
"funny",
"gross",
"ill",
"spare",
"sweet",
"upstairs",
"usual",
"brave",
"calm",
"dirty",
"downtown",
"grand",
"honest",
"loose",
"male",
"quiet",
"brilliant",
"dear",
"drunk",
"empty",
"female",
"inevitable",
"neat",
"ok",
"representative",
"silly",
"slight",
"smart",
"stupid",
"temporary",
"weekly",
),
}
| Provider |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/deep_learning/activation_functions.py | {
"start": 121,
"end": 287
} | class ____():
def __call__(self, x):
return 1 / (1 + np.exp(-x))
def gradient(self, x):
return self.__call__(x) * (1 - self.__call__(x))
| Sigmoid |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.