language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ray-project__ray | doc/source/tune/doc_code/key_concepts.py | {
"start": 555,
"end": 4752
} | class ____(tune.Trainable):
def setup(self, config):
# config (dict): A dict of hyperparameters
self.x = 0
self.a = config["a"]
self.b = config["b"]
def step(self): # This is called iteratively.
score = objective(self.x, self.a, self.b)
self.x += 1
return {"score": score}
# __class_api_end__
# TODO: this example does not work as advertised. Errors out.
def save_checkpoint(self, checkpoint_dir):
pass
def load_checkpoint(self, checkpoint_dir):
pass
# __run_tunable_start__
# Pass in a Trainable class or function, along with a search space "config".
tuner = tune.Tuner(trainable, param_space={"a": 2, "b": 4})
tuner.fit()
# __run_tunable_end__
# __run_tunable_samples_start__
tuner = tune.Tuner(
trainable, param_space={"a": 2, "b": 4}, tune_config=tune.TuneConfig(num_samples=10)
)
tuner.fit()
# __run_tunable_samples_end__
# __search_space_start__
space = {"a": tune.uniform(0, 1), "b": tune.uniform(0, 1)}
tuner = tune.Tuner(
trainable, param_space=space, tune_config=tune.TuneConfig(num_samples=10)
)
tuner.fit()
# __search_space_end__
# __config_start__
config = {
"uniform": tune.uniform(-5, -1), # Uniform float between -5 and -1
"quniform": tune.quniform(3.2, 5.4, 0.2), # Round to multiples of 0.2
"loguniform": tune.loguniform(1e-4, 1e-1), # Uniform float in log space
"qloguniform": tune.qloguniform(1e-4, 1e-1, 5e-5), # Round to multiples of 0.00005
"randn": tune.randn(10, 2), # Normal distribution with mean 10 and sd 2
"qrandn": tune.qrandn(10, 2, 0.2), # Round to multiples of 0.2
"randint": tune.randint(-9, 15), # Random integer between -9 and 15
"qrandint": tune.qrandint(-21, 12, 3), # Round to multiples of 3 (includes 12)
"lograndint": tune.lograndint(1, 10), # Random integer in log space
"qlograndint": tune.qlograndint(1, 10, 2), # Round to multiples of 2
"choice": tune.choice(["a", "b", "c"]), # Choose one of these options uniformly
"func": tune.sample_from(
lambda spec: spec.config.uniform * 0.01
), # Depends on other value
"grid": tune.grid_search([32, 64, 128]), # Search over all these values
}
# __config_end__
# __bayes_start__
from ray.tune.search.bayesopt import BayesOptSearch
# Define the search space
search_space = {"a": tune.uniform(0, 1), "b": tune.uniform(0, 20)}
algo = BayesOptSearch(random_search_steps=4)
tuner = tune.Tuner(
trainable,
tune_config=tune.TuneConfig(
metric="score",
mode="min",
search_alg=algo,
),
run_config=tune.RunConfig(stop={"training_iteration": 20}),
param_space=search_space,
)
tuner.fit()
# __bayes_end__
# __hyperband_start__
from ray.tune.schedulers import HyperBandScheduler
# Create HyperBand scheduler and minimize the score
hyperband = HyperBandScheduler(metric="score", mode="max")
config = {"a": tune.uniform(0, 1), "b": tune.uniform(0, 1)}
tuner = tune.Tuner(
trainable,
tune_config=tune.TuneConfig(
num_samples=20,
scheduler=hyperband,
),
param_space=config,
)
tuner.fit()
# __hyperband_end__
# __analysis_start__
tuner = tune.Tuner(
trainable,
tune_config=tune.TuneConfig(
metric="score",
mode="min",
search_alg=BayesOptSearch(random_search_steps=4),
),
run_config=tune.RunConfig(
stop={"training_iteration": 20},
),
param_space=config,
)
results = tuner.fit()
best_result = results.get_best_result() # Get best result object
best_config = best_result.config # Get best trial's hyperparameters
best_logdir = best_result.path # Get best trial's result directory
best_checkpoint = best_result.checkpoint # Get best trial's best checkpoint
best_metrics = best_result.metrics # Get best trial's last results
best_result_df = best_result.metrics_dataframe # Get best result as pandas dataframe
# __analysis_end__
# __results_start__
# Get a dataframe with the last results for each trial
df_results = results.get_dataframe()
# Get a dataframe of results for a specific score or mode
df = results.get_dataframe(filter_metric="score", filter_mode="max")
# __results_end__
| Trainable |
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 170830,
"end": 174904
} | class ____:
# elision is only triggered on relatively large arrays
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
# def incref_elide(a):
# d = input.copy() # refcount 1
# return d, d + d # PyNumber_Add without increasing refcount
from numpy._core._multiarray_tests import incref_elide
d = np.ones(100000)
orig, res = incref_elide(d)
d + d
# the return original should not be changed to an inplace operation
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
# scanning if the refcount == 1 object is on the python stack to check
# that we are called directly from python is flawed as object may still
# be above the stack pointer and we have no access to the top of it
#
# def incref_elide_l(d):
# return l[4] + l[4] # PyNumber_Add without increasing refcount
from numpy._core._multiarray_tests import incref_elide_l
# padding with 1 makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(100000)]
res = incref_elide_l(l)
# the return original should not be changed to an inplace operation
assert_array_equal(l[4], np.ones(100000))
assert_array_equal(res, l[4] + l[4])
def test_temporary_with_cast(self):
# check that we don't elide into a temporary which would need casting
d = np.ones(200000, dtype=np.int64)
r = ((d + d) + np.array(2**222, dtype='O'))
assert_equal(r.dtype, np.dtype('O'))
r = ((d + d) / 2)
assert_equal(r.dtype, np.dtype('f8'))
r = np.true_divide((d + d), 2)
assert_equal(r.dtype, np.dtype('f8'))
r = ((d + d) / 2.)
assert_equal(r.dtype, np.dtype('f8'))
r = ((d + d) // 2)
assert_equal(r.dtype, np.dtype(np.int64))
# commutative elision into the astype result
f = np.ones(100000, dtype=np.float32)
assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8'))
# no elision into lower type
d = f.astype(np.float64)
assert_equal(((f + f) + d).dtype, d.dtype)
l = np.ones(100000, dtype=np.longdouble)
assert_equal(((d + d) + l).dtype, l.dtype)
# test unary abs with different output dtype
for dt in (np.complex64, np.complex128, np.clongdouble):
c = np.ones(100000, dtype=dt)
r = abs(c * 2.0)
assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2)))
def test_elide_broadcast(self):
# test no elision on broadcast to higher dimension
# only triggers elision code path in debug mode as triggering it in
# normal mode needs 256kb large matching dimension, so a lot of memory
d = np.ones((2000, 1), dtype=int)
b = np.ones((2000), dtype=bool)
r = (1 - d) + b
assert_equal(r, 1)
assert_equal(r.shape, (2000, 2000))
def test_elide_scalar(self):
# check inplace op does not create ndarray from scalars
a = np.bool()
assert_(type(~(a & a)) is np.bool)
def test_elide_scalar_readonly(self):
# The imaginary part of a real array is readonly. This needs to go
# through fast_scalar_power which is only called for powers of
# +1, -1, 0, 0.5, and 2, so use 2. Also need valid refcount for
# elision which can be gotten for the imaginary part of a real
# array. Should not error.
a = np.empty(100000, dtype=np.float64)
a.imag ** 2
def test_elide_readonly(self):
# don't try to elide readonly temporaries
r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0
assert_equal(r, 0)
def test_elide_updateifcopy(self):
a = np.ones(2**20)[::2]
b = a.flat.__array__() + 1
del b
assert_equal(a, 1)
| TestTemporaryElide |
python | huggingface__transformers | tests/models/aria/test_modeling_aria.py | {
"start": 7885,
"end": 26272
} | class ____(unittest.TestCase):
def setUp(self):
self.processor = AutoProcessor.from_pretrained("rhymes-ai/Aria")
cleanup(torch_device, gc_collect=True)
def tearDown(self):
cleanup(torch_device, gc_collect=True)
@require_torch_large_accelerator
@require_bitsandbytes
def test_small_model_integration_test(self):
# Let's make sure we test the preprocessing to replace what is used
model = AriaForConditionalGeneration.from_pretrained(
"rhymes-ai/Aria",
quantization_config=BitsAndBytesConfig(load_in_4bit=True, llm_int8_skip_modules=["multihead_attn"]),
)
prompt = "<|img|>\nUSER: What are the things I should be cautious about when I visit this place?\nASSISTANT:"
raw_image = Image.open(requests.get(IMAGE_OF_VIEW_URL, stream=True).raw)
inputs = self.processor(images=raw_image, text=prompt, return_tensors="pt").to(model.device, model.dtype)
non_img_tokens = [
109, 3905, 2000, 93415, 4551, 1162, 901, 3894, 970, 2478, 1017, 19312, 2388, 1596, 1809, 970, 5449, 1235,
3333, 93483, 109, 61081, 11984, 14800, 93415
] # fmt: skip
EXPECTED_INPUT_IDS = torch.tensor([[9] * 256 + non_img_tokens]).to(inputs["input_ids"].device)
self.assertTrue(torch.equal(inputs["input_ids"], EXPECTED_INPUT_IDS))
output = model.generate(**inputs, max_new_tokens=20)
decoded_output = self.processor.decode(output[0], skip_special_tokens=True)
expected_output = Expectations(
{
(
"cuda",
None,
): "\nUSER: What are the things I should be cautious about when I visit this place?\nASSISTANT: When visiting this place, there are a few things one should be cautious about. Firstly,",
(
"rocm",
(9, 5),
): "\n USER: What are the things I should be cautious about when I visit this place?\n ASSISTANT: When you visit this place, you should be cautious about the following things:\n\n- The",
}
).get_expectation()
self.assertEqual(decoded_output, expected_output)
@require_torch_large_accelerator
@require_bitsandbytes
def test_small_model_integration_test_llama_single(self):
# Let's make sure we test the preprocessing to replace what is used
model_id = "rhymes-ai/Aria"
model = AriaForConditionalGeneration.from_pretrained(
model_id,
quantization_config=BitsAndBytesConfig(load_in_4bit=True, llm_int8_skip_modules=["multihead_attn"]),
)
processor = AutoProcessor.from_pretrained(model_id)
prompt = "USER: <|img|>\nWhat are the things I should be cautious about when I visit this place? ASSISTANT:"
raw_image = Image.open(requests.get(IMAGE_OF_VIEW_URL, stream=True).raw)
inputs = processor(images=raw_image, text=prompt, return_tensors="pt").to(model.device, model.dtype)
output = model.generate(**inputs, max_new_tokens=90, do_sample=False)
EXPECTED_DECODED_TEXT = Expectations(
{
("cuda", (8, 0)): "USER: \n What are the things I should be cautious about when I visit this place? ASSISTANT: When visiting this beautiful location, it's important to be mindful of a few things to ensure both your safety and the preservation of the environment. Firstly, always be cautious when walking on the wooden pier, as it can be slippery, especially during or after rain. Secondly, be aware of the local wildlife and do not feed or disturb them. Lastly, respect the natural surroundings by not littering and sticking to",
("rocm", (9, 5)): "USER: \n What are the things I should be cautious about when I visit this place? ASSISTANT: \n\nWhen visiting this place, you should be cautious about the following:\n\n1. **Weather Conditions**: The weather can be unpredictable, so it's important to check the forecast and dress in layers. Sudden changes in weather can occur, so be prepared for rain or cold temperatures.\n\n2. **Safety on the Dock**: The dock may be slippery, especially when",
}
).get_expectation() # fmt: off
decoded_output = processor.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
self.assertEqual(
decoded_output,
EXPECTED_DECODED_TEXT,
f"Expected: {repr(EXPECTED_DECODED_TEXT)}\nActual: {repr(decoded_output)}",
)
@require_torch_large_accelerator
@require_bitsandbytes
def test_small_model_integration_test_llama_batched(self):
# Let's make sure we test the preprocessing to replace what is used
model_id = "rhymes-ai/Aria"
model = AriaForConditionalGeneration.from_pretrained(
model_id,
quantization_config=BitsAndBytesConfig(load_in_4bit=True, llm_int8_skip_modules=["multihead_attn"]),
)
processor = AutoProcessor.from_pretrained(model_id)
prompts = [
"USER: <|img|>\nWhat are the things I should be cautious about when I visit this place? What should I bring with me? ASSISTANT:",
"USER: <|img|>\nWhat is this? ASSISTANT:",
]
image1 = Image.open(requests.get(IMAGE_OF_VIEW_URL, stream=True).raw)
image2 = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(images=[image1, image2], text=prompts, return_tensors="pt", padding=True).to(
model.device, model.dtype
)
output = model.generate(**inputs, max_new_tokens=20)
EXPECTED_DECODED_TEXT = Expectations(
{
("cuda", None): [
"USER: \nWhat are the things I should be cautious about when I visit this place? What should I bring with me? ASSISTANT: When visiting this place, which is a pier or dock extending over a body of water, you",
"USER: \nWhat is this? ASSISTANT: The image features two cats lying down on a pink couch. One cat is located on",
],
("rocm", (9, 5)): [
"USER: \n What are the things I should be cautious about when I visit this place? What should I bring with me? ASSISTANT: \n\nWhen visiting this place, you should be cautious about the weather conditions, as it",
"USER: \n What is this? ASSISTANT: This is a picture of two cats sleeping on a couch. USER: What is the color of",
],
}
).get_expectation()
decoded_output = processor.batch_decode(output, skip_special_tokens=True)
self.assertEqual(decoded_output, EXPECTED_DECODED_TEXT)
@require_torch_large_accelerator
@require_bitsandbytes
def test_small_model_integration_test_batch(self):
# Let's make sure we test the preprocessing to replace what is used
model = AriaForConditionalGeneration.from_pretrained(
"rhymes-ai/Aria",
quantization_config=BitsAndBytesConfig(load_in_4bit=True, llm_int8_skip_modules=["multihead_attn"]),
)
# The first batch is longer in terms of text, but only has 1 image. The second batch will be padded in text, but the first will be padded because images take more space!.
prompts = [
"USER: <|img|>\nWhat are the things I should be cautious about when I visit this place? What should I bring with me?\nASSISTANT:",
"USER: <|img|>\nWhat is this?\nASSISTANT:",
]
image1 = Image.open(requests.get(IMAGE_OF_VIEW_URL, stream=True).raw)
image2 = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = self.processor(images=[image1, image2], text=prompts, return_tensors="pt", padding=True).to(
model.device, model.dtype
)
output = model.generate(**inputs, max_new_tokens=20)
EXPECTED_DECODED_TEXT = Expectations({
("cuda", None): [
'USER: \nWhat are the things I should be cautious about when I visit this place? What should I bring with me?\nASSISTANT: When visiting this place, there are a few things to be cautious about and items to bring.',
'USER: \nWhat is this?\nASSISTANT: Cats',
],
("rocm", (9, 5)): [
'USER: \n What are the things I should be cautious about when I visit this place? What should I bring with me?\n ASSISTANT: \n\nWhen visiting this place, you should be cautious about the following:\n\n-',
'USER: \n What is this?\n ASSISTANT: This is a picture of two cats sleeping on a couch. The couch is red, and the cats',
],
}).get_expectation() # fmt: skip
decoded_output = self.processor.batch_decode(output, skip_special_tokens=True)
self.assertEqual(decoded_output, EXPECTED_DECODED_TEXT)
@require_torch_large_accelerator
@require_bitsandbytes
def test_small_model_integration_test_llama_batched_regression(self):
# Let's make sure we test the preprocessing to replace what is used
model_id = "rhymes-ai/Aria"
# Multi-image & multi-prompt (e.g. 3 images and 2 prompts now fails with SDPA, this tests if "eager" works as before)
model = AriaForConditionalGeneration.from_pretrained(
model_id,
quantization_config=BitsAndBytesConfig(load_in_4bit=True, llm_int8_skip_modules=["multihead_attn"]),
)
processor = AutoProcessor.from_pretrained(model_id, pad_token="<pad>")
prompts = [
"USER: <|img|>\nWhat are the things I should be cautious about when I visit this place? What should I bring with me?\nASSISTANT:",
"USER: <|img|>\nWhat is this?\nASSISTANT: Two cats lying on a bed!\nUSER: <|img|>\nAnd this?\nASSISTANT:",
]
image1 = Image.open(requests.get(IMAGE_OF_VIEW_URL, stream=True).raw)
image2 = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(images=[image1, image2, image1], text=prompts, return_tensors="pt", padding=True)
inputs = inputs.to(model.device, model.dtype)
output = model.generate(**inputs, max_new_tokens=20)
EXPECTED_DECODED_TEXT = Expectations({
("cuda", None): ['USER: \nWhat are the things I should be cautious about when I visit this place? What should I bring with me?\nASSISTANT: When visiting this place, which appears to be a dock or pier extending over a body of water', 'USER: \nWhat is this?\nASSISTANT: Two cats lying on a bed!\nUSER: \nAnd this?\nASSISTANT: A cat sleeping on a bed.'],
("rocm", (9, 5)): ['USER: \n What are the things I should be cautious about when I visit this place? What should I bring with me?\n ASSISTANT: \n\nWhen visiting this place, you should be cautious about the weather conditions, as it', 'USER: \n What is this?\n ASSISTANT: Two cats lying on a bed!\n USER: \n And this?\n ASSISTANT: A serene lake scene with a wooden dock extending into the water.\n USER: \n']
}).get_expectation() # fmt: skip
decoded_output = processor.batch_decode(output, skip_special_tokens=True)
self.assertEqual(decoded_output, EXPECTED_DECODED_TEXT)
@require_torch_large_accelerator
@require_vision
@require_bitsandbytes
def test_batched_generation(self):
# Skip multihead_attn for 4bit because MHA will read the original weight without dequantize.
# See https://github.com/huggingface/transformers/pull/37444#discussion_r2045852538.
model = AriaForConditionalGeneration.from_pretrained(
"rhymes-ai/Aria",
quantization_config=BitsAndBytesConfig(load_in_4bit=True, llm_int8_skip_modules=["multihead_attn"]),
)
processor = AutoProcessor.from_pretrained("rhymes-ai/Aria")
prompt1 = "<image>\n<image>\nUSER: What's the difference of two images?\nASSISTANT:"
prompt2 = "<image>\nUSER: Describe the image.\nASSISTANT:"
prompt3 = "<image>\nUSER: Describe the image.\nASSISTANT:"
url1 = "https://images.unsplash.com/photo-1552053831-71594a27632d?q=80&w=3062&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D"
url2 = "https://images.unsplash.com/photo-1617258683320-61900b281ced?q=80&w=3087&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D"
image1 = Image.open(requests.get(url1, stream=True).raw)
image2 = Image.open(requests.get(url2, stream=True).raw)
# Create inputs
messages = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "text", "text": prompt1},
{"type": "image"},
{"type": "text", "text": prompt2},
],
},
{
"role": "user",
"content": [
{"type": "image"},
{"type": "text", "text": prompt3},
],
},
]
prompts = [processor.apply_chat_template([message], add_generation_prompt=True) for message in messages]
images = [[image1, image2], [image2]]
inputs = processor(text=prompts, images=images, padding=True, return_tensors="pt").to(
device=model.device, dtype=model.dtype
)
EXPECTED_OUTPUTS = Expectations(
{
("cpu", None): [
"<|im_start|>user\n<fim_prefix><fim_suffix> <image>\n <image>\n USER: What's the difference of two images?\n ASSISTANT:<fim_prefix><fim_suffix> <image>\n USER: Describe the image.\n ASSISTANT:<|im_end|>\n <|im_start|>assistant\n The first image features a cute, light-colored puppy sitting on a paved surface with",
"<|im_start|>user\n<fim_prefix><fim_suffix> <image>\n USER: Describe the image.\n ASSISTANT:<|im_end|>\n <|im_start|>assistant\n The image shows a young alpaca standing on a grassy hill. The alpaca has",
],
("cuda", None): [
"<|im_start|>user\n<fim_prefix><fim_suffix> <image>\n <image>\n USER: What's the difference of two images?\n ASSISTANT:<fim_prefix><fim_suffix> <image>\n USER: Describe the image.\n ASSISTANT:<|im_end|>\n <|im_start|>assistant\n The first image features a cute, light-colored puppy sitting on a paved surface with",
"<|im_start|>user\n<fim_prefix><fim_suffix> <image>\n USER: Describe the image.\n ASSISTANT:<|im_end|>\n <|im_start|>assistant\n The image shows a young alpaca standing on a patch of ground with some dry grass. The",
],
("xpu", 3): [
"<|im_start|>user\n<fim_prefix><fim_suffix> <image>\n <image>\n USER: What's the difference of two images?\n ASSISTANT:<fim_prefix><fim_suffix> <image>\n USER: Describe the image.\n ASSISTANT:<|im_end|>\n <|im_start|>assistant\n The first image features a cute, light-colored puppy sitting on a paved surface with",
"<|im_start|>user\n<fim_prefix><fim_suffix> <image>\n USER: Describe the image.\n ASSISTANT:<|im_end|>\n <|im_start|>assistant\n The image shows a young alpaca standing on a patch of ground with some dry grass. The",
],
("rocm", (9, 5)): [
"<|im_start|>user\n<fim_prefix><fim_suffix> <image>\n <image>\n USER: What's the difference of two images?\n ASSISTANT:<fim_prefix><fim_suffix> <image>\n USER: Describe the image.\n ASSISTANT:<|im_end|>\n <|im_start|>assistant\n The first image shows a cute golden retriever puppy sitting on a paved surface with a stick",
'<|im_start|>user\n<fim_prefix><fim_suffix> <image>\n USER: Describe the image.\n ASSISTANT:<|im_end|>\n <|im_start|>assistant\n The image shows a young llama standing on a patch of ground with some dry grass and dirt. The'
],
}
) # fmt: skip
EXPECTED_OUTPUT = EXPECTED_OUTPUTS.get_expectation()
generate_ids = model.generate(**inputs, max_new_tokens=20)
outputs = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
self.assertListEqual(outputs, EXPECTED_OUTPUT)
def test_tokenizer_integration(self):
model_id = "rhymes-ai/Aria"
slow_tokenizer = AutoTokenizer.from_pretrained(
model_id, bos_token="<|startoftext|>", eos_token="<|endoftext|>", use_fast=False
)
slow_tokenizer.add_tokens("<image>", True)
fast_tokenizer = AutoTokenizer.from_pretrained(
model_id,
bos_token="<|startoftext|>",
eos_token="<|endoftext|>",
from_slow=True,
legacy=False,
)
fast_tokenizer.add_tokens("<image>", True)
prompt = "<|startoftext|><|im_start|>system\nAnswer the questions.<|im_end|><|im_start|>user\n<image>\nWhat is shown in this image?<|im_end|>"
EXPECTED_OUTPUT = ['<|startoftext|>', '<', '|', 'im', '_', 'start', '|', '>', 'system', '\n', 'Answer', '▁the', '▁questions', '.<', '|', 'im', '_', 'end', '|', '><', '|', 'im', '_', 'start', '|', '>', 'user', '\n', '<image>', '\n', 'What', '▁is', '▁shown', '▁in', '▁this', '▁image', '?', '<', '|', 'im', '_', 'end', '|', '>'] # fmt: skip
self.assertEqual(slow_tokenizer.tokenize(prompt), EXPECTED_OUTPUT)
self.assertEqual(fast_tokenizer.tokenize(prompt), EXPECTED_OUTPUT)
@require_torch_large_accelerator
@require_bitsandbytes
def test_generation_no_images(self):
model_id = "rhymes-ai/Aria"
model = AriaForConditionalGeneration.from_pretrained(
model_id,
quantization_config=BitsAndBytesConfig(load_in_4bit=True, llm_int8_skip_modules=["multihead_attn"]),
)
processor = AutoProcessor.from_pretrained(model_id)
# Prepare inputs with no images
inputs = processor(text="Hello, I am", return_tensors="pt").to(torch_device)
# Make sure that `generate` works
_ = model.generate(**inputs, max_new_tokens=20)
| AriaForConditionalGenerationIntegrationTest |
python | mkdocs__mkdocs | mkdocs/tests/utils/utils_tests.py | {
"start": 16374,
"end": 19142
} | class ____(unittest.TestCase):
def setUp(self):
utils.get_themes.cache_clear()
def test_get_themes(self):
themes = utils.get_theme_names()
self.assertIn('mkdocs', themes)
self.assertIn('readthedocs', themes)
@mock.patch('mkdocs.utils.entry_points', autospec=True)
def test_get_theme_dir(self, mock_iter):
path = 'some/path'
theme = mock.Mock()
theme.name = 'mkdocs2'
theme.dist.name = 'mkdocs2'
theme.load().__file__ = os.path.join(path, '__init__.py')
mock_iter.return_value = [theme]
self.assertEqual(utils.get_theme_dir(theme.name), os.path.abspath(path))
def test_get_theme_dir_error(self):
with self.assertRaises(KeyError):
utils.get_theme_dir('nonexistanttheme')
@mock.patch('mkdocs.utils.entry_points', autospec=True)
def test_get_theme_dir_importerror(self, mock_iter):
theme = mock.Mock()
theme.name = 'mkdocs2'
theme.dist.name = 'mkdocs2'
theme.load.side_effect = ImportError()
mock_iter.return_value = [theme]
with self.assertRaises(ImportError):
utils.get_theme_dir(theme.name)
@mock.patch('mkdocs.utils.entry_points', autospec=True)
def test_get_themes_warning(self, mock_iter):
theme1 = mock.Mock()
theme1.name = 'mkdocs2'
theme1.dist.name = 'mkdocs2'
theme1.load().__file__ = "some/path1"
theme2 = mock.Mock()
theme2.name = 'mkdocs2'
theme2.dist.name = 'mkdocs3'
theme2.load().__file__ = "some/path2"
mock_iter.return_value = [theme1, theme2]
with self.assertLogs('mkdocs') as cm:
theme_names = utils.get_theme_names()
self.assertEqual(
'\n'.join(cm.output),
"WARNING:mkdocs.utils:A theme named 'mkdocs2' is provided by the Python "
"packages 'mkdocs3' and 'mkdocs2'. The one in 'mkdocs3' will be used.",
)
self.assertCountEqual(theme_names, ['mkdocs2'])
@mock.patch('mkdocs.utils.entry_points', autospec=True)
def test_get_themes_error(self, mock_iter):
theme1 = mock.Mock()
theme1.name = 'mkdocs'
theme1.dist.name = 'mkdocs'
theme1.load().__file__ = "some/path1"
theme2 = mock.Mock()
theme2.name = 'mkdocs'
theme2.dist.name = 'mkdocs2'
theme2.load().__file__ = "some/path2"
mock_iter.return_value = [theme1, theme2]
with self.assertRaisesRegex(
exceptions.ConfigurationError,
"The theme 'mkdocs' is a builtin theme but the package 'mkdocs2' "
"attempts to provide a theme with the same name.",
):
utils.get_theme_names()
| ThemeUtilsTests |
python | google__pytype | pytype/pytd/optimize.py | {
"start": 21153,
"end": 21852
} | class ____(visitors.Visitor):
"""Converts mutable parameters to unions. This is lossy.
For example, this will change
def f(x: list[int]):
x = list[Union[int, float]]
to
def f(x: Union[list[int], list[Union[int, float]])
.
(Use optimize.CombineContainers to then change x to list[Union[int, float]].)
This also works for methods - it will then potentially change the type of
"self". The resulting AST is temporary and needs careful handling.
"""
def VisitParameter(self, p):
if p.mutated_type is None:
return p
else:
return p.Replace(
type=pytd_utils.JoinTypes([p.type, p.mutated_type]), mutated_type=None
)
| AbsorbMutableParameters |
python | bokeh__bokeh | src/bokeh/models/dom.py | {
"start": 2526,
"end": 2909
} | class ____(DOMNode):
""" Base class for DOM elements. """
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
style = Either(Instance(Styles), Dict(String, String), default={})
children = List(Either(String, Instance(DOMNode), Instance(UIElement)), default=[])
| DOMElement |
python | imageio__imageio | imageio/plugins/ffmpeg.py | {
"start": 26979,
"end": 30141
} | class ____(threading.Thread):
"""Thread to keep reading the frame data from stdout. This is
useful when streaming from a webcam. Otherwise, if the user code
does not grab frames fast enough, the buffer will fill up, leading
to lag, and ffmpeg can also stall (experienced on Linux). The
get_frame() method always returns the last available image.
"""
def __init__(self, gen):
self._gen = gen
self._frame = None
self._frame_is_new = False
self._lock = threading.RLock()
threading.Thread.__init__(self)
self.daemon = True # do not let this thread hold up Python shutdown
self._should_stop = False
self.start()
def stop_me(self):
self._should_stop = True
while self.is_alive():
time.sleep(0.001)
def get_frame(self):
while self._frame is None: # pragma: no cover - an init thing
time.sleep(0.001)
with self._lock:
is_new = self._frame_is_new
self._frame_is_new = False # reset
return self._frame, is_new
def run(self):
# This runs in the worker thread
try:
while not self._should_stop:
time.sleep(0) # give control to other threads
frame = self._gen.__next__()
with self._lock:
self._frame = frame
self._frame_is_new = True
except (StopIteration, EOFError):
pass
def parse_device_names(ffmpeg_output):
"""Parse the output of the ffmpeg -list-devices command"""
# Collect device names - get [friendly_name, alt_name] of each
device_names = []
in_video_devices = False
for line in ffmpeg_output.splitlines():
if line.startswith("[dshow"):
logger.debug(line)
line = line.split("]", 1)[1].strip()
if in_video_devices and line.startswith('"'):
friendly_name = line[1:-1]
device_names.append([friendly_name, ""])
elif in_video_devices and line.lower().startswith("alternative name"):
alt_name = line.split(" name ", 1)[1].strip()[1:-1]
if sys.platform.startswith("win"):
alt_name = alt_name.replace("&", "^&") # Tested to work
else:
alt_name = alt_name.replace("&", "\\&") # Does this work?
device_names[-1][-1] = alt_name
elif "video devices" in line:
in_video_devices = True
elif "devices" in line:
# set False for subsequent "devices" sections
in_video_devices = False
# Post-process, see #441
# prefer friendly names, use alt name if two cams have same friendly name
device_names2 = []
for friendly_name, alt_name in device_names:
if friendly_name not in device_names2:
device_names2.append(friendly_name)
elif alt_name:
device_names2.append(alt_name)
else:
device_names2.append(friendly_name) # duplicate, but not much we can do
return device_names2
| FrameCatcher |
python | matplotlib__matplotlib | galleries/examples/event_handling/cursor_demo.py | {
"start": 946,
"end": 3102
} | class ____:
"""
A cross hair cursor.
"""
def __init__(self, ax):
self.ax = ax
self.horizontal_line = ax.axhline(color='k', lw=0.8, ls='--')
self.vertical_line = ax.axvline(color='k', lw=0.8, ls='--')
# text location in axes coordinates
self.text = ax.text(0.72, 0.9, '', transform=ax.transAxes)
def set_cross_hair_visible(self, visible):
need_redraw = self.horizontal_line.get_visible() != visible
self.horizontal_line.set_visible(visible)
self.vertical_line.set_visible(visible)
self.text.set_visible(visible)
return need_redraw
def on_mouse_move(self, event):
if not event.inaxes:
need_redraw = self.set_cross_hair_visible(False)
if need_redraw:
self.ax.figure.canvas.draw()
else:
self.set_cross_hair_visible(True)
x, y = event.xdata, event.ydata
# update the line positions
self.horizontal_line.set_ydata([y])
self.vertical_line.set_xdata([x])
self.text.set_text(f'x={x:1.2f}, y={y:1.2f}')
self.ax.figure.canvas.draw()
x = np.arange(0, 1, 0.01)
y = np.sin(2 * 2 * np.pi * x)
fig, ax = plt.subplots()
ax.set_title('Simple cursor')
ax.plot(x, y, 'o')
cursor = Cursor(ax)
fig.canvas.mpl_connect('motion_notify_event', cursor.on_mouse_move)
# Simulate a mouse move to (0.5, 0.5), needed for online docs
t = ax.transData
MouseEvent(
"motion_notify_event", ax.figure.canvas, *t.transform((0.5, 0.5))
)._process()
# %%
# Faster redrawing using blitting
# """""""""""""""""""""""""""""""
# This technique stores the rendered plot as a background image. Only the
# changed artists (cross-hair lines and text) are rendered anew. They are
# combined with the background using blitting.
#
# This technique is significantly faster. It requires a bit more setup because
# the background has to be stored without the cross-hair lines (see
# ``create_new_background()``). Additionally, a new background has to be
# created whenever the figure changes. This is achieved by connecting to the
# ``'draw_event'``.
| Cursor |
python | run-llama__llama_index | llama-index-core/llama_index/core/node_parser/file/json.py | {
"start": 426,
"end": 3613
} | class ____(NodeParser):
"""
JSON node parser.
Splits a document into Nodes using custom JSON splitting logic.
Args:
include_metadata (bool): whether to include metadata in nodes
include_prev_next_rel (bool): whether to include prev/next relationships
"""
@classmethod
def from_defaults(
cls,
include_metadata: bool = True,
include_prev_next_rel: bool = True,
callback_manager: Optional[CallbackManager] = None,
) -> "JSONNodeParser":
callback_manager = callback_manager or CallbackManager([])
return cls(
include_metadata=include_metadata,
include_prev_next_rel=include_prev_next_rel,
callback_manager=callback_manager,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "JSONNodeParser"
def _parse_nodes(
self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any
) -> List[BaseNode]:
all_nodes: List[BaseNode] = []
nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes")
for node in nodes_with_progress:
nodes = self.get_nodes_from_node(node)
all_nodes.extend(nodes)
return all_nodes
def get_nodes_from_node(self, node: BaseNode) -> List[TextNode]:
"""Get nodes from document."""
text = node.get_content(metadata_mode=MetadataMode.NONE)
try:
data = json.loads(text)
except json.JSONDecodeError:
# Handle invalid JSON input here
return []
json_nodes = []
if isinstance(data, dict):
lines = [*self._depth_first_yield(data, 0, [])]
json_nodes.extend(
build_nodes_from_splits(["\n".join(lines)], node, id_func=self.id_func)
)
elif isinstance(data, list):
for json_object in data:
lines = [*self._depth_first_yield(json_object, 0, [])]
json_nodes.extend(
build_nodes_from_splits(
["\n".join(lines)], node, id_func=self.id_func
)
)
else:
raise ValueError("JSON is invalid")
return json_nodes
def _depth_first_yield(
self, json_data: Dict, levels_back: int, path: List[str]
) -> Generator[str, None, None]:
"""
Do depth first yield of all of the leaf nodes of a JSON.
Combines keys in the JSON tree using spaces.
If levels_back is set to 0, prints all levels.
"""
if isinstance(json_data, dict):
for key, value in json_data.items():
new_path = path[:]
new_path.append(key)
yield from self._depth_first_yield(value, levels_back, new_path)
elif isinstance(json_data, list):
for _, value in enumerate(json_data):
yield from self._depth_first_yield(value, levels_back, path)
else:
new_path = path[-levels_back:]
new_path.append(str(json_data))
yield " ".join(new_path)
| JSONNodeParser |
python | django-haystack__django-haystack | test_haystack/whoosh_tests/test_whoosh_backend.py | {
"start": 3806,
"end": 28390
} | class ____(WhooshTestCase):
fixtures = ["bulk_data.json"]
def setUp(self):
super().setUp()
self.old_ui = connections["whoosh"].get_unified_index()
self.ui = UnifiedIndex()
self.wmmi = WhooshMockSearchIndex()
self.wmmidni = WhooshMockSearchIndexWithSkipDocument()
self.wmtmmi = WhooshMaintainTypeMockSearchIndex()
self.ui.build(indexes=[self.wmmi])
self.sb = connections["whoosh"].get_backend()
connections["whoosh"]._index = self.ui
self.sb.setup()
self.raw_whoosh = self.sb.index
self.parser = QueryParser(self.sb.content_field_name, schema=self.sb.schema)
self.sb.delete_index()
self.sample_objs = MockModel.objects.all()
def tearDown(self):
connections["whoosh"]._index = self.old_ui
super().tearDown()
def whoosh_search(self, query):
self.raw_whoosh = self.raw_whoosh.refresh()
searcher = self.raw_whoosh.searcher()
return searcher.search(self.parser.parse(query), limit=1000)
def test_non_silent(self):
bad_sb = connections["whoosh"].backend(
"bad", PATH="/tmp/bad_whoosh", SILENTLY_FAIL=False
)
bad_sb.use_file_storage = False
bad_sb.storage = "omg.wtf.bbq"
try:
bad_sb.update(self.wmmi, self.sample_objs)
self.fail()
except:
pass
try:
bad_sb.remove("core.mockmodel.1")
self.fail()
except:
pass
try:
bad_sb.clear()
self.fail()
except:
pass
try:
bad_sb.search("foo")
self.fail()
except:
pass
def test_update(self):
self.sb.update(self.wmmi, self.sample_objs)
# Check what Whoosh thinks is there.
self.assertEqual(len(self.whoosh_search("*")), 23)
self.assertEqual(
[doc.fields()["id"] for doc in self.whoosh_search("*")],
["core.mockmodel.%s" % i for i in range(1, 24)],
)
def test_update_with_SkipDocument_raised(self):
self.sb.update(self.wmmidni, self.sample_objs)
# Check what Whoosh thinks is there.
res = self.whoosh_search("*")
self.assertEqual(len(res), 14)
ids = [1, 2, 5, 6, 7, 8, 9, 11, 12, 14, 15, 18, 20, 21]
self.assertListEqual(
[doc.fields()["id"] for doc in res], ["core.mockmodel.%s" % i for i in ids]
)
def test_remove(self):
self.sb.update(self.wmmi, self.sample_objs)
self.assertEqual(self.sb.index.doc_count(), 23)
self.sb.remove(self.sample_objs[0])
self.assertEqual(self.sb.index.doc_count(), 22)
def test_clear(self):
self.sb.update(self.wmmi, self.sample_objs)
self.assertEqual(self.sb.index.doc_count(), 23)
self.sb.clear()
self.assertEqual(self.sb.index.doc_count(), 0)
self.sb.update(self.wmmi, self.sample_objs)
self.assertEqual(self.sb.index.doc_count(), 23)
self.sb.clear([AnotherMockModel])
self.assertEqual(self.sb.index.doc_count(), 23)
self.sb.clear([MockModel])
self.assertEqual(self.sb.index.doc_count(), 0)
self.sb.index.refresh()
self.sb.update(self.wmmi, self.sample_objs)
self.assertEqual(self.sb.index.doc_count(), 23)
self.sb.clear([AnotherMockModel, MockModel])
self.assertEqual(self.raw_whoosh.doc_count(), 0)
def test_search(self):
self.sb.update(self.wmmi, self.sample_objs)
self.assertEqual(len(self.whoosh_search("*")), 23)
# No query string should always yield zero results.
self.assertEqual(self.sb.search(""), {"hits": 0, "results": []})
# A one letter query string gets nabbed by a stopwords filter. Should
# always yield zero results.
self.assertEqual(self.sb.search("a"), {"hits": 0, "results": []})
# Possible AttributeError?
# self.assertEqual(self.sb.search(u'a b'), {'hits': 0, 'results': [], 'spelling_suggestion': '', 'facets': {}})
self.assertEqual(self.sb.search("*")["hits"], 23)
self.assertEqual(
[result.pk for result in self.sb.search("*")["results"]],
["%s" % i for i in range(1, 24)],
)
self.assertEqual(self.sb.search("Indexe")["hits"], 23)
self.assertEqual(self.sb.search("Indexe")["spelling_suggestion"], "indexed")
self.assertEqual(
self.sb.search("", facets=["name"]), {"hits": 0, "results": []}
)
results = self.sb.search("index*", facets=["name"])
self.assertEqual(results["hits"], 23)
self.assertEqual(results["facets"]["dates"], {})
self.assertEqual(results["facets"]["queries"], {})
self.assertEqual(
results["facets"]["fields"]["name"],
[("daniel3", 9), ("daniel1", 7), ("daniel2", 7)],
)
self.assertEqual(
self.sb.search(
"",
date_facets={
"pub_date": {
"start_date": date(2007, 2, 26),
"end_date": date(2008, 2, 26),
"gap_by": "month",
}
},
),
{"hits": 0, "results": []},
)
results = self.sb.search(
"Index*",
date_facets={
"pub_date": {
"start_date": date(2007, 2, 26),
"end_date": date(2008, 2, 26),
"gap_by": "month",
}
},
)
results = self.sb.search(
"index*",
date_facets={
"pub_date": {
"start_date": date(2007, 2, 26),
"end_date": date(2008, 2, 26),
"gap_by": "month",
}
},
)
self.assertEqual(results["hits"], 23)
self.assertEqual(results["facets"]["fields"], {})
self.assertEqual(results["facets"]["queries"], {})
self.assertEqual(results["facets"]["dates"]["pub_date"], [(None, 23)])
results = self.sb.search(
"index*",
date_facets={
"pub_date": {
"start_date": date(2009, 3, 26),
"end_date": date(2010, 2, 26),
"gap_by": "month",
"gap_amount": 2,
}
},
)
self.assertEqual(results["hits"], 23)
self.assertEqual(
results["facets"]["dates"]["pub_date"],
[
((datetime(2009, 5, 26, 0, 0), datetime(2009, 7, 26, 0, 0)), 23),
],
)
results = self.sb.search(
"index*",
date_facets={
"pub_date": {
"start_date": date(2009, 7, 1),
"end_date": date(2009, 8, 1),
"gap_by": "day",
"gap_amount": 1,
}
},
)
self.assertEqual(results["hits"], 23)
self.assertEqual(
results["facets"]["dates"]["pub_date"],
[
((datetime(2009, 7, 17, 0, 0), datetime(2009, 7, 18, 0, 0)), 21),
(None, 2),
],
)
results = self.sb.search(
"index*",
date_facets={
"pub_date": {
"start_date": datetime(2009, 6, 1),
"end_date": datetime(2009, 8, 1),
"gap_by": "hour",
}
},
)
self.assertEqual(results["hits"], 23)
self.assertEqual(
results["facets"]["dates"]["pub_date"],
[
((datetime(2009, 6, 18, 6, 0), datetime(2009, 6, 18, 7, 0)), 1),
((datetime(2009, 6, 18, 8, 0), datetime(2009, 6, 18, 9, 0)), 1),
((datetime(2009, 7, 17, 0, 0), datetime(2009, 7, 17, 1, 0)), 1),
((datetime(2009, 7, 17, 1, 0), datetime(2009, 7, 17, 2, 0)), 1),
((datetime(2009, 7, 17, 2, 0), datetime(2009, 7, 17, 3, 0)), 1),
((datetime(2009, 7, 17, 3, 0), datetime(2009, 7, 17, 4, 0)), 1),
((datetime(2009, 7, 17, 4, 0), datetime(2009, 7, 17, 5, 0)), 1),
((datetime(2009, 7, 17, 5, 0), datetime(2009, 7, 17, 6, 0)), 1),
((datetime(2009, 7, 17, 6, 0), datetime(2009, 7, 17, 7, 0)), 1),
((datetime(2009, 7, 17, 7, 0), datetime(2009, 7, 17, 8, 0)), 1),
((datetime(2009, 7, 17, 8, 0), datetime(2009, 7, 17, 9, 0)), 1),
((datetime(2009, 7, 17, 9, 0), datetime(2009, 7, 17, 10, 0)), 1),
((datetime(2009, 7, 17, 10, 0), datetime(2009, 7, 17, 11, 0)), 1),
((datetime(2009, 7, 17, 11, 0), datetime(2009, 7, 17, 12, 0)), 1),
((datetime(2009, 7, 17, 12, 0), datetime(2009, 7, 17, 13, 0)), 1),
((datetime(2009, 7, 17, 13, 0), datetime(2009, 7, 17, 14, 0)), 1),
((datetime(2009, 7, 17, 14, 0), datetime(2009, 7, 17, 15, 0)), 1),
((datetime(2009, 7, 17, 15, 0), datetime(2009, 7, 17, 16, 0)), 1),
((datetime(2009, 7, 17, 16, 0), datetime(2009, 7, 17, 17, 0)), 1),
((datetime(2009, 7, 17, 17, 0), datetime(2009, 7, 17, 18, 0)), 1),
((datetime(2009, 7, 17, 18, 0), datetime(2009, 7, 17, 19, 0)), 1),
((datetime(2009, 7, 17, 19, 0), datetime(2009, 7, 17, 20, 0)), 1),
((datetime(2009, 7, 17, 20, 0), datetime(2009, 7, 17, 21, 0)), 1),
],
)
self.assertEqual(
self.sb.search("", query_facets={"name": "[* TO e]"}),
{"hits": 0, "results": []},
)
results = self.sb.search("Index*", query_facets={"name": "[* TO e]"})
results = self.sb.search("index*", query_facets={"name": "[* TO e]"})
self.assertEqual(results["hits"], 23)
self.assertEqual(results["facets"], {})
self.assertEqual(
self.sb.search("", narrow_queries=set(["name:daniel1"])),
{"hits": 0, "results": []},
)
results = self.sb.search("Index*", narrow_queries=set(["name:daniel1"]))
self.assertEqual(results["hits"], 7)
# Ensure that swapping the ``result_class`` works.
self.assertTrue(
isinstance(
self.sb.search("Index*", result_class=MockSearchResult)["results"][0],
MockSearchResult,
)
)
# Check the use of ``limit_to_registered_models``.
self.assertEqual(
self.sb.search("", limit_to_registered_models=False),
{"hits": 0, "results": []},
)
self.assertEqual(
self.sb.search("*", limit_to_registered_models=False)["hits"], 23
)
self.assertEqual(
[
result.pk
for result in self.sb.search("*", limit_to_registered_models=False)[
"results"
]
],
["%s" % i for i in range(1, 24)],
)
# Stow.
old_limit_to_registered_models = getattr(
settings, "HAYSTACK_LIMIT_TO_REGISTERED_MODELS", True
)
settings.HAYSTACK_LIMIT_TO_REGISTERED_MODELS = False
self.assertEqual(self.sb.search(""), {"hits": 0, "results": []})
self.assertEqual(self.sb.search("*")["hits"], 23)
self.assertEqual(
[result.pk for result in self.sb.search("*")["results"]],
["%s" % i for i in range(1, 24)],
)
# Restore.
settings.HAYSTACK_LIMIT_TO_REGISTERED_MODELS = old_limit_to_registered_models
def test_highlight(self):
self.sb.update(self.wmmi, self.sample_objs)
self.assertEqual(len(self.whoosh_search("*")), 23)
self.assertEqual(self.sb.search("", highlight=True), {"hits": 0, "results": []})
self.assertEqual(self.sb.search("index*", highlight=True)["hits"], 23)
query = self.sb.search("Index*", highlight=True)["results"]
result = [result.highlighted["text"][0] for result in query]
self.assertEqual(result, ["<em>Indexed</em>!\n%d" % i for i in range(1, 24)])
def test_search_all_models(self):
wamsi = WhooshAnotherMockSearchIndex()
self.ui.build(indexes=[self.wmmi, wamsi])
self.sb.update(self.wmmi, self.sample_objs)
self.sb.update(wamsi, AnotherMockModel.objects.all())
self.assertEqual(len(self.whoosh_search("*")), 25)
self.ui.build(indexes=[self.wmmi])
def test_more_like_this(self):
self.sb.update(self.wmmi, self.sample_objs)
self.assertEqual(len(self.whoosh_search("*")), 23)
# Now supported by Whoosh (as of 1.8.4). See the ``LiveWhooshMoreLikeThisTestCase``.
self.assertEqual(self.sb.more_like_this(self.sample_objs[0])["hits"], 22)
# Make sure that swapping the ``result_class`` doesn't blow up.
try:
self.sb.more_like_this(self.sample_objs[0], result_class=MockSearchResult)
except:
self.fail()
def test_delete_index(self):
self.sb.update(self.wmmi, self.sample_objs)
self.assertTrue(self.sb.index.doc_count() > 0)
self.sb.delete_index()
self.assertEqual(self.sb.index.doc_count(), 0)
def test_order_by(self):
self.sb.update(self.wmmi, self.sample_objs)
results = self.sb.search("*", sort_by=["pub_date"])
self.assertEqual(
[result.pk for result in results["results"]],
[
"1",
"3",
"2",
"4",
"5",
"6",
"7",
"8",
"9",
"10",
"11",
"12",
"13",
"14",
"15",
"16",
"17",
"18",
"19",
"20",
"21",
"22",
"23",
],
)
results = self.sb.search("*", sort_by=["-pub_date"])
self.assertEqual(
[result.pk for result in results["results"]],
[
"23",
"22",
"21",
"20",
"19",
"18",
"17",
"16",
"15",
"14",
"13",
"12",
"11",
"10",
"9",
"8",
"7",
"6",
"5",
"4",
"2",
"3",
"1",
],
)
results = self.sb.search("*", sort_by=["id"])
self.assertEqual(
[result.pk for result in results["results"]],
[
"1",
"10",
"11",
"12",
"13",
"14",
"15",
"16",
"17",
"18",
"19",
"2",
"20",
"21",
"22",
"23",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
],
)
results = self.sb.search("*", sort_by=["-id"])
self.assertEqual(
[result.pk for result in results["results"]],
[
"9",
"8",
"7",
"6",
"5",
"4",
"3",
"23",
"22",
"21",
"20",
"2",
"19",
"18",
"17",
"16",
"15",
"14",
"13",
"12",
"11",
"10",
"1",
],
)
results = self.sb.search("*", sort_by=["-pub_date", "-id"])
self.assertEqual(
[result.pk for result in results["results"]],
[
"23",
"22",
"21",
"20",
"19",
"18",
"17",
"16",
"15",
"14",
"13",
"12",
"11",
"10",
"9",
"8",
"7",
"6",
"5",
"4",
"2",
"3",
"1",
],
)
self.assertRaises(
SearchBackendError, self.sb.search, "*", sort_by=["-pub_date", "id"]
)
def test__from_python(self):
self.assertEqual(self.sb._from_python("abc"), "abc")
self.assertEqual(self.sb._from_python(1), 1)
self.assertEqual(self.sb._from_python(2653), 2653)
self.assertEqual(self.sb._from_python(25.5), 25.5)
self.assertEqual(self.sb._from_python([1, 2, 3]), "1,2,3")
self.assertTrue("a': 1" in self.sb._from_python({"a": 1, "c": 3, "b": 2}))
self.assertEqual(
self.sb._from_python(datetime(2009, 5, 9, 16, 14)),
datetime(2009, 5, 9, 16, 14),
)
self.assertEqual(
self.sb._from_python(datetime(2009, 5, 9, 0, 0)), datetime(2009, 5, 9, 0, 0)
)
self.assertEqual(
self.sb._from_python(datetime(1899, 5, 18, 0, 0)),
datetime(1899, 5, 18, 0, 0),
)
self.assertEqual(
self.sb._from_python(datetime(2009, 5, 18, 1, 16, 30, 250)),
datetime(2009, 5, 18, 1, 16, 30, 250),
)
def test__to_python(self):
self.assertEqual(self.sb._to_python("abc"), "abc")
self.assertEqual(self.sb._to_python("1"), 1)
self.assertEqual(self.sb._to_python("2653"), 2653)
self.assertEqual(self.sb._to_python("25.5"), 25.5)
self.assertEqual(self.sb._to_python("[1, 2, 3]"), [1, 2, 3])
self.assertEqual(
self.sb._to_python('{"a": 1, "b": 2, "c": 3}'), {"a": 1, "c": 3, "b": 2}
)
self.assertEqual(
self.sb._to_python("2009-05-09T16:14:00"), datetime(2009, 5, 9, 16, 14)
)
self.assertEqual(
self.sb._to_python("2009-05-09T00:00:00"), datetime(2009, 5, 9, 0, 0)
)
self.assertEqual(self.sb._to_python(None), None)
def test_range_queries(self):
self.sb.update(self.wmmi, self.sample_objs)
self.assertEqual(len(self.whoosh_search("[d TO]")), 23)
self.assertEqual(len(self.whoosh_search("name:[d TO]")), 23)
self.assertEqual(len(self.whoosh_search("Ind* AND name:[d to]")), 23)
self.assertEqual(len(self.whoosh_search("Ind* AND name:[to c]")), 0)
def test_date_queries(self):
self.sb.update(self.wmmi, self.sample_objs)
self.assertEqual(len(self.whoosh_search("pub_date:20090717003000")), 1)
self.assertEqual(len(self.whoosh_search("pub_date:20090717000000")), 0)
self.assertEqual(
len(self.whoosh_search("Ind* AND pub_date:[to 20090717003000]")), 3
)
def test_escaped_characters_queries(self):
self.sb.update(self.wmmi, self.sample_objs)
self.assertEqual(len(self.whoosh_search("Indexed\!")), 23)
self.assertEqual(len(self.whoosh_search("http\:\/\/www\.example\.com")), 0)
def test_build_schema(self):
ui = UnifiedIndex()
ui.build(indexes=[AllTypesWhooshMockSearchIndex()])
(content_field_name, schema) = self.sb.build_schema(ui.all_searchfields())
self.assertEqual(content_field_name, "text")
schema_names = set(schema.names())
required_schema = {
"django_ct",
"django_id",
"id",
"is_active",
"name",
"pub_date",
"seen_count",
"sites",
"text",
}
self.assertTrue(required_schema.issubset(schema_names))
self.assertIsInstance(schema._fields["text"], TEXT)
self.assertIsInstance(schema._fields["pub_date"], DATETIME)
self.assertIsInstance(schema._fields["seen_count"], NUMERIC)
self.assertIsInstance(schema._fields["sites"], KEYWORD)
self.assertIsInstance(schema._fields["is_active"], BOOLEAN)
def test_verify_type(self):
old_ui = connections["whoosh"].get_unified_index()
ui = UnifiedIndex()
wmtmmi = WhooshMaintainTypeMockSearchIndex()
ui.build(indexes=[wmtmmi])
connections["whoosh"]._index = ui
sb = connections["whoosh"].get_backend()
sb.setup()
sb.update(wmtmmi, self.sample_objs)
self.assertEqual(sb.search("*")["hits"], 23)
self.assertEqual(
[result.month for result in sb.search("*")["results"]],
[
"06",
"07",
"06",
"07",
"07",
"07",
"07",
"07",
"07",
"07",
"07",
"07",
"07",
"07",
"07",
"07",
"07",
"07",
"07",
"07",
"07",
"07",
"07",
],
)
connections["whoosh"]._index = old_ui
@unittest.skipIf(
settings.HAYSTACK_CONNECTIONS["whoosh"].get("STORAGE") != "file",
"testing writability requires Whoosh to use STORAGE=file",
)
def test_writable(self):
if not os.path.exists(settings.HAYSTACK_CONNECTIONS["whoosh"]["PATH"]):
os.makedirs(settings.HAYSTACK_CONNECTIONS["whoosh"]["PATH"])
os.chmod(settings.HAYSTACK_CONNECTIONS["whoosh"]["PATH"], 0o400)
try:
self.sb.setup()
self.fail()
except IOError:
# Yay. We failed
pass
os.chmod(settings.HAYSTACK_CONNECTIONS["whoosh"]["PATH"], 0o755)
def test_slicing(self):
self.sb.update(self.wmmi, self.sample_objs)
page_1 = self.sb.search("*", start_offset=0, end_offset=20)
page_2 = self.sb.search("*", start_offset=20, end_offset=30)
self.assertEqual(len(page_1["results"]), 20)
self.assertEqual(
[result.pk for result in page_1["results"]],
["%s" % i for i in range(1, 21)],
)
self.assertEqual(len(page_2["results"]), 3)
self.assertEqual(
[result.pk for result in page_2["results"]], ["21", "22", "23"]
)
# This used to throw an error.
page_0 = self.sb.search("*", start_offset=0, end_offset=0)
self.assertEqual(len(page_0["results"]), 1)
@unittest.expectedFailure
def test_scoring(self):
self.sb.update(self.wmmi, self.sample_objs)
page_1 = self.sb.search("index", start_offset=0, end_offset=20)
page_2 = self.sb.search("index", start_offset=20, end_offset=30)
self.assertEqual(len(page_1["results"]), 20)
self.assertEqual(
["%0.2f" % result.score for result in page_1["results"]],
[
"0.51",
"0.51",
"0.51",
"0.51",
"0.51",
"0.51",
"0.51",
"0.51",
"0.51",
"0.40",
"0.40",
"0.40",
"0.40",
"0.40",
"0.40",
"0.40",
"0.40",
"0.40",
"0.40",
"0.40",
],
)
self.assertEqual(len(page_2["results"]), 3)
self.assertEqual(
["%0.2f" % result.score for result in page_2["results"]],
["0.40", "0.40", "0.40"],
)
def test_analyzed_fields(self):
self.sb.update(self.wmmi, self.sample_objs)
results = self.whoosh_search("name_analyzed:1234daniel5678")
self.assertEqual(len(results), 23)
| WhooshSearchBackendTestCase |
python | ray-project__ray | python/ray/data/datasource/datasource.py | {
"start": 12685,
"end": 15168
} | class ____(Callable[[], Iterable[Block]]):
"""A function used to read blocks from the :class:`~ray.data.Dataset`.
Read tasks are generated by :meth:`~ray.data.Datasource.get_read_tasks`,
and return a list of ``ray.data.Block`` when called. Initial metadata about the read
operation can be retrieved via the ``metadata`` attribute prior to executing the
read. Final metadata is returned after the read along with the blocks.
Ray will execute read tasks in remote functions to parallelize execution.
Note that the number of blocks returned can vary at runtime. For example,
if a task is reading a single large file it can return multiple blocks to
avoid running out of memory during the read.
The initial metadata should reflect all the blocks returned by the read,
e.g., if the metadata says ``num_rows=1000``, the read can return a single
block of 1000 rows, or multiple blocks with 1000 rows altogether.
The final metadata (returned with the actual block) reflects the exact
contents of the block itself.
"""
def __init__(
self,
read_fn: Callable[[], Iterable[Block]],
metadata: BlockMetadata,
schema: Optional["Schema"] = None,
per_task_row_limit: Optional[int] = None,
):
self._metadata = metadata
self._read_fn = read_fn
self._schema = schema
self._per_task_row_limit = per_task_row_limit
@property
def metadata(self) -> BlockMetadata:
return self._metadata
# TODO(justin): We want to remove schema from `ReadTask` later on
@property
def schema(self) -> Optional["Schema"]:
return self._schema
@property
def read_fn(self) -> Callable[[], Iterable[Block]]:
return self._read_fn
@property
def per_task_row_limit(self) -> Optional[int]:
"""Get the per-task row limit for this read task."""
return self._per_task_row_limit
def __call__(self) -> Iterable[Block]:
result = self._read_fn()
if not hasattr(result, "__iter__"):
DeprecationWarning(
"Read function must return Iterable[Block], got {}. "
"Probably you need to return `[block]` instead of "
"`block`.".format(result)
)
if self._per_task_row_limit is None:
yield from result
return
yield from _iter_sliced_blocks(result, self._per_task_row_limit)
@DeveloperAPI
| ReadTask |
python | celery__celery | celery/utils/imports.py | {
"start": 716,
"end": 5048
} | class ____(Exception):
"""Raised when importing a package, but it's not a package."""
def qualname(obj):
"""Return object name."""
if not hasattr(obj, '__name__') and hasattr(obj, '__class__'):
obj = obj.__class__
q = getattr(obj, '__qualname__', None)
if '.' not in q:
q = '.'.join((obj.__module__, q))
return q
def instantiate(name, *args, **kwargs):
"""Instantiate class by name.
See Also:
:func:`symbol_by_name`.
"""
return symbol_by_name(name)(*args, **kwargs)
@contextmanager
def cwd_in_path():
"""Context adding the current working directory to sys.path."""
try:
cwd = os.getcwd()
except FileNotFoundError:
cwd = None
if not cwd:
yield
elif cwd in sys.path:
yield
else:
sys.path.insert(0, cwd)
try:
yield cwd
finally:
try:
sys.path.remove(cwd)
except ValueError: # pragma: no cover
pass
def find_module(module, path=None, imp=None):
"""Version of :func:`imp.find_module` supporting dots."""
if imp is None:
imp = import_module
with cwd_in_path():
try:
return imp(module)
except ImportError:
# Raise a more specific error if the problem is that one of the
# dot-separated segments of the module name is not a package.
if '.' in module:
parts = module.split('.')
for i, part in enumerate(parts[:-1]):
package = '.'.join(parts[:i + 1])
try:
mpart = imp(package)
except ImportError:
# Break out and re-raise the original ImportError
# instead.
break
try:
mpart.__path__
except AttributeError:
raise NotAPackage(package)
raise
def import_from_cwd(module, imp=None, package=None):
"""Import module, temporarily including modules in the current directory.
Modules located in the current directory has
precedence over modules located in `sys.path`.
"""
if imp is None:
imp = import_module
with cwd_in_path():
return imp(module, package=package)
def reload_from_cwd(module, reloader=None):
"""Reload module (ensuring that CWD is in sys.path)."""
if reloader is None:
reloader = reload
with cwd_in_path():
return reloader(module)
def module_file(module):
"""Return the correct original file name of a module."""
name = module.__file__
return name[:-1] if name.endswith('.pyc') else name
def gen_task_name(app, name, module_name):
"""Generate task name from name/module pair."""
module_name = module_name or '__main__'
try:
module = sys.modules[module_name]
except KeyError:
# Fix for manage.py shell_plus (Issue #366)
module = None
if module is not None:
module_name = module.__name__
# - If the task module is used as the __main__ script
# - we need to rewrite the module part of the task name
# - to match App.main.
if MP_MAIN_FILE and module.__file__ == MP_MAIN_FILE:
# - see comment about :envvar:`MP_MAIN_FILE` above.
module_name = '__main__'
if module_name == '__main__' and app.main:
return '.'.join([app.main, name])
return '.'.join(p for p in (module_name, name) if p)
def load_extension_class_names(namespace):
if sys.version_info >= (3, 10):
_entry_points = entry_points(group=namespace)
else:
try:
_entry_points = entry_points().get(namespace, [])
except AttributeError:
_entry_points = entry_points().select(group=namespace)
for ep in _entry_points:
yield ep.name, ep.value
def load_extension_classes(namespace):
for name, class_name in load_extension_class_names(namespace):
try:
cls = symbol_by_name(class_name)
except (ImportError, SyntaxError) as exc:
warnings.warn(
f'Cannot load {namespace} extension {class_name!r}: {exc!r}')
else:
yield name, cls
| NotAPackage |
python | huggingface__transformers | src/transformers/models/edgetam/modeling_edgetam.py | {
"start": 3459,
"end": 6114
} | class ____(ModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, height, width, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
fpn_hidden_states (`tuple(torch.FloatTensor)`):
Tuple of `torch.FloatTensor` (one for each feature level, from high to low resolution) of shape
`(batch_size, hidden_size, height, width)`. Feature maps from the Feature Pyramid Network neck.
fpn_position_encoding (`tuple(torch.FloatTensor)`):
Tuple of `torch.FloatTensor` (one for each feature level, from high to low resolution) of shape
`(batch_size, hidden_size, height, width)`. Positional encodings corresponding to the `fpn_hidden_states`.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each stage) of shape `(batch_size, height, width, hidden_size)`. Hidden-states of the
model at the output of each stage.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
the self-attention heads.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
fpn_hidden_states: Optional[torch.FloatTensor] = None
fpn_position_encoding: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| EdgeTamVisionEncoderOutput |
python | dask__dask | dask/dataframe/dask_expr/_shuffle.py | {
"start": 40111,
"end": 45530
} | class ____(Blockwise):
_parameters = ["frame", "other", "drop", "new_divisions", "append"]
_defaults = {"append": False, "new_divisions": None, "drop": True}
_keyword_only = ["drop", "new_divisions", "append"]
_is_length_preserving = True
_preserves_partitioning_information = True
@staticmethod
def operation(df, *args, new_divisions, **kwargs):
return df.set_index(*args, **kwargs)
def _divisions(self):
if self.new_divisions is None:
return (None,) * (self.frame.npartitions + 1)
return tuple(self.new_divisions)
def _simplify_up(self, parent, dependents):
if isinstance(parent, Projection):
columns = determine_column_projection(
self,
parent,
dependents,
additional_columns=_convert_to_list(self.other),
)
if self.frame.columns == columns:
return
columns = [col for col in self.frame.columns if col in columns]
return type(parent)(
type(self)(self.frame[columns], *self.operands[1:]),
parent.operand("columns"),
)
divisions_lru = LRU(10) # type: ignore[var-annotated]
def _get_divisions(
frame,
other,
npartitions: int,
ascending: bool = True,
partition_size: float = 128e6,
upsample: float = 1.0,
):
key = (other._name, npartitions, ascending, partition_size, upsample)
if key in divisions_lru:
return divisions_lru[key]
result = _calculate_divisions(
frame, other, npartitions, ascending, partition_size, upsample
)
divisions_lru[key] = result
return result
def _calculate_divisions(
frame,
other,
npartitions: int,
ascending: bool = True,
partition_size: float = 128e6,
upsample: float = 1.0,
):
from dask.dataframe.dask_expr import RepartitionQuantiles, new_collection
if is_index_like(other._meta):
other = ToSeriesIndex(other)
if is_categorical_dtype(other._meta.dtype):
other = new_collection(other).cat.as_ordered()._expr
try:
divisions, mins, maxes = compute(
new_collection(RepartitionQuantiles(other, npartitions, upsample=upsample)),
new_collection(other).map_partitions(M.min),
new_collection(other).map_partitions(M.max),
)
except TypeError as e:
# When there are nulls and a column is non-numeric, a TypeError is sometimes raised as a result of
# 1) computing mins/maxes above, 2) every null being switched to NaN, and 3) NaN being a float.
# Also, Pandas ExtensionDtypes may cause TypeErrors when dealing with special nulls such as pd.NaT or pd.NA.
# If this happens, we hint the user about eliminating nulls beforehand.
if not pd.api.types.is_numeric_dtype(other._meta.dtype):
obj, suggested_method = (
("column", f"`.dropna(subset=['{other.name}'])`")
if any(other._name == frame[c]._name for c in frame.columns)
else ("series", "`.loc[series[~series.isna()]]`")
)
raise NotImplementedError(
f"Divisions calculation failed for non-numeric {obj} '{other.name}'.\n"
f"This is probably due to the presence of nulls, which Dask does not entirely support in the index.\n"
f"We suggest you try with {suggested_method}."
) from e
# For numeric types there shouldn't be problems with nulls, so we raise as-it-is this particular TypeError
else:
raise e
sizes = [] # type: ignore[var-annotated]
empty_dataframe_detected = pd.isna(divisions).all()
if empty_dataframe_detected:
total = sum(sizes)
npartitions = max(math.ceil(total / partition_size), 1)
npartitions = min(npartitions, frame.npartitions)
n = divisions.size
try:
divisions = np.interp(
x=np.linspace(0, n - 1, npartitions + 1),
xp=np.linspace(0, n - 1, n),
fp=divisions.tolist(),
).tolist()
except (TypeError, ValueError): # str type
indexes = np.linspace(0, n - 1, npartitions + 1).astype(int)
divisions = divisions.iloc[indexes].tolist()
else:
# Drop duplicate divisions returned by partition quantiles
n = divisions.size
divisions = (
list(divisions.iloc[: n - 1].unique()) + divisions.iloc[n - 1 :].tolist()
)
mins = mins.bfill()
maxes = maxes.bfill()
if isinstance(other._meta.dtype, pd.CategoricalDtype):
dtype = other._meta.dtype
mins = mins.astype(dtype)
maxes = maxes.astype(dtype)
if mins.isna().any() or maxes.isna().any():
presorted = False
else:
n = mins.size
maxes2 = (maxes.iloc[: n - 1] if ascending else maxes.iloc[1:]).reset_index(
drop=True
)
mins2 = (mins.iloc[1:] if ascending else mins.iloc[: n - 1]).reset_index(
drop=True
)
presorted = (
mins.tolist() == mins.sort_values(ascending=ascending).tolist()
and maxes.tolist() == maxes.sort_values(ascending=ascending).tolist()
and (maxes2 < mins2).all()
)
return divisions, mins.tolist(), maxes.tolist(), presorted
| SetIndexBlockwise |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/ccroot.py | {
"start": 18582,
"end": 19729
} | class ____(Task.Task):
def runnable_status(self):
return Task.SKIP_ME
@extension('.o', '.obj')
def add_those_o_files(self, node):
tsk = self.create_task('fake_o', [], node)
try:
self.compiled_tasks.append(tsk)
except AttributeError:
self.compiled_tasks = [tsk]
@feature('fake_obj')
@before_method('process_source')
def process_objs(self):
for node in self.to_nodes(self.source):
self.add_those_o_files(node)
self.source = []
@conf
def read_object(self, obj):
if not isinstance(obj, self.path.__class__):
obj = self.path.find_resource(obj)
return self(features='fake_obj', source=obj, name=obj.name)
@feature('cxxprogram', 'cprogram')
@after_method('apply_link', 'process_use')
def set_full_paths_hpux(self):
if self.env.DEST_OS != 'hp-ux':
return
base = self.bld.bldnode.abspath()
for var in ['LIBPATH', 'STLIBPATH']:
lst = []
for x in self.env[var]:
if x.startswith('/'):
lst.append(x)
else:
lst.append(os.path.normpath(os.path.join(base, x)))
self.env[var] = lst
| fake_o |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 412152,
"end": 412534
} | class ____:
def test_sf(self):
# During development of gh-18822, we found that the override of
# kappa3.sf could experience overflow where the version in main did
# not. Check that this does not happen in final implementation.
sf0 = 1 - stats.kappa3.cdf(0.5, 1e5)
sf1 = stats.kappa3.sf(0.5, 1e5)
assert_allclose(sf1, sf0)
| TestKappa3 |
python | django__django | tests/invalid_models_tests/test_relative_fields.py | {
"start": 56193,
"end": 61487
} | class ____(SimpleTestCase):
def test_clash_between_accessors(self):
class Model(models.Model):
first_m2m = models.ManyToManyField("self", symmetrical=False)
second_m2m = models.ManyToManyField("self", symmetrical=False)
self.assertEqual(
Model.check(),
[
Error(
"Reverse accessor 'Model.model_set' for "
"'invalid_models_tests.Model.first_m2m' clashes with reverse "
"accessor for 'invalid_models_tests.Model.second_m2m'.",
hint=(
"Add or change a related_name argument to the definition "
"for 'invalid_models_tests.Model.first_m2m' or "
"'invalid_models_tests.Model.second_m2m'."
),
obj=Model._meta.get_field("first_m2m"),
id="fields.E304",
),
Error(
"Reverse accessor 'Model.model_set' for "
"'invalid_models_tests.Model.second_m2m' clashes with reverse "
"accessor for 'invalid_models_tests.Model.first_m2m'.",
hint=(
"Add or change a related_name argument to the definition "
"for 'invalid_models_tests.Model.second_m2m' or "
"'invalid_models_tests.Model.first_m2m'."
),
obj=Model._meta.get_field("second_m2m"),
id="fields.E304",
),
],
)
def test_accessor_clash(self):
class Model(models.Model):
model_set = models.ManyToManyField("self", symmetrical=False)
self.assertEqual(
Model.check(),
[
Error(
"Reverse accessor 'Model.model_set' for "
"'invalid_models_tests.Model.model_set' clashes with field "
"name 'invalid_models_tests.Model.model_set'.",
hint=(
"Rename field 'invalid_models_tests.Model.model_set', or "
"add/change a related_name argument to the definition for "
"field 'invalid_models_tests.Model.model_set'."
),
obj=Model._meta.get_field("model_set"),
id="fields.E302",
),
],
)
def test_reverse_query_name_clash(self):
class Model(models.Model):
model = models.ManyToManyField("self", symmetrical=False)
self.assertEqual(
Model.check(),
[
Error(
"Reverse query name for 'invalid_models_tests.Model.model' "
"clashes with field name 'invalid_models_tests.Model.model'.",
hint=(
"Rename field 'invalid_models_tests.Model.model', or "
"add/change a related_name argument to the definition for "
"field 'invalid_models_tests.Model.model'."
),
obj=Model._meta.get_field("model"),
id="fields.E303",
),
],
)
def test_clash_under_explicit_related_name(self):
class Model(models.Model):
clash = models.IntegerField()
m2m = models.ManyToManyField(
"self", symmetrical=False, related_name="clash"
)
self.assertEqual(
Model.check(),
[
Error(
"Reverse accessor 'Model.clash' for "
"'invalid_models_tests.Model.m2m' clashes with field name "
"'invalid_models_tests.Model.clash'.",
hint=(
"Rename field 'invalid_models_tests.Model.clash', or "
"add/change a related_name argument to the definition for "
"field 'invalid_models_tests.Model.m2m'."
),
obj=Model._meta.get_field("m2m"),
id="fields.E302",
),
Error(
"Reverse query name for 'invalid_models_tests.Model.m2m' "
"clashes with field name 'invalid_models_tests.Model.clash'.",
hint=(
"Rename field 'invalid_models_tests.Model.clash', or "
"add/change a related_name argument to the definition for "
"field 'invalid_models_tests.Model.m2m'."
),
obj=Model._meta.get_field("m2m"),
id="fields.E303",
),
],
)
def test_valid_model(self):
class Model(models.Model):
first = models.ManyToManyField(
"self", symmetrical=False, related_name="first_accessor"
)
second = models.ManyToManyField(
"self", symmetrical=False, related_name="second_accessor"
)
self.assertEqual(Model.check(), [])
@isolate_apps("invalid_models_tests")
| SelfReferentialM2MClashTests |
python | huggingface__transformers | tests/models/glm/test_modeling_glm.py | {
"start": 1255,
"end": 1394
} | class ____(CausalLMModelTest, unittest.TestCase):
model_tester_class = GlmModelTester
@slow
@require_torch_large_accelerator
| GlmModelTest |
python | getsentry__sentry | src/sentry/rules/history/endpoints/project_rule_group_history.py | {
"start": 2066,
"end": 3276
} | class ____(RuleEndpoint):
publish_status = {
"GET": ApiPublishStatus.EXPERIMENTAL,
}
@extend_schema(
operation_id="Retrieve a Group Firing History for an Issue Alert",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
GlobalParams.PROJECT_ID_OR_SLUG,
IssueAlertParams.ISSUE_RULE_ID,
],
responses={
200: RuleGroupHistorySerializer,
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def get(self, request: Request, project: Project, rule: Rule) -> Response:
per_page = self.get_per_page(request)
cursor = self.get_cursor_from_request(request)
try:
start, end = get_date_range_from_params(request.GET)
except InvalidParams:
raise ParseError(detail="Invalid start and end dates")
results = fetch_rule_groups_paginated(rule, start, end, cursor, per_page)
response = Response(serialize(results.results, request.user, RuleGroupHistorySerializer()))
self.add_cursor_headers(request, response, results)
return response
| ProjectRuleGroupHistoryIndexEndpoint |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 45242,
"end": 45722
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("repository_id", "name", "client_mutation_id")
repository_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="repositoryId"
)
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| AcceptTopicSuggestionInput |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/partition_sets.py | {
"start": 3372,
"end": 3521
} | class ____(graphene.ObjectType):
results = non_null_list(GraphenePipelineTag)
class Meta:
name = "PartitionTags"
| GraphenePartitionTags |
python | celery__celery | t/unit/utils/test_local.py | {
"start": 6873,
"end": 8215
} | class ____:
def test_only_evaluated_once(self):
class X:
attr = 123
evals = 0
def __init__(self):
self.__class__.evals += 1
p = PromiseProxy(X)
assert p.attr == 123
assert p.attr == 123
assert X.evals == 1
def test_callbacks(self):
source = Mock(name='source')
p = PromiseProxy(source)
cbA = Mock(name='cbA')
cbB = Mock(name='cbB')
cbC = Mock(name='cbC')
p.__then__(cbA, p)
p.__then__(cbB, p)
assert not p.__evaluated__()
assert object.__getattribute__(p, '__pending__')
assert repr(p)
assert p.__evaluated__()
with pytest.raises(AttributeError):
object.__getattribute__(p, '__pending__')
cbA.assert_called_with(p)
cbB.assert_called_with(p)
assert p.__evaluated__()
p.__then__(cbC, p)
cbC.assert_called_with(p)
with pytest.raises(AttributeError):
object.__getattribute__(p, '__pending__')
def test_maybe_evaluate(self):
x = PromiseProxy(lambda: 30)
assert not x.__evaluated__()
assert maybe_evaluate(x) == 30
assert maybe_evaluate(x) == 30
assert maybe_evaluate(30) == 30
assert x.__evaluated__()
| test_PromiseProxy |
python | networkx__networkx | networkx/classes/coreviews.py | {
"start": 1531,
"end": 2124
} | class ____(AtlasView):
"""An AdjacencyView is a Read-only Map of Maps of Maps.
It is a View into a dict-of-dict-of-dict data structure.
The inner level of dict is read-write. But the
outer levels are read-only.
See Also
========
AtlasView: View into dict-of-dict
MultiAdjacencyView: View into dict-of-dict-of-dict-of-dict
"""
__slots__ = () # Still uses AtlasView slots names _atlas
def __getitem__(self, name):
return AtlasView(self._atlas[name])
def copy(self):
return {n: self[n].copy() for n in self._atlas}
| AdjacencyView |
python | sphinx-doc__sphinx | sphinx/addnodes.py | {
"start": 12436,
"end": 12591
} | class ____(nodes.Part, nodes.Inline, nodes.FixedTextElement):
"""Node for a single grammar production rule."""
# other directive-level nodes
| production |
python | google__jax | jax/experimental/mosaic/gpu/layout_inference.py | {
"start": 2433,
"end": 2615
} | class ____(enum.IntEnum):
"""The type of a variable.
Variables are operands, results, or arguments of MLIR operations.
"""
OPERAND = 0
RESULT = 1
ARGUMENT = 2
| VariableType |
python | celery__celery | t/unit/worker/test_heartbeat.py | {
"start": 798,
"end": 2384
} | class ____:
def test_start_stop(self):
timer = MockTimer()
eventer = MockDispatcher()
h = Heart(timer, eventer, interval=1)
h.start()
assert h.tref
h.stop()
assert h.tref is None
h.stop()
def test_send_sends_signal(self):
h = Heart(MockTimer(), MockDispatcher(), interval=1)
h._send_sent_signal = None
h._send('worker-heartbeat')
h._send_sent_signal = Mock(name='send_sent_signal')
h._send('worker')
h._send_sent_signal.assert_called_with(sender=h)
def test_start_when_disabled(self):
timer = MockTimer()
eventer = MockDispatcher()
eventer.enabled = False
h = Heart(timer, eventer)
h.start()
assert not h.tref
assert not eventer.sent
def test_stop_when_disabled(self):
timer = MockTimer()
eventer = MockDispatcher()
eventer.enabled = False
h = Heart(timer, eventer)
h.stop()
assert not eventer.sent
def test_message_retries(self):
timer = MockTimer()
eventer = MockDispatcher()
eventer.enabled = True
h = Heart(timer, eventer, interval=1)
h.start()
assert eventer.sent[-1][0] == "worker-online"
# Invoke a heartbeat
h.tref[1](*h.tref[2], **h.tref[3])
assert eventer.sent[-1][0] == "worker-heartbeat"
assert eventer.sent[-1][1]["retry"]
h.stop()
assert eventer.sent[-1][0] == "worker-offline"
assert not eventer.sent[-1][1]["retry"]
| test_Heart |
python | zarr-developers__zarr-python | tests/test_dtype/test_npy/test_complex.py | {
"start": 190,
"end": 463
} | class ____(BaseTestZDType):
def scalar_equals(self, scalar1: object, scalar2: object) -> bool:
if np.isnan(scalar1) and np.isnan(scalar2): # type: ignore[call-overload]
return True
return super().scalar_equals(scalar1, scalar2)
| _BaseTestFloat |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_legend07.py | {
"start": 315,
"end": 1255
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_legend07.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "pie"})
data = [
[2, 4, 6],
[60, 30, 10],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$3",
"values": "=Sheet1!$B$1:$B$3",
}
)
chart.set_legend({"fill": {"color": "yellow"}})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | huggingface__transformers | src/transformers/models/t5/modeling_t5.py | {
"start": 28478,
"end": 36231
} | class ____(T5PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model)
self.is_decoder = config.is_decoder
self.block = nn.ModuleList(
[T5Block(config, has_relative_attention_bias=bool(i == 0), layer_idx=i) for i in range(config.num_layers)]
)
self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
# Initialize weights and apply final processing
self.post_init()
self.gradient_checkpointing = False
def set_input_embeddings(self, new_embeddings):
self.embed_tokens = new_embeddings
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
inputs_embeds=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
cache_position=None,
):
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(
f"You cannot specify both {err_msg_prefix}input_ids and {err_msg_prefix}inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
err_msg_prefix = "decoder_" if self.is_decoder else ""
raise ValueError(f"You have to specify either {err_msg_prefix}input_ids or {err_msg_prefix}inputs_embeds")
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
if inputs_embeds is None:
if self.embed_tokens is None:
raise ValueError("You have to initialize the model with valid token embeddings")
inputs_embeds = self.embed_tokens(input_ids)
batch_size, seq_length = input_shape
if use_cache is True:
if not self.is_decoder:
raise ValueError(f"`use_cache` can only be set to `True` if {self} is used as a decoder")
if self.is_decoder:
if use_cache and past_key_values is None:
if self.config.is_encoder_decoder:
past_key_values = EncoderDecoderCache(
DynamicCache(config=self.config), DynamicCache(config=self.config)
)
else:
past_key_values = DynamicCache(config=self.config)
elif not self.is_decoder:
# do not pass cache object down the line for encoder stack
# it messes indexing later in decoder-stack because cache object is modified in-place
past_key_values = None
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(
past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device
)
if self.config.is_decoder:
attention_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values.self_attention_cache
if isinstance(past_key_values, EncoderDecoderCache)
else past_key_values,
)
else:
attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
)
encoder_extended_attention_mask = None
if self.is_decoder and encoder_hidden_states is not None:
encoder_extended_attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=encoder_attention_mask,
encoder_hidden_states=encoder_hidden_states,
)
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
all_cross_attentions = () if (output_attentions and self.is_decoder) else None
position_bias = None
encoder_decoder_position_bias = None
hidden_states = self.dropout(inputs_embeds)
for layer_module in self.block:
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states,
attention_mask,
position_bias,
encoder_hidden_states,
encoder_extended_attention_mask,
encoder_decoder_position_bias, # as a positional argument for gradient checkpointing
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
return_dict=return_dict,
cache_position=cache_position,
)
hidden_states = layer_outputs[0]
# We share the position biases between the layers - the first layer store them
# layer_outputs = hidden-states, key-value-states (self-attention position bias), (self-attention weights),
# (cross-attention position bias), (cross-attention weights)
position_bias = layer_outputs[1]
if self.is_decoder and encoder_hidden_states is not None:
encoder_decoder_position_bias = layer_outputs[3 if output_attentions else 2]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[2],)
if self.is_decoder:
all_cross_attentions = all_cross_attentions + (layer_outputs[4],)
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
past_key_values,
all_hidden_states,
all_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_attentions,
cross_attentions=all_cross_attentions,
)
@auto_docstring
| T5Stack |
python | PrefectHQ__prefect | tests/server/utilities/test_text_search_parser.py | {
"start": 11699,
"end": 14813
} | class ____:
"""Test edge cases and documented limitations"""
def test_literal_dash_at_start_not_supported(self):
# Searching for literal `-` at start is not supported
# This should be treated as exclusion, not literal dash
result = parse_text_search_query("-")
assert result == TextSearchQuery(include=[], exclude=[], required=[])
def test_special_characters_preserved_in_terms(self):
result = parse_text_search_query("error@domain.com task#123 flow$var")
assert result == TextSearchQuery(
include=["error@domain.com", "task#123", "flow$var"],
exclude=[],
required=[],
)
def test_unicode_characters(self):
result = parse_text_search_query("errör ñame 中文")
assert result == TextSearchQuery(
include=["errör", "ñame", "中文"], exclude=[], required=[]
)
def test_very_long_terms(self):
long_term = "a" * 100
result = parse_text_search_query(f"error {long_term}")
assert result == TextSearchQuery(
include=["error", long_term], exclude=[], required=[]
)
def test_many_terms(self):
# Test parsing many terms efficiently (keep under 200 char limit)
terms = [f"t{i}" for i in range(30)] # t0 t1 t2... fits in 200 chars
query = " ".join(terms)
result = parse_text_search_query(query)
assert result == TextSearchQuery(include=terms, exclude=[], required=[])
def test_alternating_prefixes(self):
result = parse_text_search_query(
"include -exclude +required -exclude2 include2"
)
assert result == TextSearchQuery(
include=["include", "include2"],
exclude=["exclude", "exclude2"],
required=["required"],
)
def test_quoted_phrases_with_prefixes_inside(self):
# Prefixes inside quotes should be literal
result = parse_text_search_query('"error -debug +required"')
assert result == TextSearchQuery(
include=["error -debug +required"], exclude=[], required=[]
)
def test_mixed_quote_styles_not_supported(self):
# Only double quotes have special meaning
result = parse_text_search_query("'single quotes' error")
assert result == TextSearchQuery(
include=["'single", "quotes'", "error"], exclude=[], required=[]
)
def test_backslash_escape_only_for_quotes(self):
# Backslashes only escape quotes, other backslashes are literal
result = parse_text_search_query(r'error\test "quote\"inside"')
assert result == TextSearchQuery(
include=[r"error\test", 'quote"inside'], exclude=[], required=[]
)
def test_prefix_with_quotes_complex(self):
result = parse_text_search_query(
'+"required phrase" -"excluded phrase" "normal phrase"'
)
assert result == TextSearchQuery(
include=["normal phrase"],
exclude=["excluded phrase"],
required=["required phrase"],
)
| TestEdgeCasesAndLimitations |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/sequential.py | {
"start": 403,
"end": 4506
} | class ____(Chain):
"""Chain where the outputs of one chain feed directly into next."""
chains: list[Chain]
input_variables: list[str]
output_variables: list[str]
return_all: bool = False
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@property
def input_keys(self) -> list[str]:
"""Return expected input keys to the chain."""
return self.input_variables
@property
def output_keys(self) -> list[str]:
"""Return output key."""
return self.output_variables
@model_validator(mode="before")
@classmethod
def validate_chains(cls, values: dict) -> Any:
"""Validate that the correct inputs exist for all chains."""
chains = values["chains"]
input_variables = values["input_variables"]
memory_keys = []
if "memory" in values and values["memory"] is not None:
"""Validate that prompt input variables are consistent."""
memory_keys = values["memory"].memory_variables
if set(input_variables).intersection(set(memory_keys)):
overlapping_keys = set(input_variables) & set(memory_keys)
msg = (
f"The input key(s) {''.join(overlapping_keys)} are found "
f"in the Memory keys ({memory_keys}) - please use input and "
f"memory keys that don't overlap."
)
raise ValueError(msg)
known_variables = set(input_variables + memory_keys)
for chain in chains:
missing_vars = set(chain.input_keys).difference(known_variables)
if chain.memory:
missing_vars = missing_vars.difference(chain.memory.memory_variables)
if missing_vars:
msg = (
f"Missing required input keys: {missing_vars}, "
f"only had {known_variables}"
)
raise ValueError(msg)
overlapping_keys = known_variables.intersection(chain.output_keys)
if overlapping_keys:
msg = f"Chain returned keys that already exist: {overlapping_keys}"
raise ValueError(msg)
known_variables |= set(chain.output_keys)
if "output_variables" not in values:
if values.get("return_all", False):
output_keys = known_variables.difference(input_variables)
else:
output_keys = chains[-1].output_keys
values["output_variables"] = output_keys
else:
missing_vars = set(values["output_variables"]).difference(known_variables)
if missing_vars:
msg = f"Expected output variables that were not found: {missing_vars}."
raise ValueError(msg)
return values
def _call(
self,
inputs: dict[str, str],
run_manager: CallbackManagerForChainRun | None = None,
) -> dict[str, str]:
known_values = inputs.copy()
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
for _i, chain in enumerate(self.chains):
callbacks = _run_manager.get_child()
outputs = chain(known_values, return_only_outputs=True, callbacks=callbacks)
known_values.update(outputs)
return {k: known_values[k] for k in self.output_variables}
async def _acall(
self,
inputs: dict[str, Any],
run_manager: AsyncCallbackManagerForChainRun | None = None,
) -> dict[str, Any]:
known_values = inputs.copy()
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
callbacks = _run_manager.get_child()
for _i, chain in enumerate(self.chains):
outputs = await chain.acall(
known_values,
return_only_outputs=True,
callbacks=callbacks,
)
known_values.update(outputs)
return {k: known_values[k] for k in self.output_variables}
| SequentialChain |
python | getsentry__sentry | src/sentry/sentry_apps/services/app/model.py | {
"start": 5130,
"end": 5739
} | class ____(RpcModel):
id: str = ""
label: str = ""
action_type: str = ""
enabled: bool = True
@property
def actionType(self) -> str:
return self.action_type
def is_enabled(self) -> bool:
return self.enabled
@classmethod
def from_event(cls, data_interface: SentryAppEventDataInterface) -> "RpcSentryAppEventData":
return RpcSentryAppEventData(
id=data_interface.id,
label=data_interface.label,
action_type=data_interface.actionType,
enabled=data_interface.is_enabled(),
)
| RpcSentryAppEventData |
python | huggingface__transformers | src/transformers/models/mbart/modeling_mbart.py | {
"start": 44960,
"end": 53133
} | class ____(MBartPreTrainedModel, GenerationMixin):
base_model_prefix = "model"
_keys_to_ignore_on_load_missing = ["final_logits_bias"]
_tied_weights_keys = {"lm_head.weight": "model.shared.weight"}
def __init__(self, config: MBartConfig):
super().__init__(config)
self.model = MBartModel(config)
self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
# Initialize weights and apply final processing
self.post_init()
def resize_token_embeddings(
self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None, mean_resizing: bool = True
) -> nn.Embedding:
new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)
self._resize_final_logits_bias(new_embeddings.weight.shape[0])
return new_embeddings
def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
old_num_tokens = self.final_logits_bias.shape[-1]
if new_num_tokens <= old_num_tokens:
new_bias = self.final_logits_bias[:, :new_num_tokens]
else:
extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
self.register_buffer("final_logits_bias", new_bias)
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[Seq2SeqLMOutput, tuple[torch.FloatTensor]]:
r"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are decoder input IDs?](../glossary#decoder-input-ids)
MBart uses a specific language id token as the starting token for `decoder_input_ids` generation that
varies according to source and target language, *e.g.* 25004 for *en_XX*, and 25003 for *de_DE*. If
`past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
`past_key_values`).
For translation and summarization training, `decoder_input_ids` should be provided. If no
`decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
for denoising pre-training following the paper.
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example Translation:
```python
>>> from transformers import AutoTokenizer, MBartForConditionalGeneration
>>> model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
>>> example_english_phrase = "42 is the answer"
>>> inputs = tokenizer(example_english_phrase, return_tensors="pt")
>>> # Translate
>>> generated_ids = model.generate(**inputs, num_beams=4, max_length=5)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
'42 este răspuns'
```
Mask filling example:
```python
>>> from transformers import AutoTokenizer, MBartForConditionalGeneration
>>> model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
>>> # de_DE is the language symbol id <LID> for German
>>> TXT = "</s> Meine Freunde sind <mask> nett aber sie essen zu viel Kuchen. </s> de_DE"
>>> input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors="pt")["input_ids"]
>>> logits = model(input_ids).logits
>>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
>>> probs = logits[0, masked_index].softmax(dim=0)
>>> values, predictions = probs.topk(5)
>>> tokenizer.decode(predictions).split()
['nett', 'sehr', 'ganz', 'nicht', 'so']
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if use_cache:
logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id)
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return Seq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id)
@auto_docstring(
custom_intro="""
MBart model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE
tasks.
"""
)
| MBartForConditionalGeneration |
python | encode__django-rest-framework | tests/test_routers.py | {
"start": 26509,
"end": 26640
} | class ____(BasenameTestCase, TestCase):
def setUp(self):
self.router = DefaultRouter()
| TestDuplicateBasenameDefaultRouter |
python | mlflow__mlflow | mlflow/server/auth/entities.py | {
"start": 4753,
"end": 5952
} | class ____:
def __init__(
self,
experiment_id,
scorer_name,
user_id,
permission,
):
self._experiment_id = experiment_id
self._scorer_name = scorer_name
self._user_id = user_id
self._permission = permission
@property
def experiment_id(self):
return self._experiment_id
@property
def scorer_name(self):
return self._scorer_name
@property
def user_id(self):
return self._user_id
@property
def permission(self):
return self._permission
@permission.setter
def permission(self, permission):
self._permission = permission
def to_json(self):
return {
"experiment_id": self.experiment_id,
"scorer_name": self.scorer_name,
"user_id": self.user_id,
"permission": self.permission,
}
@classmethod
def from_json(cls, dictionary):
return cls(
experiment_id=dictionary["experiment_id"],
scorer_name=dictionary["scorer_name"],
user_id=dictionary["user_id"],
permission=dictionary["permission"],
)
| ScorerPermission |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/schema.py | {
"start": 135160,
"end": 138032
} | class ____(DefaultGenerator, ABC):
"""A plain default value on a column.
This could correspond to a constant, a callable function,
or a SQL clause.
:class:`.ColumnDefault` is generated automatically
whenever the ``default``, ``onupdate`` arguments of
:class:`_schema.Column` are used. A :class:`.ColumnDefault`
can be passed positionally as well.
For example, the following::
Column("foo", Integer, default=50)
Is equivalent to::
Column("foo", Integer, ColumnDefault(50))
"""
arg: Any
@overload
def __new__(
cls, arg: Callable[..., Any], for_update: bool = ...
) -> CallableColumnDefault: ...
@overload
def __new__(
cls, arg: ColumnElement[Any], for_update: bool = ...
) -> ColumnElementColumnDefault: ...
# if I return ScalarElementColumnDefault here, which is what's actually
# returned, mypy complains that
# overloads overlap w/ incompatible return types.
@overload
def __new__(cls, arg: object, for_update: bool = ...) -> ColumnDefault: ...
def __new__(
cls, arg: Any = None, for_update: bool = False
) -> ColumnDefault:
"""Construct a new :class:`.ColumnDefault`.
:param arg: argument representing the default value.
May be one of the following:
* a plain non-callable Python value, such as a
string, integer, boolean, or other simple type.
The default value will be used as is each time.
* a SQL expression, that is one which derives from
:class:`_expression.ColumnElement`. The SQL expression will
be rendered into the INSERT or UPDATE statement,
or in the case of a primary key column when
RETURNING is not used may be
pre-executed before an INSERT within a SELECT.
* A Python callable. The function will be invoked for each
new row subject to an INSERT or UPDATE.
The callable must accept exactly
zero or one positional arguments. The one-argument form
will receive an instance of the :class:`.ExecutionContext`,
which provides contextual information as to the current
:class:`_engine.Connection` in use as well as the current
statement and parameters.
"""
if isinstance(arg, FetchedValue):
raise exc.ArgumentError(
"ColumnDefault may not be a server-side default type."
)
elif callable(arg):
cls = CallableColumnDefault
elif isinstance(arg, ClauseElement):
cls = ColumnElementColumnDefault
elif arg is not None:
cls = ScalarElementColumnDefault
return object.__new__(cls)
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self.arg!r})"
| ColumnDefault |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/experimental/benchmarks/pdsh.py | {
"start": 1035,
"end": 31158
} | class ____:
"""PDS-H query definitions."""
name: str = "pdsh"
@staticmethod
def q0(run_config: RunConfig) -> pl.LazyFrame:
"""Query 0."""
return pl.LazyFrame()
@staticmethod
def q1(run_config: RunConfig) -> pl.LazyFrame:
"""Query 1."""
lineitem = get_data(run_config.dataset_path, "lineitem", run_config.suffix)
var1 = date(1998, 9, 2)
return (
lineitem.filter(pl.col("l_shipdate") <= var1)
.group_by("l_returnflag", "l_linestatus")
.agg(
pl.sum("l_quantity").alias("sum_qty"),
pl.sum("l_extendedprice").alias("sum_base_price"),
(pl.col("l_extendedprice") * (1.0 - pl.col("l_discount")))
.sum()
.alias("sum_disc_price"),
(
pl.col("l_extendedprice")
* (1.0 - pl.col("l_discount"))
* (1.0 + pl.col("l_tax"))
)
.sum()
.alias("sum_charge"),
pl.mean("l_quantity").alias("avg_qty"),
pl.mean("l_extendedprice").alias("avg_price"),
pl.mean("l_discount").alias("avg_disc"),
pl.len().alias("count_order"),
)
.sort("l_returnflag", "l_linestatus")
)
@staticmethod
def q2(run_config: RunConfig) -> pl.LazyFrame:
"""Query 2."""
nation = get_data(run_config.dataset_path, "nation", run_config.suffix)
part = get_data(run_config.dataset_path, "part", run_config.suffix)
partsupp = get_data(run_config.dataset_path, "partsupp", run_config.suffix)
region = get_data(run_config.dataset_path, "region", run_config.suffix)
supplier = get_data(run_config.dataset_path, "supplier", run_config.suffix)
var1 = 15
var2 = "BRASS"
var3 = "EUROPE"
q1 = (
part.join(partsupp, left_on="p_partkey", right_on="ps_partkey")
.join(supplier, left_on="ps_suppkey", right_on="s_suppkey")
.join(nation, left_on="s_nationkey", right_on="n_nationkey")
.join(region, left_on="n_regionkey", right_on="r_regionkey")
.filter(pl.col("p_size") == var1)
.filter(pl.col("p_type").str.ends_with(var2))
.filter(pl.col("r_name") == var3)
)
return (
q1.group_by("p_partkey")
.agg(pl.min("ps_supplycost"))
.join(q1, on=["p_partkey", "ps_supplycost"])
.select(
"s_acctbal",
"s_name",
"n_name",
"p_partkey",
"p_mfgr",
"s_address",
"s_phone",
"s_comment",
)
.sort(
by=["s_acctbal", "n_name", "s_name", "p_partkey"],
descending=[True, False, False, False],
)
.head(100)
)
@staticmethod
def q3(run_config: RunConfig) -> pl.LazyFrame:
"""Query 3."""
customer = get_data(run_config.dataset_path, "customer", run_config.suffix)
lineitem = get_data(run_config.dataset_path, "lineitem", run_config.suffix)
orders = get_data(run_config.dataset_path, "orders", run_config.suffix)
var1 = "BUILDING"
var2 = date(1995, 3, 15)
return (
customer.filter(pl.col("c_mktsegment") == var1)
.join(orders, left_on="c_custkey", right_on="o_custkey")
.join(lineitem, left_on="o_orderkey", right_on="l_orderkey")
.filter(pl.col("o_orderdate") < var2)
.filter(pl.col("l_shipdate") > var2)
.with_columns(
(pl.col("l_extendedprice") * (1 - pl.col("l_discount"))).alias(
"revenue"
)
)
.group_by("o_orderkey", "o_orderdate", "o_shippriority")
.agg(pl.sum("revenue"))
.select(
pl.col("o_orderkey").alias("l_orderkey"),
"revenue",
"o_orderdate",
"o_shippriority",
)
.sort(by=["revenue", "o_orderdate"], descending=[True, False])
.head(10)
)
@staticmethod
def q4(run_config: RunConfig) -> pl.LazyFrame:
"""Query 4."""
lineitem = get_data(run_config.dataset_path, "lineitem", run_config.suffix)
orders = get_data(run_config.dataset_path, "orders", run_config.suffix)
var1 = date(1993, 7, 1)
var2 = date(1993, 10, 1)
return (
# SQL exists translates to semi join in Polars API
orders.join(
(lineitem.filter(pl.col("l_commitdate") < pl.col("l_receiptdate"))),
left_on="o_orderkey",
right_on="l_orderkey",
how="semi",
)
.filter(pl.col("o_orderdate").is_between(var1, var2, closed="left"))
.group_by("o_orderpriority")
.agg(pl.len().alias("order_count"))
.sort("o_orderpriority")
)
@staticmethod
def q5(run_config: RunConfig) -> pl.LazyFrame:
"""Query 5."""
path = run_config.dataset_path
suffix = run_config.suffix
customer = get_data(path, "customer", suffix)
lineitem = get_data(path, "lineitem", suffix)
nation = get_data(path, "nation", suffix)
orders = get_data(path, "orders", suffix)
region = get_data(path, "region", suffix)
supplier = get_data(path, "supplier", suffix)
var1 = "ASIA"
var2 = date(1994, 1, 1)
var3 = date(1995, 1, 1)
return (
region.join(nation, left_on="r_regionkey", right_on="n_regionkey")
.join(customer, left_on="n_nationkey", right_on="c_nationkey")
.join(orders, left_on="c_custkey", right_on="o_custkey")
.join(lineitem, left_on="o_orderkey", right_on="l_orderkey")
.join(
supplier,
left_on=["l_suppkey", "n_nationkey"],
right_on=["s_suppkey", "s_nationkey"],
)
.filter(pl.col("r_name") == var1)
.filter(pl.col("o_orderdate").is_between(var2, var3, closed="left"))
.with_columns(
(pl.col("l_extendedprice") * (1 - pl.col("l_discount"))).alias(
"revenue"
)
)
.group_by("n_name")
.agg(pl.sum("revenue"))
.sort(by="revenue", descending=True)
)
@staticmethod
def q6(run_config: RunConfig) -> pl.LazyFrame:
"""Query 6."""
path = run_config.dataset_path
suffix = run_config.suffix
lineitem = get_data(path, "lineitem", suffix)
var1 = date(1994, 1, 1)
var2 = date(1995, 1, 1)
var3 = 0.05
var4 = 0.07
var5 = 24
return (
lineitem.filter(pl.col("l_shipdate").is_between(var1, var2, closed="left"))
.filter(pl.col("l_discount").is_between(var3, var4))
.filter(pl.col("l_quantity") < var5)
.with_columns(
(pl.col("l_extendedprice") * pl.col("l_discount")).alias("revenue")
)
.select(pl.sum("revenue"))
)
@staticmethod
def q7(run_config: RunConfig) -> pl.LazyFrame:
"""Query 7."""
customer = get_data(run_config.dataset_path, "customer", run_config.suffix)
lineitem = get_data(run_config.dataset_path, "lineitem", run_config.suffix)
nation = get_data(run_config.dataset_path, "nation", run_config.suffix)
orders = get_data(run_config.dataset_path, "orders", run_config.suffix)
supplier = get_data(run_config.dataset_path, "supplier", run_config.suffix)
var1 = "FRANCE"
var2 = "GERMANY"
var3 = date(1995, 1, 1)
var4 = date(1996, 12, 31)
n1 = nation.filter(pl.col("n_name") == var1)
n2 = nation.filter(pl.col("n_name") == var2)
q1 = (
customer.join(n1, left_on="c_nationkey", right_on="n_nationkey")
.join(orders, left_on="c_custkey", right_on="o_custkey")
.rename({"n_name": "cust_nation"})
.join(lineitem, left_on="o_orderkey", right_on="l_orderkey")
.join(supplier, left_on="l_suppkey", right_on="s_suppkey")
.join(n2, left_on="s_nationkey", right_on="n_nationkey")
.rename({"n_name": "supp_nation"})
)
q2 = (
customer.join(n2, left_on="c_nationkey", right_on="n_nationkey")
.join(orders, left_on="c_custkey", right_on="o_custkey")
.rename({"n_name": "cust_nation"})
.join(lineitem, left_on="o_orderkey", right_on="l_orderkey")
.join(supplier, left_on="l_suppkey", right_on="s_suppkey")
.join(n1, left_on="s_nationkey", right_on="n_nationkey")
.rename({"n_name": "supp_nation"})
)
return (
pl.concat([q1, q2])
.filter(pl.col("l_shipdate").is_between(var3, var4))
.with_columns(
(pl.col("l_extendedprice") * (1 - pl.col("l_discount"))).alias(
"volume"
),
pl.col("l_shipdate").dt.year().alias("l_year"),
)
.group_by("supp_nation", "cust_nation", "l_year")
.agg(pl.sum("volume").alias("revenue"))
.sort(by=["supp_nation", "cust_nation", "l_year"])
)
@staticmethod
def q8(run_config: RunConfig) -> pl.LazyFrame:
"""Query 8."""
customer = get_data(run_config.dataset_path, "customer", run_config.suffix)
lineitem = get_data(run_config.dataset_path, "lineitem", run_config.suffix)
nation = get_data(run_config.dataset_path, "nation", run_config.suffix)
orders = get_data(run_config.dataset_path, "orders", run_config.suffix)
part = get_data(run_config.dataset_path, "part", run_config.suffix)
region = get_data(run_config.dataset_path, "region", run_config.suffix)
supplier = get_data(run_config.dataset_path, "supplier", run_config.suffix)
var1 = "BRAZIL"
var2 = "AMERICA"
var3 = "ECONOMY ANODIZED STEEL"
var4 = date(1995, 1, 1)
var5 = date(1996, 12, 31)
n1 = nation.select("n_nationkey", "n_regionkey")
n2 = nation.select("n_nationkey", "n_name")
return (
part.join(lineitem, left_on="p_partkey", right_on="l_partkey")
.join(supplier, left_on="l_suppkey", right_on="s_suppkey")
.join(orders, left_on="l_orderkey", right_on="o_orderkey")
.join(customer, left_on="o_custkey", right_on="c_custkey")
.join(n1, left_on="c_nationkey", right_on="n_nationkey")
.join(region, left_on="n_regionkey", right_on="r_regionkey")
.filter(pl.col("r_name") == var2)
.join(n2, left_on="s_nationkey", right_on="n_nationkey")
.filter(pl.col("o_orderdate").is_between(var4, var5))
.filter(pl.col("p_type") == var3)
.select(
pl.col("o_orderdate").dt.year().alias("o_year"),
(pl.col("l_extendedprice") * (1 - pl.col("l_discount"))).alias(
"volume"
),
pl.col("n_name").alias("nation"),
)
.with_columns(
pl.when(pl.col("nation") == var1)
.then(pl.col("volume"))
.otherwise(0)
.alias("_tmp")
)
.group_by("o_year")
.agg((pl.sum("_tmp") / pl.sum("volume")).round(2).alias("mkt_share"))
.sort("o_year")
)
@staticmethod
def q9(run_config: RunConfig) -> pl.LazyFrame:
"""Query 9."""
path = run_config.dataset_path
suffix = run_config.suffix
lineitem = get_data(path, "lineitem", suffix)
nation = get_data(path, "nation", suffix)
orders = get_data(path, "orders", suffix)
part = get_data(path, "part", suffix)
partsupp = get_data(path, "partsupp", suffix)
supplier = get_data(path, "supplier", suffix)
return (
part.join(partsupp, left_on="p_partkey", right_on="ps_partkey")
.join(supplier, left_on="ps_suppkey", right_on="s_suppkey")
.join(
lineitem,
left_on=["p_partkey", "ps_suppkey"],
right_on=["l_partkey", "l_suppkey"],
)
.join(orders, left_on="l_orderkey", right_on="o_orderkey")
.join(nation, left_on="s_nationkey", right_on="n_nationkey")
.filter(pl.col("p_name").str.contains("green"))
.select(
pl.col("n_name").alias("nation"),
pl.col("o_orderdate").dt.year().alias("o_year"),
(
pl.col("l_extendedprice") * (1 - pl.col("l_discount"))
- pl.col("ps_supplycost") * pl.col("l_quantity")
).alias("amount"),
)
.group_by("nation", "o_year")
.agg(pl.sum("amount").round(2).alias("sum_profit"))
.sort(by=["nation", "o_year"], descending=[False, True])
)
@staticmethod
def q10(run_config: RunConfig) -> pl.LazyFrame:
"""Query 10."""
path = run_config.dataset_path
suffix = run_config.suffix
customer = get_data(path, "customer", suffix)
lineitem = get_data(path, "lineitem", suffix)
nation = get_data(path, "nation", suffix)
orders = get_data(path, "orders", suffix)
var1 = date(1993, 10, 1)
var2 = date(1994, 1, 1)
return (
customer.join(orders, left_on="c_custkey", right_on="o_custkey")
.join(lineitem, left_on="o_orderkey", right_on="l_orderkey")
.join(nation, left_on="c_nationkey", right_on="n_nationkey")
.filter(pl.col("o_orderdate").is_between(var1, var2, closed="left"))
.filter(pl.col("l_returnflag") == "R")
.group_by(
"c_custkey",
"c_name",
"c_acctbal",
"c_phone",
"n_name",
"c_address",
"c_comment",
)
.agg(
(pl.col("l_extendedprice") * (1 - pl.col("l_discount")))
.sum()
.round(2)
.alias("revenue")
)
.select(
"c_custkey",
"c_name",
"revenue",
"c_acctbal",
"n_name",
"c_address",
"c_phone",
"c_comment",
)
.sort(by="revenue", descending=True)
.head(20)
)
@staticmethod
def q11(run_config: RunConfig) -> pl.LazyFrame:
"""Query 11."""
nation = get_data(run_config.dataset_path, "nation", run_config.suffix)
partsupp = get_data(run_config.dataset_path, "partsupp", run_config.suffix)
supplier = get_data(run_config.dataset_path, "supplier", run_config.suffix)
var1 = "GERMANY"
var2 = 0.0001 / run_config.scale_factor
q1 = (
partsupp.join(supplier, left_on="ps_suppkey", right_on="s_suppkey")
.join(nation, left_on="s_nationkey", right_on="n_nationkey")
.filter(pl.col("n_name") == var1)
)
q2 = q1.select(
(pl.col("ps_supplycost") * pl.col("ps_availqty"))
.sum()
.round(2)
.alias("tmp")
* var2
)
return (
q1.group_by("ps_partkey")
.agg(
(pl.col("ps_supplycost") * pl.col("ps_availqty"))
.sum()
.round(2)
.alias("value")
)
.join(q2, how="cross")
.filter(pl.col("value") > pl.col("tmp"))
.select("ps_partkey", "value")
.sort("value", descending=True)
)
@staticmethod
def q12(run_config: RunConfig) -> pl.LazyFrame:
"""Query 12."""
lineitem = get_data(run_config.dataset_path, "lineitem", run_config.suffix)
orders = get_data(run_config.dataset_path, "orders", run_config.suffix)
var1 = "MAIL"
var2 = "SHIP"
var3 = date(1994, 1, 1)
var4 = date(1995, 1, 1)
return (
orders.join(lineitem, left_on="o_orderkey", right_on="l_orderkey")
.filter(pl.col("l_shipmode").is_in([var1, var2]))
.filter(pl.col("l_commitdate") < pl.col("l_receiptdate"))
.filter(pl.col("l_shipdate") < pl.col("l_commitdate"))
.filter(pl.col("l_receiptdate").is_between(var3, var4, closed="left"))
.with_columns(
pl.when(pl.col("o_orderpriority").is_in(["1-URGENT", "2-HIGH"]))
.then(1)
.otherwise(0)
.alias("high_line_count"),
pl.when(pl.col("o_orderpriority").is_in(["1-URGENT", "2-HIGH"]).not_())
.then(1)
.otherwise(0)
.alias("low_line_count"),
)
.group_by("l_shipmode")
.agg(pl.col("high_line_count").sum(), pl.col("low_line_count").sum())
.sort("l_shipmode")
)
@staticmethod
def q13(run_config: RunConfig) -> pl.LazyFrame:
"""Query 13."""
customer = get_data(run_config.dataset_path, "customer", run_config.suffix)
orders = get_data(run_config.dataset_path, "orders", run_config.suffix)
var1 = "special"
var2 = "requests"
orders = orders.filter(
pl.col("o_comment").str.contains(f"{var1}.*{var2}").not_()
)
return (
customer.join(orders, left_on="c_custkey", right_on="o_custkey", how="left")
.group_by("c_custkey")
.agg(pl.col("o_orderkey").count().alias("c_count"))
.group_by("c_count")
.len()
.select(pl.col("c_count"), pl.col("len").alias("custdist"))
.sort(by=["custdist", "c_count"], descending=[True, True])
)
@staticmethod
def q14(run_config: RunConfig) -> pl.LazyFrame:
"""Query 14."""
lineitem = get_data(run_config.dataset_path, "lineitem", run_config.suffix)
part = get_data(run_config.dataset_path, "part", run_config.suffix)
var1 = date(1995, 9, 1)
var2 = date(1995, 10, 1)
return (
lineitem.join(part, left_on="l_partkey", right_on="p_partkey")
.filter(pl.col("l_shipdate").is_between(var1, var2, closed="left"))
.select(
(
100.00
* pl.when(pl.col("p_type").str.contains("PROMO*"))
.then(pl.col("l_extendedprice") * (1 - pl.col("l_discount")))
.otherwise(0)
.sum()
/ (pl.col("l_extendedprice") * (1 - pl.col("l_discount"))).sum()
)
.round(2)
.alias("promo_revenue")
)
)
@staticmethod
def q15(run_config: RunConfig) -> pl.LazyFrame:
"""Query 15."""
lineitem = get_data(run_config.dataset_path, "lineitem", run_config.suffix)
supplier = get_data(run_config.dataset_path, "supplier", run_config.suffix)
var1 = date(1996, 1, 1)
var2 = date(1996, 4, 1)
revenue = (
lineitem.filter(pl.col("l_shipdate").is_between(var1, var2, closed="left"))
.group_by("l_suppkey")
.agg(
(pl.col("l_extendedprice") * (1 - pl.col("l_discount")))
.sum()
.alias("total_revenue")
)
.select(pl.col("l_suppkey").alias("supplier_no"), pl.col("total_revenue"))
)
return (
supplier.join(revenue, left_on="s_suppkey", right_on="supplier_no")
.filter(pl.col("total_revenue") == pl.col("total_revenue").max())
.with_columns(pl.col("total_revenue").round(2))
.select("s_suppkey", "s_name", "s_address", "s_phone", "total_revenue")
.sort("s_suppkey")
)
@staticmethod
def q16(run_config: RunConfig) -> pl.LazyFrame:
"""Query 16."""
part = get_data(run_config.dataset_path, "part", run_config.suffix)
partsupp = get_data(run_config.dataset_path, "partsupp", run_config.suffix)
supplier = get_data(run_config.dataset_path, "supplier", run_config.suffix)
var1 = "Brand#45"
supplier = supplier.filter(
pl.col("s_comment").str.contains(".*Customer.*Complaints.*")
).select(pl.col("s_suppkey"), pl.col("s_suppkey").alias("ps_suppkey"))
return (
part.join(partsupp, left_on="p_partkey", right_on="ps_partkey")
.filter(pl.col("p_brand") != var1)
.filter(pl.col("p_type").str.contains("MEDIUM POLISHED*").not_())
.filter(pl.col("p_size").is_in([49, 14, 23, 45, 19, 3, 36, 9]))
.join(supplier, left_on="ps_suppkey", right_on="s_suppkey", how="left")
.filter(pl.col("ps_suppkey_right").is_null())
.group_by("p_brand", "p_type", "p_size")
.agg(pl.col("ps_suppkey").n_unique().alias("supplier_cnt"))
.sort(
by=["supplier_cnt", "p_brand", "p_type", "p_size"],
descending=[True, False, False, False],
)
)
@staticmethod
def q17(run_config: RunConfig) -> pl.LazyFrame:
"""Query 17."""
lineitem = get_data(run_config.dataset_path, "lineitem", run_config.suffix)
part = get_data(run_config.dataset_path, "part", run_config.suffix)
var1 = "Brand#23"
var2 = "MED BOX"
q1 = (
part.filter(pl.col("p_brand") == var1)
.filter(pl.col("p_container") == var2)
.join(lineitem, how="inner", left_on="p_partkey", right_on="l_partkey")
)
return (
q1.group_by("p_partkey")
.agg((0.2 * pl.col("l_quantity").mean()).alias("avg_quantity"))
.select(pl.col("p_partkey").alias("key"), pl.col("avg_quantity"))
.join(q1, left_on="key", right_on="p_partkey")
.filter(pl.col("l_quantity") < pl.col("avg_quantity"))
.select(
(pl.col("l_extendedprice").sum() / 7.0).round(2).alias("avg_yearly")
)
)
@staticmethod
def q18(run_config: RunConfig) -> pl.LazyFrame:
"""Query 18."""
path = run_config.dataset_path
suffix = run_config.suffix
customer = get_data(path, "customer", suffix)
lineitem = get_data(path, "lineitem", suffix)
orders = get_data(path, "orders", suffix)
var1 = 300
q1 = (
lineitem.group_by("l_orderkey")
.agg(pl.col("l_quantity").sum().alias("sum_quantity"))
.filter(pl.col("sum_quantity") > var1)
)
return (
orders.join(q1, left_on="o_orderkey", right_on="l_orderkey", how="semi")
.join(lineitem, left_on="o_orderkey", right_on="l_orderkey")
.join(customer, left_on="o_custkey", right_on="c_custkey")
.group_by(
"c_name", "o_custkey", "o_orderkey", "o_orderdate", "o_totalprice"
)
.agg(pl.col("l_quantity").sum().alias("col6"))
.select(
pl.col("c_name"),
pl.col("o_custkey").alias("c_custkey"),
pl.col("o_orderkey"),
pl.col("o_orderdate").alias("o_orderdat"),
pl.col("o_totalprice"),
pl.col("col6"),
)
.sort(by=["o_totalprice", "o_orderdat"], descending=[True, False])
.head(100)
)
@staticmethod
def q19(run_config: RunConfig) -> pl.LazyFrame:
"""Query 19."""
lineitem = get_data(run_config.dataset_path, "lineitem", run_config.suffix)
part = get_data(run_config.dataset_path, "part", run_config.suffix)
return (
part.join(lineitem, left_on="p_partkey", right_on="l_partkey")
.filter(pl.col("l_shipmode").is_in(["AIR", "AIR REG"]))
.filter(pl.col("l_shipinstruct") == "DELIVER IN PERSON")
.filter(
(
(pl.col("p_brand") == "Brand#12")
& pl.col("p_container").is_in(
["SM CASE", "SM BOX", "SM PACK", "SM PKG"]
)
& (pl.col("l_quantity").is_between(1, 11))
& (pl.col("p_size").is_between(1, 5))
)
| (
(pl.col("p_brand") == "Brand#23")
& pl.col("p_container").is_in(
["MED BAG", "MED BOX", "MED PKG", "MED PACK"]
)
& (pl.col("l_quantity").is_between(10, 20))
& (pl.col("p_size").is_between(1, 10))
)
| (
(pl.col("p_brand") == "Brand#34")
& pl.col("p_container").is_in(
["LG CASE", "LG BOX", "LG PACK", "LG PKG"]
)
& (pl.col("l_quantity").is_between(20, 30))
& (pl.col("p_size").is_between(1, 15))
)
)
.select(
(pl.col("l_extendedprice") * (1 - pl.col("l_discount")))
.sum()
.round(2)
.alias("revenue")
)
)
@staticmethod
def q20(run_config: RunConfig) -> pl.LazyFrame:
"""Query 20."""
lineitem = get_data(run_config.dataset_path, "lineitem", run_config.suffix)
nation = get_data(run_config.dataset_path, "nation", run_config.suffix)
part = get_data(run_config.dataset_path, "part", run_config.suffix)
partsupp = get_data(run_config.dataset_path, "partsupp", run_config.suffix)
supplier = get_data(run_config.dataset_path, "supplier", run_config.suffix)
var1 = date(1994, 1, 1)
var2 = date(1995, 1, 1)
var3 = "CANADA"
var4 = "forest"
q1 = (
lineitem.filter(pl.col("l_shipdate").is_between(var1, var2, closed="left"))
.group_by("l_partkey", "l_suppkey")
.agg((pl.col("l_quantity").sum() * 0.5).alias("sum_quantity"))
)
q2 = nation.filter(pl.col("n_name") == var3)
q3 = supplier.join(q2, left_on="s_nationkey", right_on="n_nationkey")
return (
part.filter(pl.col("p_name").str.starts_with(var4))
.select(pl.col("p_partkey").unique())
.join(partsupp, left_on="p_partkey", right_on="ps_partkey")
.join(
q1,
left_on=["ps_suppkey", "p_partkey"],
right_on=["l_suppkey", "l_partkey"],
)
.filter(pl.col("ps_availqty") > pl.col("sum_quantity"))
.select(pl.col("ps_suppkey").unique())
.join(q3, left_on="ps_suppkey", right_on="s_suppkey")
.select("s_name", "s_address")
.sort("s_name")
)
@staticmethod
def q21(run_config: RunConfig) -> pl.LazyFrame:
"""Query 21."""
lineitem = get_data(run_config.dataset_path, "lineitem", run_config.suffix)
nation = get_data(run_config.dataset_path, "nation", run_config.suffix)
orders = get_data(run_config.dataset_path, "orders", run_config.suffix)
supplier = get_data(run_config.dataset_path, "supplier", run_config.suffix)
var1 = "SAUDI ARABIA"
q1 = (
lineitem.group_by("l_orderkey")
.agg(pl.col("l_suppkey").len().alias("n_supp_by_order"))
.filter(pl.col("n_supp_by_order") > 1)
.join(
lineitem.filter(pl.col("l_receiptdate") > pl.col("l_commitdate")),
on="l_orderkey",
)
)
return (
q1.group_by("l_orderkey")
.agg(pl.col("l_suppkey").len().alias("n_supp_by_order"))
.join(q1, on="l_orderkey")
.join(supplier, left_on="l_suppkey", right_on="s_suppkey")
.join(nation, left_on="s_nationkey", right_on="n_nationkey")
.join(orders, left_on="l_orderkey", right_on="o_orderkey")
.filter(pl.col("n_supp_by_order") == 1)
.filter(pl.col("n_name") == var1)
.filter(pl.col("o_orderstatus") == "F")
.group_by("s_name")
.agg(pl.len().alias("numwait"))
.sort(by=["numwait", "s_name"], descending=[True, False])
.head(100)
)
@staticmethod
def q22(run_config: RunConfig) -> pl.LazyFrame:
"""Query 22."""
customer = get_data(run_config.dataset_path, "customer", run_config.suffix)
orders = get_data(run_config.dataset_path, "orders", run_config.suffix)
q1 = (
customer.with_columns(pl.col("c_phone").str.slice(0, 2).alias("cntrycode"))
.filter(pl.col("cntrycode").str.contains("13|31|23|29|30|18|17"))
.select("c_acctbal", "c_custkey", "cntrycode")
)
q2 = q1.filter(pl.col("c_acctbal") > 0.0).select(
pl.col("c_acctbal").mean().alias("avg_acctbal")
)
q3 = orders.select(pl.col("o_custkey").unique()).with_columns(
pl.col("o_custkey").alias("c_custkey")
)
return (
q1.join(q3, on="c_custkey", how="left")
.filter(pl.col("o_custkey").is_null())
.join(q2, how="cross")
.filter(pl.col("c_acctbal") > pl.col("avg_acctbal"))
.group_by("cntrycode")
.agg(
pl.col("c_acctbal").count().alias("numcust"),
pl.col("c_acctbal").sum().round(2).alias("totacctbal"),
)
.sort("cntrycode")
)
| PDSHQueries |
python | getsentry__sentry | src/sentry/uptime/endpoints/serializers.py | {
"start": 1729,
"end": 2025
} | class ____(UptimeSubscriptionSerializerResponse):
id: str
projectSlug: str
environment: str | None
name: str
status: str
uptimeStatus: int
mode: int
owner: ActorSerializerResponse
recoveryThreshold: int
downtimeThreshold: int
| UptimeDetectorSerializerResponse |
python | sqlalchemy__sqlalchemy | test/orm/test_subquery_relations.py | {
"start": 1796,
"end": 46884
} | class ____(_fixtures.FixtureTest, testing.AssertsCompiledSQL):
run_inserts = "once"
run_deletes = None
def test_basic(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
order_by=Address.id,
)
},
)
sess = fixture_session()
q = sess.query(User).options(subqueryload(User.addresses))
def go():
eq_(
[
User(
id=7,
addresses=[
Address(id=1, email_address="jack@bean.com")
],
)
],
q.filter(User.id == 7).all(),
)
self.assert_sql_count(testing.db, go, 2)
def go():
eq_(self.static.user_address_result, q.order_by(User.id).all())
self.assert_sql_count(testing.db, go, 2)
@testing.combinations(True, False)
def test_from_statement(self, legacy):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
order_by=Address.id,
)
},
)
sess = fixture_session()
stmt = select(User).where(User.id == 7)
with self.sql_execution_asserter(testing.db) as asserter:
if legacy:
ret = (
sess.query(User)
# .where(User.id == 7)
.from_statement(stmt)
.options(subqueryload(User.addresses))
.all()
)
else:
ret = sess.scalars(
select(User)
.from_statement(stmt)
.options(subqueryload(User.addresses))
).all()
eq_(self.static.user_address_result[0:1], ret)
asserter.assert_(
Or(
CompiledSQL(
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id = :id_1",
[{"id_1": 7}],
),
CompiledSQL(
"SELECT users.id, users.name "
"FROM users WHERE users.id = :id_1",
[{"id_1": 7}],
),
),
# issue 7505
# subqueryload degrades for a from_statement. this is a lazyload
CompiledSQL(
"SELECT addresses.id, addresses.user_id, "
"addresses.email_address "
"FROM addresses "
"WHERE :param_1 = addresses.user_id ORDER BY addresses.id",
[{"param_1": 7}],
),
)
def test_params_arent_cached(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="subquery",
order_by=Address.id,
)
},
)
query_cache = {}
sess = fixture_session()
u1 = (
sess.query(User)
.execution_options(compiled_cache=query_cache)
.filter(User.id == 7)
.one()
)
u2 = (
sess.query(User)
.execution_options(compiled_cache=query_cache)
.filter(User.id == 8)
.one()
)
eq_(len(u1.addresses), 1)
eq_(len(u2.addresses), 3)
def user_dingaling_fixture(self):
users, Dingaling, User, dingalings, Address, addresses = (
self.tables.users,
self.classes.Dingaling,
self.classes.User,
self.tables.dingalings,
self.classes.Address,
self.tables.addresses,
)
self.mapper_registry.map_imperatively(Dingaling, dingalings)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={
"dingalings": relationship(Dingaling, order_by=Dingaling.id)
},
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(Address, order_by=Address.id)
},
)
return User, Dingaling, Address
def test_from_aliased_w_cache_one(self):
User, Dingaling, Address = self.user_dingaling_fixture()
for i in range(3):
sess = fixture_session()
u = aliased(User)
q = sess.query(u).options(subqueryload(u.addresses))
def go():
eq_(
[
User(
id=7,
addresses=[
Address(id=1, email_address="jack@bean.com")
],
)
],
q.filter(u.id == 7).all(),
)
self.assert_sql_count(testing.db, go, 2)
def test_from_aliased_w_cache_two(self):
User, Dingaling, Address = self.user_dingaling_fixture()
for i in range(3):
sess = fixture_session()
u = aliased(User)
q = sess.query(u).options(subqueryload(u.addresses))
def go():
eq_(self.static.user_address_result, q.order_by(u.id).all())
self.assert_sql_count(testing.db, go, 2)
def test_from_aliased_w_cache_three(self):
User, Dingaling, Address = self.user_dingaling_fixture()
for i in range(3):
sess = fixture_session()
u = aliased(User)
q = sess.query(u).options(
subqueryload(u.addresses).subqueryload(Address.dingalings)
)
def go():
eq_(
[
User(
id=8,
addresses=[
Address(
id=2,
email_address="ed@wood.com",
dingalings=[Dingaling()],
),
Address(
id=3, email_address="ed@bettyboop.com"
),
Address(id=4, email_address="ed@lala.com"),
],
),
User(
id=9,
addresses=[
Address(id=5, dingalings=[Dingaling()])
],
),
],
q.filter(u.id.in_([8, 9])).all(),
)
self.assert_sql_count(testing.db, go, 3)
@testing.combinations((True,), (False,), argnames="use_alias")
@testing.combinations((1,), (2,), argnames="levels")
def test_multilevel_sub_options(self, use_alias, levels):
User, Dingaling, Address = self.user_dingaling_fixture()
s = fixture_session()
def go():
if use_alias:
u = aliased(User)
else:
u = User
q = s.query(u)
if levels == 1:
q = q.options(
subqueryload(u.addresses).options(
defer(Address.email_address)
)
).order_by(u.id)
eq_(
[
address.email_address
for user in q
for address in user.addresses
],
[
"jack@bean.com",
"ed@wood.com",
"ed@bettyboop.com",
"ed@lala.com",
"fred@fred.com",
],
)
else:
q = q.options(
joinedload(u.addresses)
.subqueryload(Address.dingalings)
.options(defer(Dingaling.data))
).order_by(u.id)
eq_(
[
ding.data
for user in q
for address in user.addresses
for ding in address.dingalings
],
["ding 1/2", "ding 2/5"],
)
for i in range(2):
if levels == 1:
# address.email_address
self.assert_sql_count(testing.db, go, 7)
else:
# dingaling.data
self.assert_sql_count(testing.db, go, 4)
s.close()
    def test_from_get(self):
        """subqueryload passed via ``Session.get(options=...)`` loads the
        collection in exactly two statements (main SELECT + one subquery)."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        self.mapper_registry.map_imperatively(
            User,
            users,
            properties={
                "addresses": relationship(
                    self.mapper_registry.map_imperatively(Address, addresses),
                    order_by=Address.id,
                )
            },
        )
        sess = fixture_session()
        def go():
            eq_(
                User(
                    id=7,
                    addresses=[Address(id=1, email_address="jack@bean.com")],
                ),
                sess.get(User, 7, options=[subqueryload(User.addresses)]),
            )
        self.assert_sql_count(testing.db, go, 2)
    def test_from_params(self):
        """subqueryload works with a ``bindparam`` criterion resolved via
        ``.params()``; still only two statements total."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        self.mapper_registry.map_imperatively(
            User,
            users,
            properties={
                "addresses": relationship(
                    self.mapper_registry.map_imperatively(Address, addresses),
                    order_by=Address.id,
                )
            },
        )
        sess = fixture_session()
        q = sess.query(User).options(subqueryload(User.addresses))
        def go():
            eq_(
                User(
                    id=7,
                    addresses=[Address(id=1, email_address="jack@bean.com")],
                ),
                q.filter(User.id == bindparam("foo")).params(foo=7).one(),
            )
        self.assert_sql_count(testing.db, go, 2)
    def test_disable_dynamic(self):
        """test no subquery option on a dynamic.

        A lazy="dynamic" relationship cannot be populated eagerly, so
        applying subqueryload to it must raise InvalidRequestError.
        """
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        self.mapper_registry.map_imperatively(
            User,
            users,
            properties={"addresses": relationship(Address, lazy="dynamic")},
        )
        self.mapper_registry.map_imperatively(Address, addresses)
        sess = fixture_session()
        # previously this would not raise, but would emit
        # the query needlessly and put the result nowhere.
        assert_raises_message(
            sa.exc.InvalidRequestError,
            "User.addresses' does not support object population - eager "
            "loading cannot be applied.",
            sess.query(User).options(subqueryload(User.addresses)).first,
        )
    def test_many_to_many_plain(self):
        """Mapper-level lazy="subquery" on a many-to-many (secondary table)
        loads the full result in two statements."""
        keywords, items, item_keywords, Keyword, Item = (
            self.tables.keywords,
            self.tables.items,
            self.tables.item_keywords,
            self.classes.Keyword,
            self.classes.Item,
        )
        self.mapper_registry.map_imperatively(Keyword, keywords)
        self.mapper_registry.map_imperatively(
            Item,
            items,
            properties=dict(
                keywords=relationship(
                    Keyword,
                    secondary=item_keywords,
                    lazy="subquery",
                    order_by=keywords.c.id,
                )
            ),
        )
        q = fixture_session().query(Item).order_by(Item.id)
        def go():
            eq_(self.static.item_keyword_result, q.all())
        self.assert_sql_count(testing.db, go, 2)
def test_many_to_many_with_join(self):
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item,
)
self.mapper_registry.map_imperatively(Keyword, keywords)
self.mapper_registry.map_imperatively(
Item,
items,
properties=dict(
keywords=relationship(
Keyword,
secondary=item_keywords,
lazy="subquery",
order_by=keywords.c.id,
)
),
)
q = fixture_session().query(Item).order_by(Item.id)
def go():
eq_(
self.static.item_keyword_result[0:2],
q.join(Item.keywords).filter(Keyword.name == "red").all(),
)
self.assert_sql_count(testing.db, go, 2)
def test_many_to_many_with_join_alias(self):
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item,
)
self.mapper_registry.map_imperatively(Keyword, keywords)
self.mapper_registry.map_imperatively(
Item,
items,
properties=dict(
keywords=relationship(
Keyword,
secondary=item_keywords,
lazy="subquery",
order_by=keywords.c.id,
)
),
)
q = fixture_session().query(Item).order_by(Item.id)
def go():
ka = aliased(Keyword)
eq_(
self.static.item_keyword_result[0:2],
(q.join(ka, Item.keywords).filter(ka.name == "red")).all(),
)
self.assert_sql_count(testing.db, go, 2)
def test_orderby(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="subquery",
order_by=addresses.c.email_address,
)
},
)
q = fixture_session().query(User)
eq_(
[
User(id=7, addresses=[Address(id=1)]),
User(
id=8,
addresses=[
Address(id=3, email_address="ed@bettyboop.com"),
Address(id=4, email_address="ed@lala.com"),
Address(id=2, email_address="ed@wood.com"),
],
),
User(id=9, addresses=[Address(id=5)]),
User(id=10, addresses=[]),
],
q.order_by(User.id).all(),
)
def test_orderby_multi(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="subquery",
order_by=[addresses.c.email_address, addresses.c.id],
)
},
)
q = fixture_session().query(User)
eq_(
[
User(id=7, addresses=[Address(id=1)]),
User(
id=8,
addresses=[
Address(id=3, email_address="ed@bettyboop.com"),
Address(id=4, email_address="ed@lala.com"),
Address(id=2, email_address="ed@wood.com"),
],
),
User(id=9, addresses=[Address(id=5)]),
User(id=10, addresses=[]),
],
q.order_by(User.id).all(),
)
def test_orderby_related(self):
"""A regular mapper select on a single table can
order by a relationship to a second table"""
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
Address, lazy="subquery", order_by=addresses.c.id
)
),
)
q = fixture_session().query(User)
result = (
q.filter(User.id == Address.user_id)
.order_by(Address.email_address)
.all()
)
eq_(
[
User(
id=8,
addresses=[
Address(id=2, email_address="ed@wood.com"),
Address(id=3, email_address="ed@bettyboop.com"),
Address(id=4, email_address="ed@lala.com"),
],
),
User(id=9, addresses=[Address(id=5)]),
User(id=7, addresses=[Address(id=1)]),
],
result,
)
def test_orderby_desc(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
Address,
lazy="subquery",
order_by=[sa.desc(addresses.c.email_address)],
)
),
)
sess = fixture_session()
eq_(
[
User(id=7, addresses=[Address(id=1)]),
User(
id=8,
addresses=[
Address(id=2, email_address="ed@wood.com"),
Address(id=4, email_address="ed@lala.com"),
Address(id=3, email_address="ed@bettyboop.com"),
],
),
User(id=9, addresses=[Address(id=5)]),
User(id=10, addresses=[]),
],
sess.query(User).order_by(User.id).all(),
)
_pathing_runs = [
("lazyload", "lazyload", "lazyload", 15),
("subqueryload", "lazyload", "lazyload", 12),
("subqueryload", "subqueryload", "lazyload", 8),
("joinedload", "subqueryload", "lazyload", 7),
("lazyload", "lazyload", "subqueryload", 12),
("subqueryload", "subqueryload", "subqueryload", 4),
("subqueryload", "subqueryload", "joinedload", 3),
]
    def test_options_pathing(self):
        """Run the pathing matrix with strategies applied as query options."""
        self._do_options_test(self._pathing_runs)
    def test_mapper_pathing(self):
        """Run the pathing matrix with strategies configured on the mappers."""
        self._do_mapper_test(self._pathing_runs)
    def _do_options_test(self, configs):
        """Map User->orders->items->keywords once, then for each
        (o, i, k, count) config apply the named loader option at each path
        level and assert the SQL count via _do_query_tests.

        Strategy names not present in ``callables`` (e.g. "lazyload") get
        no option, i.e. the relationship's default lazy loading applies.
        """
        (
            users,
            Keyword,
            orders,
            items,
            order_items,
            Order,
            Item,
            User,
            keywords,
            item_keywords,
        ) = (
            self.tables.users,
            self.classes.Keyword,
            self.tables.orders,
            self.tables.items,
            self.tables.order_items,
            self.classes.Order,
            self.classes.Item,
            self.classes.User,
            self.tables.keywords,
            self.tables.item_keywords,
        )
        self.mapper_registry.map_imperatively(
            User,
            users,
            properties={
                "orders": relationship(Order, order_by=orders.c.id)  # o2m, m2o
            },
        )
        self.mapper_registry.map_imperatively(
            Order,
            orders,
            properties={
                "items": relationship(
                    Item, secondary=order_items, order_by=items.c.id
                )  # m2m
            },
        )
        self.mapper_registry.map_imperatively(
            Item,
            items,
            properties={
                "keywords": relationship(
                    Keyword, secondary=item_keywords, order_by=keywords.c.id
                )  # m2m
            },
        )
        self.mapper_registry.map_imperatively(Keyword, keywords)
        callables = {"joinedload": joinedload, "subqueryload": subqueryload}
        for o, i, k, count in configs:
            options = []
            if o in callables:
                options.append(callables[o](User.orders))
            if i in callables:
                options.append(callables[i](User.orders, Order.items))
            if k in callables:
                options.append(
                    callables[k](User.orders, Order.items, Item.keywords)
                )
            self._do_query_tests(options, count)
    def _do_mapper_test(self, configs):
        """Same matrix as _do_options_test, but the loader strategies are
        set as relationship(lazy=...) on the mappers themselves.

        Mappers are re-created for every config and torn down with
        clear_mappers() in a finally block so each run starts clean.
        """
        (
            users,
            Keyword,
            orders,
            items,
            order_items,
            Order,
            Item,
            User,
            keywords,
            item_keywords,
        ) = (
            self.tables.users,
            self.classes.Keyword,
            self.tables.orders,
            self.tables.items,
            self.tables.order_items,
            self.classes.Order,
            self.classes.Item,
            self.classes.User,
            self.tables.keywords,
            self.tables.item_keywords,
        )
        # translate the option-style names used in _pathing_runs into
        # relationship() lazy= values
        opts = {
            "lazyload": "select",
            "joinedload": "joined",
            "subqueryload": "subquery",
        }
        for o, i, k, count in configs:
            self.mapper_registry.map_imperatively(
                User,
                users,
                properties={
                    "orders": relationship(
                        Order, lazy=opts[o], order_by=orders.c.id
                    )
                },
            )
            self.mapper_registry.map_imperatively(
                Order,
                orders,
                properties={
                    "items": relationship(
                        Item,
                        secondary=order_items,
                        lazy=opts[i],
                        order_by=items.c.id,
                    )
                },
            )
            self.mapper_registry.map_imperatively(
                Item,
                items,
                properties={
                    "keywords": relationship(
                        Keyword,
                        lazy=opts[k],
                        secondary=item_keywords,
                        order_by=keywords.c.id,
                    )
                },
            )
            self.mapper_registry.map_imperatively(Keyword, keywords)
            try:
                self._do_query_tests([], count)
            finally:
                clear_mappers()
    def _do_query_tests(self, opts, count):
        """Run three queries with the given loader options: a full load
        (asserting ``count`` SQL statements), a filtered load, and a
        joined/filtered load in a fresh session."""
        Order, User = self.classes.Order, self.classes.User
        with fixture_session() as sess:
            def go():
                eq_(
                    sess.query(User).options(*opts).order_by(User.id).all(),
                    self.static.user_item_keyword_result,
                )
            self.assert_sql_count(testing.db, go, count)
            eq_(
                sess.query(User)
                .options(*opts)
                .filter(User.name == "fred")
                .order_by(User.id)
                .all(),
                self.static.user_item_keyword_result[2:3],
            )
        with fixture_session() as sess:
            eq_(
                sess.query(User)
                .options(*opts)
                .join(User.orders)
                .filter(Order.id == 3)
                .order_by(User.id)
                .all(),
                self.static.user_item_keyword_result[0:1],
            )
def test_cyclical(self):
"""A circular eager relationship breaks the cycle with a lazy loader"""
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
Address,
lazy="subquery",
backref=sa.orm.backref("user", lazy="subquery"),
order_by=Address.id,
)
),
)
is_(
sa.orm.class_mapper(User).get_property("addresses").lazy,
"subquery",
)
is_(sa.orm.class_mapper(Address).get_property("user").lazy, "subquery")
sess = fixture_session()
eq_(
self.static.user_address_result,
sess.query(User).order_by(User.id).all(),
)
def test_cyclical_explicit_join_depth(self):
"""A circular eager relationship breaks the cycle with a lazy loader"""
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
Address,
lazy="subquery",
join_depth=1,
backref=sa.orm.backref(
"user", lazy="subquery", join_depth=1
),
order_by=Address.id,
)
),
)
is_(
sa.orm.class_mapper(User).get_property("addresses").lazy,
"subquery",
)
is_(sa.orm.class_mapper(Address).get_property("user").lazy, "subquery")
sess = fixture_session()
eq_(
self.static.user_address_result,
sess.query(User).order_by(User.id).all(),
)
def test_add_arbitrary_exprs(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(addresses=relationship(Address, lazy="subquery")),
)
sess = fixture_session()
self.assert_compile(
sess.query(User, literal_column("1")),
"SELECT users.id AS users_id, users.name AS users_name, "
"1 FROM users",
)
def test_double_w_ac_against_subquery(self):
(
users,
orders,
User,
Address,
Order,
addresses,
Item,
items,
order_items,
) = (
self.tables.users,
self.tables.orders,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses,
self.classes.Item,
self.tables.items,
self.tables.order_items,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
"items": relationship(
Item,
secondary=order_items,
lazy="subquery",
order_by=items.c.id,
)
},
)
self.mapper_registry.map_imperatively(Item, items)
open_mapper = aliased(
Order, select(orders).where(orders.c.isopen == 1).alias()
)
closed_mapper = aliased(
Order, select(orders).where(orders.c.isopen == 0).alias()
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
Address, lazy="subquery", order_by=addresses.c.id
),
open_orders=relationship(
open_mapper, lazy="subquery", order_by=open_mapper.id
),
closed_orders=relationship(
closed_mapper, lazy="subquery", order_by=closed_mapper.id
),
),
)
self._run_double_test()
def test_double_w_ac(self):
(
users,
orders,
User,
Address,
Order,
addresses,
Item,
items,
order_items,
) = (
self.tables.users,
self.tables.orders,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses,
self.classes.Item,
self.tables.items,
self.tables.order_items,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
"items": relationship(
Item,
secondary=order_items,
lazy="subquery",
order_by=items.c.id,
)
},
)
self.mapper_registry.map_imperatively(Item, items)
open_mapper = aliased(Order, orders)
closed_mapper = aliased(Order, orders)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
Address, lazy="subquery", order_by=addresses.c.id
),
open_orders=relationship(
open_mapper,
primaryjoin=sa.and_(
open_mapper.isopen == 1,
users.c.id == open_mapper.user_id,
),
lazy="subquery",
order_by=open_mapper.id,
overlaps="closed_orders",
),
closed_orders=relationship(
closed_mapper,
primaryjoin=sa.and_(
closed_mapper.isopen == 0,
users.c.id == closed_mapper.user_id,
),
lazy="subquery",
order_by=closed_mapper.id,
overlaps="open_orders",
),
),
)
self._run_double_test()
def test_double_same_mappers(self):
"""Eager loading with two relationships simultaneously,
from the same table, using aliases."""
(
addresses,
items,
order_items,
orders,
Item,
User,
Address,
Order,
users,
) = (
self.tables.addresses,
self.tables.items,
self.tables.order_items,
self.tables.orders,
self.classes.Item,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.users,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
"items": relationship(
Item,
secondary=order_items,
lazy="subquery",
order_by=items.c.id,
)
},
)
self.mapper_registry.map_imperatively(Item, items)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
Address, lazy="subquery", order_by=addresses.c.id
),
open_orders=relationship(
Order,
primaryjoin=sa.and_(
orders.c.isopen == 1, users.c.id == orders.c.user_id
),
lazy="subquery",
order_by=orders.c.id,
viewonly=True,
),
closed_orders=relationship(
Order,
primaryjoin=sa.and_(
orders.c.isopen == 0, users.c.id == orders.c.user_id
),
lazy="subquery",
order_by=orders.c.id,
viewonly=True,
),
),
)
self._run_double_test()
    def _run_double_test(self, no_items=False):
        """Shared assertions for the open_orders/closed_orders "double"
        mapping tests: expects 4 statements when items are excluded,
        6 when the Order.items subqueryload is active."""
        User, Address, Order, Item = self.classes(
            "User", "Address", "Order", "Item"
        )
        q = fixture_session().query(User).order_by(User.id)
        def items(*ids):
            # helper building the expected Order(items=...) kwargs, or
            # nothing when items are not part of the mapping under test
            if no_items:
                return {}
            else:
                return {"items": [Item(id=id_) for id_ in ids]}
        def go():
            eq_(
                [
                    User(
                        id=7,
                        addresses=[Address(id=1)],
                        open_orders=[Order(id=3, **items(3, 4, 5))],
                        closed_orders=[
                            Order(id=1, **items(1, 2, 3)),
                            Order(id=5, **items(5)),
                        ],
                    ),
                    User(
                        id=8,
                        addresses=[
                            Address(id=2),
                            Address(id=3),
                            Address(id=4),
                        ],
                        open_orders=[],
                        closed_orders=[],
                    ),
                    User(
                        id=9,
                        addresses=[Address(id=5)],
                        open_orders=[Order(id=4, **items(1, 5))],
                        closed_orders=[Order(id=2, **items(1, 2, 3))],
                    ),
                    User(id=10),
                ],
                q.all(),
            )
        if no_items:
            self.assert_sql_count(testing.db, go, 4)
        else:
            self.assert_sql_count(testing.db, go, 6)
    @testing.combinations(
        ("plain",), ("cte", testing.requires.ctes), ("subquery",), id_="s"
    )
    def test_map_to_cte_subq(self, type_):
        """subqueryload of Address.user works whether User is mapped to the
        plain table, a CTE, or a SELECT subquery."""
        User, Address = self.classes("User", "Address")
        users, addresses = self.tables("users", "addresses")
        if type_ == "plain":
            target = users
        elif type_ == "cte":
            target = select(users).cte()
        elif type_ == "subquery":
            target = select(users).subquery()
        self.mapper_registry.map_imperatively(
            User,
            target,
            properties={"addresses": relationship(Address, backref="user")},
        )
        self.mapper_registry.map_imperatively(Address, addresses)
        sess = fixture_session()
        q = (
            sess.query(Address)
            .options(subqueryload(Address.user))
            .order_by(Address.id)
        )
        eq_(q.all(), self.static.address_user_result)
def test_limit(self):
"""Limit operations combined with lazy-load relationships."""
(
users,
items,
order_items,
orders,
Item,
User,
Address,
Order,
addresses,
) = (
self.tables.users,
self.tables.items,
self.tables.order_items,
self.tables.orders,
self.classes.Item,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses,
)
self.mapper_registry.map_imperatively(Item, items)
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
"items": relationship(
Item,
secondary=order_items,
lazy="subquery",
order_by=items.c.id,
)
},
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="subquery",
order_by=addresses.c.id,
),
"orders": relationship(
Order, lazy="select", order_by=orders.c.id
),
},
)
sess = fixture_session()
q = sess.query(User)
result = q.order_by(User.id).limit(2).offset(1).all()
eq_(self.static.user_all_result[1:3], result)
result = q.order_by(sa.desc(User.id)).limit(2).offset(2).all()
eq_(list(reversed(self.static.user_all_result[0:2])), result)
def test_group_by_only(self):
# test group_by() not impacting results, similarly to joinedload
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
lazy="subquery",
order_by=addresses.c.email_address,
)
},
)
q = fixture_session().query(User)
eq_(
[
User(id=7, addresses=[Address(id=1)]),
User(
id=8,
addresses=[
Address(id=3, email_address="ed@bettyboop.com"),
Address(id=4, email_address="ed@lala.com"),
Address(id=2, email_address="ed@wood.com"),
],
),
User(id=9, addresses=[Address(id=5)]),
User(id=10, addresses=[]),
],
q.order_by(User.id).group_by(User).all(), # group by all columns
)
    def test_one_to_many_scalar(self):
        """subqueryload of a uselist=False (scalar) one-to-many still loads
        in two statements."""
        Address, addresses, users, User = (
            self.classes.Address,
            self.tables.addresses,
            self.tables.users,
            self.classes.User,
        )
        self.mapper_registry.map_imperatively(
            User,
            users,
            properties=dict(
                address=relationship(
                    self.mapper_registry.map_imperatively(Address, addresses),
                    lazy="subquery",
                    uselist=False,
                )
            ),
        )
        q = fixture_session().query(User)
        def go():
            result = q.filter(users.c.id == 7).all()
            eq_([User(id=7, address=Address(id=1))], result)
        self.assert_sql_count(testing.db, go, 2)
    def test_many_to_one(self):
        """subqueryload on a many-to-one: the loaded Address.user must be
        the same identity-map object as Session.get(User, 7)."""
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        self.mapper_registry.map_imperatively(
            Address,
            addresses,
            properties=dict(
                user=relationship(
                    self.mapper_registry.map_imperatively(User, users),
                    lazy="subquery",
                )
            ),
        )
        sess = fixture_session()
        q = sess.query(Address)
        def go():
            a = q.filter(addresses.c.id == 1).one()
            is_not(a.user, None)
            u1 = sess.get(User, 7)
            is_(a.user, u1)
        self.assert_sql_count(testing.db, go, 2)
def test_double_with_aggregate(self):
User, users, orders, Order = (
self.classes.User,
self.tables.users,
self.tables.orders,
self.classes.Order,
)
max_orders_by_user = (
sa.select(sa.func.max(orders.c.id).label("order_id"))
.group_by(orders.c.user_id)
.alias("max_orders_by_user")
)
max_orders = (
orders.select()
.where(orders.c.id == max_orders_by_user.c.order_id)
.alias("max_orders")
)
self.mapper_registry.map_imperatively(Order, orders)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"orders": relationship(
Order,
backref="user",
lazy="subquery",
order_by=orders.c.id,
),
"max_order": relationship(
aliased(Order, max_orders), lazy="subquery", uselist=False
),
},
)
q = fixture_session().query(User)
def go():
eq_(
[
User(
id=7,
orders=[Order(id=1), Order(id=3), Order(id=5)],
max_order=Order(id=5),
),
User(id=8, orders=[]),
User(
id=9,
orders=[Order(id=2), Order(id=4)],
max_order=Order(id=4),
),
User(id=10),
],
q.order_by(User.id).all(),
)
self.assert_sql_count(testing.db, go, 3)
    def test_uselist_false_warning(self):
        """test that multiple rows received by a
        uselist=False raises a warning."""
        User, users, orders, Order = (
            self.classes.User,
            self.tables.users,
            self.tables.orders,
            self.classes.Order,
        )
        # "order" is scalar but users have multiple orders in the fixture
        # data, so the eager load must emit SAWarning.
        self.mapper_registry.map_imperatively(
            User,
            users,
            properties={"order": relationship(Order, uselist=False)},
        )
        self.mapper_registry.map_imperatively(Order, orders)
        s = fixture_session()
        assert_warns(
            sa.exc.SAWarning,
            s.query(User).options(subqueryload(User.order)).all,
        )
| EagerTest |
python | automl__auto-sklearn | autosklearn/ensembles/ensemble_selection.py | {
"start": 621,
"end": 14007
} | class ____(AbstractEnsemble):
    def __init__(
        self,
        task_type: int,
        metrics: Sequence[Scorer] | Scorer,
        backend: Backend,
        ensemble_size: int = 50,
        bagging: bool = False,
        mode: str = "fast",
        random_state: int | np.random.RandomState | None = None,
    ) -> None:
        """An ensemble of selected algorithms
        Fitting an EnsembleSelection generates an ensemble from the models
        generated during the search process. Can be further used for prediction.
        Parameters
        ----------
        task_type: int
            An identifier indicating which task is being performed.
        metrics: Sequence[Scorer] | Scorer
            The metric used to evaluate the models. If multiple metrics are passed,
            ensemble selection only optimizes for the first
        backend : Backend
            Gives access to the backend of Auto-sklearn. Not used by Ensemble Selection.
        ensemble_size: int = 50
            Number of greedy selection rounds, i.e. the maximum number of models
            (with repetition) added to the ensemble.
        bagging: bool = False
            Whether to use bagging in ensemble selection
        mode: str in ['fast', 'slow'] = 'fast'
            Which kind of ensemble generation to use
            * 'slow' - The original method used in Rich Caruana's ensemble selection.
            * 'fast' - A faster version of Rich Caruanas' ensemble selection.
        random_state: int | RandomState | None = None
            The random_state used for ensemble selection.
            * None - Uses numpy's default RandomState object
            * int - Successive calls to fit will produce the same results
            * RandomState - Truly random, each call to fit will produce
              different results, even with the same object.
        References
        ----------
        | Ensemble selection from libraries of models
        | Rich Caruana, Alexandru Niculescu-Mizil, Geoff Crew and Alex Ksikes
        | ICML 2004
        | https://dl.acm.org/doi/10.1145/1015330.1015432
        | https://www.cs.cornell.edu/~caruana/ctp/ct.papers/caruana.icml04.icdm06long.pdf
        """  # noqa: E501
        self.ensemble_size = ensemble_size
        self.task_type = task_type
        if isinstance(metrics, Sequence):
            if len(metrics) > 1:
                warnings.warn(
                    "Ensemble selection can only optimize one metric, "
                    "but multiple metrics were passed, dropping all "
                    "except for the first metric."
                )
            self.metric = metrics[0]
        else:
            self.metric = metrics
        self.bagging = bagging
        self.mode = mode
        # Behaviour similar to sklearn
        # int - Deterministic with successive calls to fit
        # RandomState - Successive calls to fit will produce differences
        # None - Uses numpy's global singleton RandomState
        # https://scikit-learn.org/stable/common_pitfalls.html#controlling-randomness
        self.random_state = random_state
    def fit(
        self,
        base_models_predictions: List[np.ndarray],
        true_targets: np.ndarray,
        model_identifiers: List[Tuple[int, int, float]],
        runs: Sequence[Run],
        X_data: SUPPORTED_FEAT_TYPES | None = None,
    ) -> EnsembleSelection:
        """Build the ensemble by greedy selection over base model predictions.

        Validates ensemble_size, task_type, metric and mode, then runs
        selection (optionally bagged), computes per-model weights and
        records the model identifiers.  Returns self.

        Raises
        ------
        ValueError
            On invalid ensemble_size, task_type, metric, or mode.
        """
        self.ensemble_size = int(self.ensemble_size)
        if self.ensemble_size < 1:
            raise ValueError("Ensemble size cannot be less than one!")
        if self.task_type not in TASK_TYPES:
            raise ValueError("Unknown task type %s." % self.task_type)
        if not isinstance(self.metric, Scorer):
            raise ValueError(
                "The provided metric must be an instance of Scorer, "
                "nevertheless it is {}({})".format(
                    self.metric,
                    type(self.metric),
                )
            )
        if self.mode not in ("fast", "slow"):
            raise ValueError("Unknown mode %s" % self.mode)
        if self.bagging:
            self._bagging(base_models_predictions, true_targets)
        else:
            self._fit(
                predictions=base_models_predictions,
                X_data=X_data,
                labels=true_targets,
            )
        self._calculate_weights()
        self.identifiers_ = model_identifiers
        return self
def _fit(
self,
predictions: List[np.ndarray],
labels: np.ndarray,
*,
X_data: SUPPORTED_FEAT_TYPES | None = None,
) -> EnsembleSelection:
if self.mode == "fast":
self._fast(predictions=predictions, X_data=X_data, labels=labels)
else:
self._slow(predictions=predictions, X_data=X_data, labels=labels)
return self
    def _fast(
        self,
        predictions: List[np.ndarray],
        labels: np.ndarray,
        *,
        X_data: SUPPORTED_FEAT_TYPES | None = None,
    ) -> None:
        """Fast version of Rich Caruana's ensemble selection method.

        Keeps a running (unscaled) sum of the selected predictions so each
        candidate evaluation is one in-place add + scale instead of
        re-averaging the whole ensemble.  Populates indices_, trajectory_
        and train_loss_ (as plain Python lists / scalar).
        """
        self.num_input_models_ = len(predictions)
        rand = check_random_state(self.random_state)
        ensemble = []  # type: List[np.ndarray]
        trajectory = []
        order = []
        ensemble_size = self.ensemble_size
        # running sum of the predictions selected so far (not yet averaged)
        weighted_ensemble_prediction = np.zeros(
            predictions[0].shape,
            dtype=np.float64,
        )
        # scratch buffer reused for every candidate evaluation
        fant_ensemble_prediction = np.zeros(
            weighted_ensemble_prediction.shape,
            dtype=np.float64,
        )
        for i in range(ensemble_size):
            losses = np.zeros(
                (len(predictions)),
                dtype=np.float64,
            )
            s = len(ensemble)
            if s > 0:
                np.add(
                    weighted_ensemble_prediction,
                    ensemble[-1],
                    out=weighted_ensemble_prediction,
                )
            # Memory-efficient averaging!
            for j, pred in enumerate(predictions):
                # fant_ensemble_prediction is the prediction of the current ensemble
                # and should be
                #
                # ([predictions[selected_prev_iterations] + predictions[j])/(s+1)
                #
                # We overwrite the contents of fant_ensemble_prediction directly with
                # weighted_ensemble_prediction + new_prediction and then scale for avg
                np.add(weighted_ensemble_prediction, pred, out=fant_ensemble_prediction)
                np.multiply(
                    fant_ensemble_prediction,
                    (1.0 / float(s + 1)),
                    out=fant_ensemble_prediction,
                )
                losses[j] = calculate_losses(
                    solution=labels,
                    prediction=fant_ensemble_prediction,
                    task_type=self.task_type,
                    metrics=[self.metric],
                    X_data=X_data,
                    scoring_functions=None,
                )[self.metric.name]
            # break ties in the minimum loss randomly rather than by index
            all_best = np.argwhere(losses == np.nanmin(losses)).flatten()
            best = rand.choice(all_best)
            ensemble.append(predictions[best])
            trajectory.append(losses[best])
            order.append(best)
            # Handle special case
            if len(predictions) == 1:
                break
        self.indices_ = order
        self.trajectory_ = trajectory
        self.train_loss_ = trajectory[-1]
    def _slow(
        self,
        predictions: List[np.ndarray],
        labels: np.ndarray,
        *,
        X_data: SUPPORTED_FEAT_TYPES | None = None,
    ) -> None:
        """Rich Caruana's ensemble selection method.

        Re-averages the whole candidate ensemble for every evaluation
        (no running-sum optimization) and breaks ties by the first index
        via nanargmin.  Unlike _fast, indices_ and trajectory_ are stored
        as numpy arrays here.
        """
        self.num_input_models_ = len(predictions)
        ensemble = []
        trajectory = []
        order = []
        ensemble_size = self.ensemble_size
        for i in range(ensemble_size):
            losses = np.zeros(
                [np.shape(predictions)[0]],
                dtype=np.float64,
            )
            for j, pred in enumerate(predictions):
                # temporarily add candidate j, score the average, remove it
                ensemble.append(pred)
                ensemble_prediction = np.mean(np.array(ensemble), axis=0)
                losses[j] = calculate_losses(
                    solution=labels,
                    prediction=ensemble_prediction,
                    task_type=self.task_type,
                    metrics=[self.metric],
                    X_data=X_data,
                    scoring_functions=None,
                )[self.metric.name]
                ensemble.pop()
            best = np.nanargmin(losses)
            ensemble.append(predictions[best])
            trajectory.append(losses[best])
            order.append(best)
            # Handle special case
            if len(predictions) == 1:
                break
        self.indices_ = np.array(
            order,
            dtype=np.int64,
        )
        self.trajectory_ = np.array(
            trajectory,
            dtype=np.float64,
        )
        self.train_loss_ = trajectory[-1]
def _calculate_weights(self) -> None:
ensemble_members = Counter(self.indices_).most_common()
weights = np.zeros(
(self.num_input_models_,),
dtype=np.float64,
)
for ensemble_member in ensemble_members:
weight = float(ensemble_member[1]) / self.ensemble_size
weights[ensemble_member[0]] = weight
if np.sum(weights) < 1:
weights = weights / np.sum(weights)
self.weights_ = weights
    def _bagging(
        self,
        predictions: List[np.ndarray],
        labels: np.ndarray,
        fraction: float = 0.5,
        n_bags: int = 20,
    ) -> np.ndarray:
        """Rich Caruana's ensemble selection method with bagging.

        Currently disabled: the unconditional raise below makes everything
        after it unreachable dead code, kept for reference.
        """
        raise ValueError("Bagging might not work with class-based interface!")
        n_models = predictions.shape[0]
        bag_size = int(n_models * fraction)
        order_of_each_bag = []
        for j in range(n_bags):
            # Bagging a set of models
            indices = sorted(random.sample(range(0, n_models), bag_size))
            bag = predictions[indices, :, :]
            order, _ = self._fit(predictions=bag, labels=labels)
            order_of_each_bag.append(order)
        return np.array(
            order_of_each_bag,
            dtype=np.int64,
        )
    def predict(
        self, base_models_predictions: Union[np.ndarray, List[np.ndarray]]
    ) -> np.ndarray:
        """Return the weighted average of the base model predictions.

        Accepts predictions either for all input models (length equals
        len(weights_)) or only for the models with nonzero weight.

        Raises
        ------
        ValueError
            If the number of prediction arrays matches neither of the above.
        """
        average = np.zeros_like(base_models_predictions[0], dtype=np.float64)
        tmp_predictions = np.empty_like(base_models_predictions[0], dtype=np.float64)
        # if predictions.shape[0] == len(self.weights_),
        # predictions include those of zero-weight models.
        if len(base_models_predictions) == len(self.weights_):
            for pred, weight in zip(base_models_predictions, self.weights_):
                np.multiply(pred, weight, out=tmp_predictions)
                np.add(average, tmp_predictions, out=average)
        # if prediction model.shape[0] == len(non_null_weights),
        # predictions do not include those of zero-weight models.
        elif len(base_models_predictions) == np.count_nonzero(self.weights_):
            non_null_weights = [w for w in self.weights_ if w > 0]
            for pred, weight in zip(base_models_predictions, non_null_weights):
                np.multiply(pred, weight, out=tmp_predictions)
                np.add(average, tmp_predictions, out=average)
        # If none of the above applies, then something must have gone wrong.
        else:
            raise ValueError(
                "The dimensions of ensemble predictions"
                " and ensemble weights do not match!"
            )
        del tmp_predictions
        return average
def __str__(self) -> str:
trajectory_str = " ".join(
[f"{id}: {perf:.5f}" for id, perf in enumerate(self.trajectory_)]
)
identifiers_str = " ".join(
[
f"{identifier}"
for idx, identifier in enumerate(self.identifiers_)
if self.weights_[idx] > 0
]
)
return (
"Ensemble Selection:\n"
f"\tTrajectory: {trajectory_str}\n"
f"\tMembers: {self.indices_}\n"
f"\tWeights: {self.weights_}\n"
f"\tIdentifiers: {identifiers_str}\n"
)
def get_models_with_weights(
self, models: Dict[Tuple[int, int, float], BasePipeline]
) -> List[Tuple[float, BasePipeline]]:
output = []
for i, weight in enumerate(self.weights_):
if weight > 0.0:
identifier = self.identifiers_[i]
model = models[identifier]
output.append((weight, model))
output.sort(reverse=True, key=lambda t: t[0])
return output
def get_identifiers_with_weights(
self,
) -> List[Tuple[Tuple[int, int, float], float]]:
return list(zip(self.identifiers_, self.weights_))
def get_selected_model_identifiers(self) -> List[Tuple[int, int, float]]:
output = []
for i, weight in enumerate(self.weights_):
identifier = self.identifiers_[i]
if weight > 0.0:
output.append(identifier)
return output
    def get_validation_performance(self) -> float:
        """Return the ensemble loss after the final greedy iteration."""
        return self.trajectory_[-1]
| EnsembleSelection |
python | getsentry__sentry | tests/sentry/integrations/slack/test_notify_action.py | {
"start": 1206,
"end": 15443
} | class ____(RuleTestCase):
rule_cls = SlackNotifyServiceAction
    def mock_list(self, list_type, channels, result_name="channels"):
        """Mock a Slack ``<list_type>_list`` API call returning ``channels``."""
        return mock_slack_response(f"{list_type}_list", body={"ok": True, result_name: channels})
    def mock_conversations_info(self, channel):
        """Mock a successful Slack conversations.info response for ``channel``."""
        return mock_slack_response(
            "conversations_info",
            body={"ok": True, "channel": channel},
            req_args={"channel": channel},
        )
    def mock_msg_schedule_response(self, channel_id, result_name="channel"):
        """Mock chat.scheduleMessage; the sentinel channel_id
        "channel_not_found" yields the corresponding Slack error body."""
        if channel_id == "channel_not_found":
            body = {"ok": False, "error": "channel_not_found"}
        else:
            body = {
                "ok": True,
                result_name: channel_id,
                "scheduled_message_id": "Q1298393284",
            }
        return mock_slack_response("chat_scheduleMessage", body)
def mock_msg_delete_scheduled_response(self, channel_id, result_name="channel"):
return mock_slack_response("chat_deleteScheduledMessage", {"ok": True})
def setUp(self) -> None:
self.organization = self.get_event().project.organization
self.integration, self.org_integration = self.create_provider_integration_for(
organization=self.organization,
user=self.user,
external_id="TXXXXXXX1",
metadata={
"access_token": "xoxb-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx",
"domain_name": "sentry.slack.com",
"installation_type": "born_as_bot",
},
name="Awesome Team",
provider="slack",
)
def assert_form_valid(self, form, expected_channel_id, expected_channel):
assert form.is_valid()
assert form.cleaned_data["channel_id"] == expected_channel_id
assert form.cleaned_data["channel"] == expected_channel
@patch("sentry.integrations.slack.sdk_client.SlackSdkClient.chat_postMessage")
@patch(
"slack_sdk.web.client.WebClient._perform_urllib_http_request",
return_value={
"body": orjson.dumps({"ok": True}).decode(),
"headers": {},
"status": 200,
},
)
def test_no_upgrade_notice_bot_app(
self, mock_api_call: MagicMock, mock_post: MagicMock
) -> None:
event = self.get_event()
rule = self.get_rule(data={"workspace": self.integration.id, "channel": "#my-channel"})
results = list(rule.after(event=event))
assert len(results) == 1
# Trigger rule callback
results[0].callback(event, futures=[])
blocks = mock_post.call_args.kwargs["blocks"]
blocks = orjson.loads(blocks)
assert event.title in blocks[0]["text"]["text"]
def test_render_label_with_notes(self) -> None:
rule = self.get_rule(
data={
"workspace": self.integration.id,
"channel": "#my-channel",
"channel_id": "",
"tags": "one, two",
"notes": "fix this @colleen",
}
)
assert (
rule.render_label()
== 'Send a notification to the Awesome Team Slack workspace to #my-channel and show tags [one, two] and notes "fix this @colleen" in notification'
)
def test_render_label_without_integration(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
self.integration.delete()
rule = self.get_rule(
data={
"workspace": self.integration.id,
"channel": "#my-channel",
"channel_id": "",
"tags": "",
}
)
label = rule.render_label()
assert label == "Send a notification to the [removed] Slack workspace to #my-channel"
@responses.activate
def test_valid_bot_channel_selected(self) -> None:
integration, _ = self.create_provider_integration_for(
organization=self.event.project.organization,
user=self.user,
provider="slack",
name="Awesome Team",
external_id="TXXXXXXX2",
metadata={
"access_token": "xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx",
"domain_name": "sentry.slack.com",
"installation_type": "born_as_bot",
},
)
rule = self.get_rule(
data={"workspace": integration.id, "channel": "#my-channel", "tags": ""}
)
with self.mock_msg_schedule_response("chan-id"):
with self.mock_msg_delete_scheduled_response("chan-id"):
form = rule.get_form_instance()
assert form.is_valid()
self.assert_form_valid(form, "chan-id", "#my-channel")
@responses.activate
def test_valid_member_selected(self) -> None:
rule = self.get_rule(
data={"workspace": self.integration.id, "channel": "@morty", "tags": ""}
)
members = {
"ok": "true",
"members": [
{"name": "morty", "id": "morty-id"},
{"name": "other-user", "id": "user-id"},
],
}
with self.mock_msg_schedule_response("channel_not_found"):
with self.mock_list("users", members["members"], "members"):
form = rule.get_form_instance()
assert form.is_valid()
self.assert_form_valid(form, "morty-id", "@morty")
@responses.activate
def test_invalid_channel_selected(self) -> None:
rule = self.get_rule(
data={"workspace": self.integration.id, "channel": "#my-channel", "tags": ""}
)
responses.add(
method=responses.POST,
url="https://slack.com/api/chat.scheduleMessage",
status=200,
content_type="application/json",
body=orjson.dumps({"ok": False, "error": "channel_not_found"}),
)
members = {"ok": "true", "members": [{"name": "other-member", "id": "member-id"}]}
responses.add(
method=responses.GET,
url="https://slack.com/api/users.list",
status=200,
content_type="application/json",
body=orjson.dumps(members),
)
form = rule.get_form_instance()
assert not form.is_valid()
assert len(form.errors) == 1
@responses.activate
@patch("slack_sdk.web.client.WebClient.users_list")
def test_rate_limited_response(self, mock_api_call: MagicMock) -> None:
"""Should surface a 429 from Slack to the frontend form"""
mock_api_call.side_effect = SlackApiError(
message="ratelimited",
response=SlackResponse(
client=None,
http_verb="POST",
api_url="https://slack.com/api/users.list",
req_args={},
data={"ok": False, "error": "rate_limited"},
headers={},
status_code=429,
),
)
with self.mock_msg_schedule_response("channel_not_found"):
rule = self.get_rule(
data={
"workspace": self.integration.id,
"channel": "#my-channel",
"input_channel_id": "",
"tags": "",
}
)
form = rule.get_form_instance()
assert not form.is_valid()
assert SLACK_RATE_LIMITED_MESSAGE in str(form.errors.values())
def test_channel_id_provided_sdk(self) -> None:
channel = {"name": "my-channel", "id": "C2349874"}
with self.mock_conversations_info(channel):
rule = self.get_rule(
data={
"workspace": self.integration.id,
"channel": "#my-channel",
"input_channel_id": "C2349874",
"tags": "",
}
)
form = rule.get_form_instance()
assert form.is_valid()
def test_invalid_channel_id_provided_sdk(self) -> None:
with patch(
"slack_sdk.web.client.WebClient.conversations_info",
side_effect=SlackApiError("", response={"ok": False, "error": "channel_not_found"}),
):
rule = self.get_rule(
data={
"workspace": self.integration.id,
"channel": "#my-chanel",
"input_channel_id": "C1234567",
"tags": "",
}
)
form = rule.get_form_instance()
assert not form.is_valid()
assert "Channel not found. Invalid ID provided." in str(form.errors.values())
def test_invalid_channel_name_provided_sdk(self) -> None:
channel = {"name": "my-channel", "id": "C2349874"}
with self.mock_conversations_info(channel):
rule = self.get_rule(
data={
"workspace": self.integration.id,
"channel": "#my-chanel",
"input_channel_id": "C1234567",
"tags": "",
}
)
form = rule.get_form_instance()
assert not form.is_valid()
assert "Slack: Slack channel name from ID does not match input channel name." in str(
form.errors.values()
)
def test_invalid_workspace(self) -> None:
# the workspace _should_ be the integration id
rule = self.get_rule(data={"workspace": "unknown", "channel": "#my-channel", "tags": ""})
form = rule.get_form_instance()
assert not form.is_valid()
assert ["Slack: Workspace is a required field."] in form.errors.values()
@responses.activate
def test_display_name_conflict(self) -> None:
rule = self.get_rule(
data={"workspace": self.integration.id, "channel": "@morty", "tags": ""}
)
members = {
"ok": "true",
"members": [
{"name": "first-morty", "id": "morty-id", "profile": {"display_name": "morty"}},
{"name": "second-morty", "id": "user-id", "profile": {"display_name": "morty"}},
],
}
with self.mock_msg_schedule_response("channel_not_found"):
with self.mock_list("users", members["members"], "members"):
form = rule.get_form_instance()
assert not form.is_valid()
assert [
"Slack: Multiple users were found with display name '@morty'. Please use your username, found at sentry.slack.com/account/settings#username."
] in form.errors.values()
def test_disabled_org_integration(self) -> None:
org = self.create_organization(owner=self.user)
self.create_organization_integration(
organization_id=org.id, integration=self.integration, status=ObjectStatus.DISABLED
)
with assume_test_silo_mode(SiloMode.CONTROL):
self.org_integration.update(status=ObjectStatus.DISABLED)
event = self.get_event()
rule = self.get_rule(data={"workspace": self.integration.id, "channel": "#my-channel"})
results = list(rule.after(event=event))
assert len(results) == 0
@responses.activate
@mock.patch("sentry.analytics.record")
@patch("sentry.integrations.slack.sdk_client.SlackSdkClient.chat_postMessage")
@patch(
"slack_sdk.web.client.WebClient._perform_urllib_http_request",
return_value={
"body": orjson.dumps({"ok": True}).decode(),
"headers": {},
"status": 200,
},
)
def test_additional_attachment(
self, mock_api_call: MagicMock, mock_post: MagicMock, mock_record: MagicMock
) -> None:
with mock.patch.dict(
manager.attachment_generators,
{ExternalProviders.SLACK: additional_attachment_generator_block_kit},
):
event = self.get_event()
rule = self.get_rule(
data={
"workspace": self.integration.id,
"channel": "#my-channel",
"channel_id": "123",
}
)
notification_uuid = "123e4567-e89b-12d3-a456-426614174000"
results = list(rule.after(event=event, notification_uuid=notification_uuid))
assert len(results) == 1
# Trigger rule callback
results[0].callback(event, futures=[])
blocks = mock_post.call_args.kwargs["blocks"]
blocks = orjson.loads(blocks)
assert event.title in blocks[0]["text"]["text"]
assert blocks[5]["text"]["text"] == self.organization.slug
assert blocks[6]["text"]["text"] == self.integration.id
assert_last_analytics_event(
mock_record,
AlertSentEvent(
provider="slack",
alert_id="",
alert_type="issue_alert",
organization_id=self.organization.id,
project_id=event.project_id,
external_id="123",
notification_uuid=notification_uuid,
),
)
assert_any_analytics_event(
mock_record,
SlackIntegrationNotificationSent(
category="issue_alert",
organization_id=self.organization.id,
project_id=event.project_id,
group_id=event.group_id,
notification_uuid=notification_uuid,
alert_id=None,
),
)
@responses.activate
def test_multiple_integrations(self) -> None:
org = self.create_organization(owner=self.user)
self.create_organization_integration(organization_id=org.id, integration=self.integration)
event = self.get_event()
rule = self.get_rule(data={"workspace": self.integration.id, "channel": "#my-channel"})
results = list(rule.after(event=event))
assert len(results) == 1
| SlackNotifyActionTest |
python | realpython__materials | python-class/mro.py | {
"start": 0,
"end": 61
} | class ____:
def method(self):
print("A.method()")
| A |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_image_anchor03.py | {
"start": 315,
"end": 1246
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("image_anchor03.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image("E9", self.image_dir + "red.png", {"positioning": 1})
workbook.close()
self.assertExcelEqual()
def test_create_file_in_memory(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename, {"in_memory": True})
worksheet = workbook.add_worksheet()
worksheet.insert_image("E9", self.image_dir + "red.png", {"positioning": 1})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1174562,
"end": 1174762
} | class ____(SelectionInit):
"""PrimitiveValue schema wrapper."""
_schema = {"$ref": "#/definitions/PrimitiveValue"}
def __init__(self, *args):
super().__init__(*args)
| PrimitiveValue |
python | sympy__sympy | sympy/integrals/transforms.py | {
"start": 33615,
"end": 34358
} | class ____(IntegralTransform):
""" Base class for Fourier transforms."""
def a(self):
raise NotImplementedError(
"Class %s must implement a(self) but does not" % self.__class__)
def b(self):
raise NotImplementedError(
"Class %s must implement b(self) but does not" % self.__class__)
def _compute_transform(self, f, x, k, **hints):
return _fourier_transform(f, x, k,
self.a(), self.b(),
self.__class__._name, **hints)
def _as_integral(self, f, x, k):
a = self.a()
b = self.b()
return Integral(a*f*exp(b*S.ImaginaryUnit*x*k), (x, S.NegativeInfinity, S.Infinity))
| FourierTypeTransform |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataprep.py | {
"start": 8309,
"end": 9443
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.dataprep.GoogleDataprepHook")
def test_execute(self, hook_mock):
op = DataprepDeleteFlowOperator(
task_id=TASK_ID,
dataprep_conn_id=DATAPREP_CONN_ID,
flow_id=FLOW_ID,
)
op.execute(context=mock.MagicMock())
hook_mock.assert_called_once_with(dataprep_conn_id="dataprep_default")
hook_mock.return_value.delete_flow.assert_called_once_with(
flow_id=FLOW_ID,
)
@pytest.mark.db_test
@mock.patch("airflow.providers.google.cloud.operators.dataprep.GoogleDataprepHook")
def test_execute_with_template_params(self, _, create_task_instance_of_operator, session):
dag_id = "test_execute_delete_flow_with_template"
ti = create_task_instance_of_operator(
DataprepDeleteFlowOperator,
dag_id=dag_id,
task_id=TASK_ID,
flow_id="{{ dag.dag_id }}",
)
session.add(ti)
session.commit()
ti.render_templates()
assert dag_id == ti.task.flow_id
| TestDataprepDeleteFlowOperator |
python | django__django | tests/logging_tests/views.py | {
"start": 525,
"end": 1028
} | class ____(Exception):
pass
def uncaught_exception(request):
raise UncaughtException("Uncaught exception")
def internal_server_error(request):
status = request.GET.get("status", 500)
return HttpResponseServerError("Server Error", status=int(status))
def permission_denied(request):
raise PermissionDenied()
def multi_part_parser_error(request):
raise MultiPartParserError("parsing error")
def does_not_exist_raised(request):
raise Http404("Not Found")
| UncaughtException |
python | huggingface__transformers | utils/modular_model_converter.py | {
"start": 4435,
"end": 8600
} | class ____(m.MatcherDecoratableTransformer):
"""A transformer that replaces `old_name` with `new_name` in comments, string and any references.
It should take into account name like `MyNewModel`, or `my_new_model`. Without using the AUTO_MAPPING.
Supported renaming patterns:
- llama -> my_new_model and my_new_model -> llama
- Llama -> MyNewModel and MyNewModel -> Llama
- LLAMA -> MY_NEW_MODEL and MY_NEW_MODEL -> LLAMA
- LLaMa -> MyNewModel and MyNewModel -> Llama
"""
def __init__(self, old_name: str, new_name: str, original_new_model_name: str = "", only_doc: bool = False):
super().__init__()
old_name = old_name.replace("-", "_")
new_name = new_name.replace("-", "_")
self.old_name = old_name
self.new_name = new_name
self.cased_new_name = get_cased_name(self.new_name)
self.cased_old_name = get_cased_name(self.old_name)
self.patterns = {
old_name: new_name,
old_name.upper(): new_name.upper(),
# For some old models, `self.cased_old_name` == `old_name.upper()` in which case this overwrite previous entry
self.cased_old_name: self.cased_new_name,
}
# In case new_name is a prefix alias, and not the original new model name
self.original_new_model_name = original_new_model_name
self.only_doc = only_doc
def _replace_name(self, original_node, updated_node):
if re.findall(r"# Copied from", updated_node.value):
return cst.RemoveFromParent()
update = preserve_case_replace(updated_node.value, self.patterns, self.cased_new_name)
return updated_node.with_changes(value=update)
@m.leave(m.SimpleString() | m.Comment())
def replace_name(self, original_node, updated_node):
return self._replace_name(original_node, updated_node)
def leave_Name(self, original_node, updated_node):
if not self.only_doc:
return self._replace_name(original_node, updated_node)
return updated_node
def leave_ImportFrom(self, original_node, updated_node):
"""
The imports from other file types (configuration, processing etc) should use original model name.
Also, no replaces on absolute imports (e.g. `from mamba_ssm import ...`)
"""
if len(original_node.relative) == 0: # no replaces on absolute imports
return original_node
if self.original_new_model_name != self.new_name and m.matches(updated_node.module, m.Name()):
patterns = "|".join(ALL_FILE_TYPES)
regex = rf"({patterns})_{self.new_name}"
new_source = re.sub(
regex, lambda m: f"{m.group(1)}_{self.original_new_model_name}", updated_node.module.value
)
updated_node = updated_node.with_changes(module=updated_node.module.with_changes(value=new_source))
return updated_node
DOCSTRING_NODE = m.SimpleStatementLine(
body=[
m.Expr(
value=m.SimpleString(
# match anything between """ """
value=m.MatchIfTrue(lambda value: re.search(r"\"\"\"[\s\S]*\"\"\"", value) is not None)
)
)
]
)
def get_full_attribute_name(node: cst.Attribute | cst.Name) -> str | None:
"""Get the full name of an Attribute or Name node (e.g. `"nn.Module"` for an Attribute representing it). If the
successive value of an Attribute are not Name nodes, return `None`."""
if m.matches(node, m.Name()):
return node.value
elif m.matches(node, m.Attribute()):
if not m.matches(node.attr, m.Name()):
return None
name = node.attr.value
new_node = node.value
while m.matches(new_node, m.Attribute()):
if not m.matches(new_node.attr, m.Name()):
return None
name = new_node.attr.value + "." + name
new_node = new_node.value
if not m.matches(new_node, m.Name()):
return None
return new_node.value + "." + name
return None
| ReplaceNameTransformer |
python | encode__django-rest-framework | tests/test_validators.py | {
"start": 2196,
"end": 5014
} | class ____(TestCase):
def setUp(self):
self.instance = UniquenessModel.objects.create(username='existing')
def test_repr(self):
serializer = UniquenessSerializer()
expected = dedent("""
UniquenessSerializer():
id = IntegerField(label='ID', read_only=True)
username = CharField(max_length=100, validators=[<UniqueValidator(queryset=UniquenessModel.objects.all())>])
""")
assert repr(serializer) == expected
def test_is_not_unique(self):
data = {'username': 'existing'}
serializer = UniquenessSerializer(data=data)
assert not serializer.is_valid()
assert serializer.errors == {'username': ['uniqueness model with this username already exists.']}
def test_relation_is_not_unique(self):
RelatedModel.objects.create(user=self.instance)
data = {'user': self.instance.pk}
serializer = RelatedModelUserSerializer(data=data)
assert not serializer.is_valid()
assert serializer.errors == {'user': ['related model with this user already exists.']}
def test_is_unique(self):
data = {'username': 'other'}
serializer = UniquenessSerializer(data=data)
assert serializer.is_valid()
assert serializer.validated_data == {'username': 'other'}
def test_updated_instance_excluded(self):
data = {'username': 'existing'}
serializer = UniquenessSerializer(self.instance, data=data)
assert serializer.is_valid()
assert serializer.validated_data == {'username': 'existing'}
def test_doesnt_pollute_model(self):
instance = AnotherUniquenessModel.objects.create(code='100')
serializer = AnotherUniquenessSerializer(instance)
assert all(
["Unique" not in repr(v) for v in AnotherUniquenessModel._meta.get_field('code').validators]
)
# Accessing data shouldn't effect validators on the model
serializer.data
assert all(
["Unique" not in repr(v) for v in AnotherUniquenessModel._meta.get_field('code').validators]
)
def test_related_model_is_unique(self):
data = {'username': 'Existing', 'email': 'new-email@example.com'}
rs = RelatedModelSerializer(data=data)
assert not rs.is_valid()
assert rs.errors == {'username': ['This field must be unique.']}
data = {'username': 'new-username', 'email': 'new-email@example.com'}
rs = RelatedModelSerializer(data=data)
assert rs.is_valid()
def test_value_error_treated_as_not_unique(self):
serializer = UniquenessIntegerSerializer(data={'integer': 'abc'})
assert serializer.is_valid()
# Tests for `UniqueTogetherValidator`
# -----------------------------------
| TestUniquenessValidation |
python | realpython__materials | python-property/circle_v3.py | {
"start": 0,
"end": 380
} | class ____:
def __init__(self, radius):
self.radius = radius
@property
def radius(self):
return self._radius
@radius.setter
def radius(self, value):
self._radius = float(value)
@property
def diameter(self):
return self.radius * 2
@diameter.setter
def diameter(self, value):
self.radius = value / 2
| Circle |
python | sympy__sympy | sympy/stats/crv_types.py | {
"start": 101885,
"end": 103479
} | class ____(SingleContinuousDistribution):
_argnames = ('b', 'eta')
set = Interval(0, oo)
@staticmethod
def check(b, eta):
_value_check(b > 0, "b must be positive")
_value_check(eta > 0, "eta must be positive")
def pdf(self, x):
b, eta = self.b, self.eta
return b*exp(-b*x)*exp(-eta*exp(-b*x))*(1+eta*(1-exp(-b*x)))
def ShiftedGompertz(name, b, eta):
r"""
Create a continuous random variable with a Shifted Gompertz distribution.
Explanation
===========
The density of the Shifted Gompertz distribution is given by
.. math::
f(x) := b e^{-b x} e^{-\eta \exp(-b x)} \left[1 + \eta(1 - e^(-bx)) \right]
with :math:`x \in [0, \infty)`.
Parameters
==========
b : Real number, `b > 0`, a scale
eta : Real number, `\eta > 0`, a shape
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import ShiftedGompertz, density
>>> from sympy import Symbol
>>> b = Symbol("b", positive=True)
>>> eta = Symbol("eta", positive=True)
>>> x = Symbol("x")
>>> X = ShiftedGompertz("x", b, eta)
>>> density(X)(x)
b*(eta*(1 - exp(-b*x)) + 1)*exp(-b*x)*exp(-eta*exp(-b*x))
References
==========
.. [1] https://en.wikipedia.org/wiki/Shifted_Gompertz_distribution
"""
return rv(name, ShiftedGompertzDistribution, (b, eta))
#-------------------------------------------------------------------------------
# StudentT distribution --------------------------------------------------------
| ShiftedGompertzDistribution |
python | getsentry__sentry | tests/sentry/receivers/test_sentry_apps.py | {
"start": 1112,
"end": 8176
} | class ____(APITestCase):
def setUp(self) -> None:
self.issue = self.create_group(project=self.project)
self.sentry_app = self.create_sentry_app(
events=["issue.resolved", "issue.ignored", "issue.unresolved"]
)
self.install = self.create_sentry_app_installation(
organization=self.organization, slug=self.sentry_app.slug
)
self.url = f"/api/0/projects/{self.organization.slug}/{self.issue.project.slug}/issues/?id={self.issue.id}"
self.login_as(self.user)
def update_issue(self, _data=None):
data = {"status": "resolved"}
data.update(_data or {})
self.client.put(self.url, data=data, format="json")
def test_notify_after_regress(self, delay: MagicMock) -> None:
# First we need to resolve the issue
self.update_issue({})
delay.assert_any_call(
installation_id=self.install.id,
issue_id=self.issue.id,
type="resolved",
user_id=self.user.id,
data={"resolution_type": "now"},
)
# Then marked it unresolved for regressed to make sense
self.update_issue({"status": "unresolved", "substatus": "regressed"})
delay.assert_any_call(
installation_id=self.install.id,
issue_id=self.issue.id,
type="unresolved",
user_id=self.user.id,
data={},
)
assert delay.call_count == 2
def test_notify_after_bulk_ongoing(self, delay: MagicMock) -> None:
# First we need to have an ignored issue
self.update_issue({"status": "ignored", "substatus": "archived_until_escalating"})
bulk_transition_group_to_ongoing(
from_status=GroupStatus.IGNORED,
from_substatus=GroupSubStatus.UNTIL_ESCALATING,
group_ids=[self.issue.id],
)
delay.assert_any_call(
installation_id=self.install.id,
issue_id=self.issue.id,
type="unresolved",
user_id=None,
data={},
)
assert delay.call_count == 2
def test_notify_after_escalating(self, delay: MagicMock) -> None:
# First we need to have an ignored issue
self.update_issue({"status": "ignored", "substatus": "archived_until_escalating"})
event = self.issue.get_latest_event()
manage_issue_states(
group=self.issue,
group_inbox_reason=GroupInboxReason.ESCALATING,
event=event,
activity_data={},
)
delay.assert_any_call(
installation_id=self.install.id,
issue_id=self.issue.id,
type="unresolved",
user_id=None,
data={},
)
assert delay.call_count == 2
def test_notify_after_basic_resolved(self, delay: MagicMock) -> None:
self.update_issue()
delay.assert_called_once_with(
installation_id=self.install.id,
issue_id=self.issue.id,
type="resolved",
user_id=self.user.id,
data={"resolution_type": "now"},
)
def test_notify_after_resolve_in_commit(self, delay: MagicMock) -> None:
repo = self.create_repo(project=self.project)
commit = self.create_commit(repo=repo)
self.update_issue(
{"statusDetails": {"inCommit": {"repository": repo.name, "commit": commit.key}}}
)
delay.assert_called_once_with(
installation_id=self.install.id,
issue_id=self.issue.id,
type="resolved",
user_id=self.user.id,
data={"resolution_type": "in_commit"},
)
def test_notify_after_resolve_in_specific_release(self, delay: MagicMock) -> None:
release = self.create_release(project=self.project)
self.update_issue({"statusDetails": {"inRelease": release.version}})
delay.assert_called_once_with(
installation_id=self.install.id,
issue_id=self.issue.id,
type="resolved",
user_id=self.user.id,
data={"resolution_type": "in_release"},
)
def test_notify_after_resolve_in_latest_release(self, delay: MagicMock) -> None:
self.create_release(project=self.project)
self.update_issue({"statusDetails": {"inRelease": "latest"}})
delay.assert_called_once_with(
installation_id=self.install.id,
issue_id=self.issue.id,
type="resolved",
user_id=self.user.id,
data={"resolution_type": "in_release"},
)
def test_notify_after_resolve_in_next_release(self, delay: MagicMock) -> None:
self.create_release(project=self.project)
self.update_issue({"statusDetails": {"inNextRelease": True}})
delay.assert_called_once_with(
installation_id=self.install.id,
issue_id=self.issue.id,
type="resolved",
user_id=self.user.id,
data={"resolution_type": "in_next_release"},
)
def test_notify_after_resolve_from_set_commits(self, delay: MagicMock) -> None:
repo = Repository.objects.create(organization_id=self.organization.id, name="test/repo")
release = Release.objects.create(version="abcabc", organization=self.organization)
commit = Commit.objects.create(
repository_id=repo.id, organization_id=self.organization.id, key="b" * 40
)
GroupLink.objects.create(
group_id=self.issue.id,
project_id=self.project.id,
linked_type=GroupLink.LinkedType.commit,
linked_id=commit.id,
)
release.add_project(self.project)
release.set_commits(
[
{
"id": "b" * 40,
"repository": repo.name,
"author_email": "foo@example.com",
"author_name": "Foo Bar",
"message": f"FIXES {self.issue.qualified_short_id}",
}
]
)
delay.assert_called_once_with(
installation_id=self.install.id,
issue_id=self.issue.id,
type="resolved",
user_id=None,
data={"resolution_type": "with_commit"},
)
def test_notify_after_issue_ignored(self, delay: MagicMock) -> None:
self.update_issue({"status": "ignored"})
delay.assert_called_once_with(
installation_id=self.install.id,
issue_id=self.issue.id,
type="ignored",
user_id=self.user.id,
data={},
)
def test_notify_pending_installation(self, delay: MagicMock) -> None:
self.install.status = SentryAppInstallationStatus.PENDING
with assume_test_silo_mode(SiloMode.CONTROL):
self.install.save()
self.update_issue()
assert not delay.called
@patch("sentry.sentry_apps.tasks.sentry_apps.workflow_notification.delay")
| TestIssueWorkflowNotifications |
python | lxml__lxml | src/lxml/html/__init__.py | {
"start": 25091,
"end": 25160
} | class ____(HtmlMixin, etree.PIBase):
pass
| HtmlProcessingInstruction |
python | getsentry__sentry | src/sentry/users/models/email.py | {
"start": 485,
"end": 2442
} | class ____(Model):
"""
Email represents a unique email. Email settings (unsubscribe state) should be associated here.
UserEmail represents whether a given user account has access to that email.
"""
__relocation_scope__ = RelocationScope.User
__relocation_dependencies__ = {"sentry.User"}
__relocation_custom_ordinal__ = ["email"]
email = CIEmailField(_("email address"), unique=True, max_length=200)
date_added = models.DateTimeField(default=timezone.now)
class Meta:
app_label = "sentry"
db_table = "sentry_email"
__repr__ = sane_repr("email")
@classmethod
def query_for_relocation_export(cls, q: models.Q, pk_map: PrimaryKeyMap) -> models.Q:
from sentry.users.models.user import User
from sentry.users.models.useremail import UserEmail
# `Sentry.Email` models don't have any explicit dependencies on `Sentry.User`, so we need to
# find them manually via `UserEmail`.
emails = (
UserEmail.objects.filter(user_id__in=pk_map.get_pks(get_model_name(User)))
.annotate(email_lower=models.Func(models.F("email"), function="LOWER"))
.values_list("email_lower", flat=True)
)
# Use case-insensitive matching as useremail and email are case-insensitive
# This doesn't handle upper case Email records, with lowercase Useremail records.
return q & models.Q(email__in=emails)
def write_relocation_import(
self, _s: ImportScope, _f: ImportFlags
) -> tuple[int, ImportKind] | None:
# Ensure that we never attempt to duplicate email entries, as they must always be unique.
(email, created) = self.__class__.objects.get_or_create(
email=self.email, defaults=model_to_dict(self)
)
if email:
self.pk = email.pk
self.save()
return (self.pk, ImportKind.Inserted if created else ImportKind.Existing)
| Email |
python | tiangolo__fastapi | docs_src/cookie_param_models/tutorial001_an.py | {
"start": 152,
"end": 391
} | class ____(BaseModel):
session_id: str
fatebook_tracker: Union[str, None] = None
googall_tracker: Union[str, None] = None
@app.get("/items/")
async def read_items(cookies: Annotated[Cookies, Cookie()]):
return cookies
| Cookies |
python | pytorch__pytorch | test/package/package_a/test_nn_module.py | {
"start": 55,
"end": 1234
} | class ____(torch.nn.Module):
def __init__(self, nz=6, ngf=9, nc=3):
super().__init__()
self.main = torch.nn.Sequential(
# input is Z, going into a convolution
torch.nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
torch.nn.BatchNorm2d(ngf * 8),
torch.nn.ReLU(True),
# state size. (ngf*8) x 4 x 4
torch.nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
torch.nn.BatchNorm2d(ngf * 4),
torch.nn.ReLU(True),
# state size. (ngf*4) x 8 x 8
torch.nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
torch.nn.BatchNorm2d(ngf * 2),
torch.nn.ReLU(True),
# state size. (ngf*2) x 16 x 16
torch.nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
torch.nn.BatchNorm2d(ngf),
torch.nn.ReLU(True),
# state size. (ngf) x 32 x 32
torch.nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
torch.nn.Tanh(),
# state size. (nc) x 64 x 64
)
def forward(self, input):
return self.main(input)
| TestNnModule |
python | pydantic__pydantic | pydantic/networks.py | {
"start": 31698,
"end": 42075
} | class ____(_repr.Representation):
"""
Info:
To use this type, you need to install the optional
[`email-validator`](https://github.com/JoshData/python-email-validator) package:
```bash
pip install email-validator
```
Validate a name and email address combination, as specified by
[RFC 5322](https://datatracker.ietf.org/doc/html/rfc5322#section-3.4).
The `NameEmail` has two properties: `name` and `email`.
In case the `name` is not provided, it's inferred from the email address.
```python
from pydantic import BaseModel, NameEmail
class User(BaseModel):
email: NameEmail
user = User(email='Fred Bloggs <fred.bloggs@example.com>')
print(user.email)
#> Fred Bloggs <fred.bloggs@example.com>
print(user.email.name)
#> Fred Bloggs
user = User(email='fred.bloggs@example.com')
print(user.email)
#> fred.bloggs <fred.bloggs@example.com>
print(user.email.name)
#> fred.bloggs
```
""" # noqa: D212
__slots__ = 'name', 'email'
def __init__(self, name: str, email: str):
self.name = name
self.email = email
def __eq__(self, other: Any) -> bool:
return isinstance(other, NameEmail) and (self.name, self.email) == (other.name, other.email)
@classmethod
def __get_pydantic_json_schema__(
cls, core_schema: core_schema.CoreSchema, handler: _schema_generation_shared.GetJsonSchemaHandler
) -> JsonSchemaValue:
field_schema = handler(core_schema)
field_schema.update(type='string', format='name-email')
return field_schema
@classmethod
def __get_pydantic_core_schema__(
cls,
_source: type[Any],
_handler: GetCoreSchemaHandler,
) -> core_schema.CoreSchema:
import_email_validator()
return core_schema.no_info_after_validator_function(
cls._validate,
core_schema.json_or_python_schema(
json_schema=core_schema.str_schema(),
python_schema=core_schema.union_schema(
[core_schema.is_instance_schema(cls), core_schema.str_schema()],
custom_error_type='name_email_type',
custom_error_message='Input is not a valid NameEmail',
),
serialization=core_schema.to_string_ser_schema(),
),
)
@classmethod
def _validate(cls, input_value: Self | str, /) -> Self:
if isinstance(input_value, str):
name, email = validate_email(input_value)
return cls(name, email)
else:
return input_value
def __str__(self) -> str:
if '@' in self.name:
return f'"{self.name}" <{self.email}>'
return f'{self.name} <{self.email}>'
IPvAnyAddressType: TypeAlias = 'IPv4Address | IPv6Address'
IPvAnyInterfaceType: TypeAlias = 'IPv4Interface | IPv6Interface'
IPvAnyNetworkType: TypeAlias = 'IPv4Network | IPv6Network'
if TYPE_CHECKING:
IPvAnyAddress = IPvAnyAddressType
IPvAnyInterface = IPvAnyInterfaceType
IPvAnyNetwork = IPvAnyNetworkType
else:
class IPvAnyAddress:
"""Validate an IPv4 or IPv6 address.
```python
from pydantic import BaseModel
from pydantic.networks import IPvAnyAddress
class IpModel(BaseModel):
ip: IPvAnyAddress
print(IpModel(ip='127.0.0.1'))
#> ip=IPv4Address('127.0.0.1')
try:
IpModel(ip='http://www.example.com')
except ValueError as e:
print(e.errors())
'''
[
{
'type': 'ip_any_address',
'loc': ('ip',),
'msg': 'value is not a valid IPv4 or IPv6 address',
'input': 'http://www.example.com',
}
]
'''
```
"""
__slots__ = ()
def __new__(cls, value: Any) -> IPvAnyAddressType:
"""Validate an IPv4 or IPv6 address."""
try:
return IPv4Address(value)
except ValueError:
pass
try:
return IPv6Address(value)
except ValueError:
raise PydanticCustomError('ip_any_address', 'value is not a valid IPv4 or IPv6 address')
@classmethod
def __get_pydantic_json_schema__(
cls, core_schema: core_schema.CoreSchema, handler: _schema_generation_shared.GetJsonSchemaHandler
) -> JsonSchemaValue:
field_schema = {}
field_schema.update(type='string', format='ipvanyaddress')
return field_schema
@classmethod
def __get_pydantic_core_schema__(
cls,
_source: type[Any],
_handler: GetCoreSchemaHandler,
) -> core_schema.CoreSchema:
return core_schema.no_info_plain_validator_function(
cls._validate, serialization=core_schema.to_string_ser_schema()
)
@classmethod
def _validate(cls, input_value: Any, /) -> IPvAnyAddressType:
return cls(input_value) # type: ignore[return-value]
class IPvAnyInterface:
"""Validate an IPv4 or IPv6 interface."""
__slots__ = ()
def __new__(cls, value: NetworkType) -> IPvAnyInterfaceType:
"""Validate an IPv4 or IPv6 interface."""
try:
return IPv4Interface(value)
except ValueError:
pass
try:
return IPv6Interface(value)
except ValueError:
raise PydanticCustomError('ip_any_interface', 'value is not a valid IPv4 or IPv6 interface')
@classmethod
def __get_pydantic_json_schema__(
cls, core_schema: core_schema.CoreSchema, handler: _schema_generation_shared.GetJsonSchemaHandler
) -> JsonSchemaValue:
field_schema = {}
field_schema.update(type='string', format='ipvanyinterface')
return field_schema
@classmethod
def __get_pydantic_core_schema__(
cls,
_source: type[Any],
_handler: GetCoreSchemaHandler,
) -> core_schema.CoreSchema:
return core_schema.no_info_plain_validator_function(
cls._validate, serialization=core_schema.to_string_ser_schema()
)
@classmethod
def _validate(cls, input_value: NetworkType, /) -> IPvAnyInterfaceType:
return cls(input_value) # type: ignore[return-value]
class IPvAnyNetwork:
"""Validate an IPv4 or IPv6 network."""
__slots__ = ()
def __new__(cls, value: NetworkType) -> IPvAnyNetworkType:
"""Validate an IPv4 or IPv6 network."""
# Assume IP Network is defined with a default value for `strict` argument.
# Define your own class if you want to specify network address check strictness.
try:
return IPv4Network(value)
except ValueError:
pass
try:
return IPv6Network(value)
except ValueError:
raise PydanticCustomError('ip_any_network', 'value is not a valid IPv4 or IPv6 network')
@classmethod
def __get_pydantic_json_schema__(
cls, core_schema: core_schema.CoreSchema, handler: _schema_generation_shared.GetJsonSchemaHandler
) -> JsonSchemaValue:
field_schema = {}
field_schema.update(type='string', format='ipvanynetwork')
return field_schema
@classmethod
def __get_pydantic_core_schema__(
cls,
_source: type[Any],
_handler: GetCoreSchemaHandler,
) -> core_schema.CoreSchema:
return core_schema.no_info_plain_validator_function(
cls._validate, serialization=core_schema.to_string_ser_schema()
)
@classmethod
def _validate(cls, input_value: NetworkType, /) -> IPvAnyNetworkType:
return cls(input_value) # type: ignore[return-value]
def _build_pretty_email_regex() -> re.Pattern[str]:
name_chars = r'[\w!#$%&\'*+\-/=?^_`{|}~]'
unquoted_name_group = rf'((?:{name_chars}+\s+)*{name_chars}+)'
quoted_name_group = r'"((?:[^"]|\")+)"'
email_group = r'<(.+)>'
return re.compile(rf'\s*(?:{unquoted_name_group}|{quoted_name_group})?\s*{email_group}\s*')
pretty_email_regex = _build_pretty_email_regex()
MAX_EMAIL_LENGTH = 2048
"""Maximum length for an email.
A somewhat arbitrary but very generous number compared to what is allowed by most implementations.
"""
def validate_email(value: str) -> tuple[str, str]:
"""Email address validation using [email-validator](https://pypi.org/project/email-validator/).
Returns:
A tuple containing the local part of the email (or the name for "pretty" email addresses)
and the normalized email.
Raises:
PydanticCustomError: If the email is invalid.
Note:
Note that:
* Raw IP address (literal) domain parts are not allowed.
* `"John Doe <local_part@domain.com>"` style "pretty" email addresses are processed.
* Spaces are striped from the beginning and end of addresses, but no error is raised.
"""
if email_validator is None:
import_email_validator()
if len(value) > MAX_EMAIL_LENGTH:
raise PydanticCustomError(
'value_error',
'value is not a valid email address: {reason}',
{'reason': f'Length must not exceed {MAX_EMAIL_LENGTH} characters'},
)
m = pretty_email_regex.fullmatch(value)
name: str | None = None
if m:
unquoted_name, quoted_name, value = m.groups()
name = unquoted_name or quoted_name
email = value.strip()
try:
parts = email_validator.validate_email(email, check_deliverability=False)
except email_validator.EmailNotValidError as e:
raise PydanticCustomError(
'value_error', 'value is not a valid email address: {reason}', {'reason': str(e.args[0])}
) from e
email = parts.normalized
assert email is not None
name = name or parts.local_part
return name, email
__getattr__ = getattr_migration(__name__)
| NameEmail |
python | openai__openai-python | src/openai/types/chat/chat_completion_allowed_tools_param.py | {
"start": 265,
"end": 1010
} | class ____(TypedDict, total=False):
mode: Required[Literal["auto", "required"]]
"""Constrains the tools available to the model to a pre-defined set.
`auto` allows the model to pick from among the allowed tools and generate a
message.
`required` requires the model to call one or more of the allowed tools.
"""
tools: Required[Iterable[Dict[str, object]]]
"""A list of tool definitions that the model should be allowed to call.
For the Chat Completions API, the list of tool definitions might look like:
```json
[
{ "type": "function", "function": { "name": "get_weather" } },
{ "type": "function", "function": { "name": "get_time" } }
]
```
"""
| ChatCompletionAllowedToolsParam |
python | jazzband__django-oauth-toolkit | tests/migrations/0005_basetestapplication_allowed_origins_and_more.py | {
"start": 158,
"end": 878
} | class ____(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.OAUTH2_PROVIDER_ID_TOKEN_MODEL),
("tests", "0004_basetestapplication_hash_client_secret_and_more"),
]
operations = [
migrations.AddField(
model_name="basetestapplication",
name="allowed_origins",
field=models.TextField(blank=True, help_text="Allowed origins list to enable CORS, space separated"),
),
migrations.AddField(
model_name="sampleapplication",
name="allowed_origins",
field=models.TextField(blank=True, help_text="Allowed origins list to enable CORS, space separated"),
),
]
| Migration |
python | huggingface__transformers | src/transformers/models/clip/modeling_clip.py | {
"start": 19522,
"end": 21323
} | class ____(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`CLIPEncoderLayer`].
Args:
config: CLIPConfig
"""
def __init__(self, config: CLIPConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([CLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
inputs_embeds,
attention_mask: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutput:
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
"""
hidden_states = inputs_embeds
for encoder_layer in self.layers:
hidden_states = encoder_layer(
hidden_states,
attention_mask,
**kwargs,
)
return BaseModelOutput(
last_hidden_state=hidden_states,
)
| CLIPEncoder |
python | keras-team__keras | keras/src/utils/torch_utils_test.py | {
"start": 370,
"end": 1363
} | class ____(models.Model):
def __init__(
self, use_batch_norm=False, num_torch_layers=1, *args, **kwargs
):
super().__init__(*args, **kwargs)
self.use_batch_norm = use_batch_norm
self.num_torch_layers = num_torch_layers
self.torch_wrappers = []
for _ in range(num_torch_layers):
modules = [torch.nn.Linear(2, 2)]
if use_batch_norm:
modules.append(torch.nn.BatchNorm1d(2))
torch_model = torch.nn.Sequential(*modules)
self.torch_wrappers.append(TorchModuleWrapper(torch_model))
self.fc = layers.Dense(1)
def call(self, x, training=None):
for wrapper in self.torch_wrappers:
x = wrapper(x, training=training)
return self.fc(x)
def get_config(self):
config = super().get_config()
config["use_batch_norm"] = self.use_batch_norm
config["num_torch_layers"] = self.num_torch_layers
return config
| Classifier |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-rag-evaluator/llama_index/packs/rag_evaluator/base.py | {
"start": 913,
"end": 16825
} | class ____(BaseLlamaPack):
"""
A pack for performing evaluation with your own RAG pipeline.
Args:
query_engine: The RAG pipeline to evaluate.
rag_dataset: The BaseLlamaDataset to evaluate on.
judge_llm: The LLM to use as the evaluator.
"""
def __init__(
self,
query_engine: BaseQueryEngine,
rag_dataset: BaseLlamaDataset,
judge_llm: Optional[LLM] = None,
embed_model: Optional[BaseEmbedding] = None,
show_progress: bool = True,
result_path: Optional[str] = None,
):
self.query_engine = query_engine
self.rag_dataset = rag_dataset
self._num_examples = len(self.rag_dataset.examples)
if judge_llm is None:
self.judge_llm = OpenAI(temperature=0, model="gpt-4-1106-preview")
else:
assert isinstance(judge_llm, LLM)
self.judge_llm = judge_llm
self.embed_model = embed_model or Settings.embed_model
self.show_progress = show_progress
self.evals = {
"correctness": [],
"relevancy": [],
"faithfulness": [],
"context_similarity": [],
}
self.eval_queue = deque(range(len(rag_dataset.examples)))
self.prediction_dataset = None
if result_path is None:
self.result_path = Path.cwd()
else:
self.result_path = Path(result_path)
if not self.result_path.is_absolute():
self.result_path = Path.cwd() / self.result_path
if not os.path.exists(self.result_path):
os.makedirs(self.result_path)
async def _amake_predictions(
self,
batch_size: int = 20,
sleep_time_in_seconds: int = 1,
):
"""Async make predictions with query engine."""
self.prediction_dataset: BaseLlamaPredictionDataset = (
await self.rag_dataset.amake_predictions_with(
self.query_engine,
show_progress=self.show_progress,
batch_size=batch_size,
sleep_time_in_seconds=sleep_time_in_seconds,
)
)
def _make_predictions(
self,
batch_size: int = 20,
sleep_time_in_seconds: int = 1,
):
"""Sync make predictions with query engine."""
self.prediction_dataset: BaseLlamaPredictionDataset = (
self.rag_dataset.make_predictions_with(
self.query_engine,
show_progress=self.show_progress,
batch_size=batch_size,
sleep_time_in_seconds=sleep_time_in_seconds,
)
)
def _prepare_judges(self):
"""Construct the evaluators."""
judges = {}
judges["correctness"] = CorrectnessEvaluator(
llm=self.judge_llm,
)
judges["relevancy"] = RelevancyEvaluator(
llm=self.judge_llm,
)
judges["faithfulness"] = FaithfulnessEvaluator(
llm=self.judge_llm,
)
judges["semantic_similarity"] = SemanticSimilarityEvaluator(
embed_model=self.embed_model
)
return judges
async def _areturn_null_eval_result(self, query) -> EvaluationResult:
"""
A dummy async method that returns None.
NOTE: this is used to handle case when creating async tasks for evaluating
predictions where contexts do not exist.
"""
return EvaluationResult(
query=query,
)
def _return_null_eval_result(self, query) -> EvaluationResult:
"""
A dummy async method that returns None.
NOTE: this is used to handle case when creating async tasks for evaluating
predictions where contexts do not exist.
"""
return EvaluationResult(
query=query,
)
def _create_async_evaluate_example_prediction_tasks(
self, judges, example, prediction, sleep_time_in_seconds
):
"""Collect the co-routines."""
correctness_task = judges["correctness"].aevaluate(
query=example.query,
response=prediction.response,
reference=example.reference_answer,
sleep_time_in_seconds=sleep_time_in_seconds,
)
relevancy_task = judges["relevancy"].aevaluate(
query=example.query,
response=prediction.response,
contexts=prediction.contexts,
sleep_time_in_seconds=sleep_time_in_seconds,
)
faithfulness_task = judges["faithfulness"].aevaluate(
query=example.query,
response=prediction.response,
contexts=prediction.contexts,
sleep_time_in_seconds=sleep_time_in_seconds,
)
if example.reference_contexts and prediction.contexts:
semantic_similarity_task = judges["semantic_similarity"].aevaluate(
query=example.query,
response="\n".join(prediction.contexts),
reference="\n".join(example.reference_contexts),
)
else:
semantic_similarity_task = self._areturn_null_eval_result(
query=example.query
)
return (
correctness_task,
relevancy_task,
faithfulness_task,
semantic_similarity_task,
)
def _evaluate_example_prediction(self, judges, example, prediction):
"""Collect the co-routines."""
correctness_result = judges["correctness"].evaluate(
query=example.query,
response=prediction.response,
reference=example.reference_answer,
)
relevancy_result = judges["relevancy"].evaluate(
query=example.query,
response=prediction.response,
contexts=prediction.contexts,
)
faithfulness_result = judges["faithfulness"].evaluate(
query=example.query,
response=prediction.response,
contexts=prediction.contexts,
)
if example.reference_contexts and prediction.contexts:
semantic_similarity_result = judges["semantic_similarity"].evaluate(
query=example.query,
response="\n".join(prediction.contexts),
reference="\n".join(example.reference_contexts),
)
else:
semantic_similarity_result = self._return_null_eval_result(
query=example.query
)
return (
correctness_result,
relevancy_result,
faithfulness_result,
semantic_similarity_result,
)
def _save_evaluations(self):
"""Save evaluation json object."""
# saving evaluations
evaluations_objects = {
"context_similarity": [e.dict() for e in self.evals["context_similarity"]],
"correctness": [e.dict() for e in self.evals["correctness"]],
"faithfulness": [e.dict() for e in self.evals["faithfulness"]],
"relevancy": [e.dict() for e in self.evals["relevancy"]],
}
with open(
os.path.join(self.result_path, "_evaluations.json"), "w"
) as json_file:
json.dump(evaluations_objects, json_file)
def _prepare_and_save_benchmark_results(self):
"""Get mean score across all of the evaluated examples-predictions."""
_, mean_correctness_df = get_eval_results_df(
["base_rag"] * len(self.evals["correctness"]),
self.evals["correctness"],
metric="correctness",
)
_, mean_relevancy_df = get_eval_results_df(
["base_rag"] * len(self.evals["relevancy"]),
self.evals["relevancy"],
metric="relevancy",
)
_, mean_faithfulness_df = get_eval_results_df(
["base_rag"] * len(self.evals["faithfulness"]),
self.evals["faithfulness"],
metric="faithfulness",
)
_, mean_context_similarity_df = get_eval_results_df(
["base_rag"] * len(self.evals["context_similarity"]),
self.evals["context_similarity"],
metric="context_similarity",
)
mean_scores_df = pd.concat(
[
mean_correctness_df.reset_index(),
mean_relevancy_df.reset_index(),
mean_faithfulness_df.reset_index(),
mean_context_similarity_df.reset_index(),
],
axis=0,
ignore_index=True,
)
mean_scores_df = mean_scores_df.set_index("index")
mean_scores_df.index = mean_scores_df.index.set_names(["metrics"])
# save mean_scores_df
mean_scores_df.to_csv(os.path.join(self.result_path, "benchmark.csv"))
return mean_scores_df
def _make_evaluations(
self,
batch_size,
sleep_time_in_seconds,
):
"""Sync make evaluations."""
judges = self._prepare_judges()
start_ix = self.eval_queue[0]
for batch in self._batch_examples_and_preds(
self.rag_dataset.examples,
self.prediction_dataset.predictions,
batch_size=batch_size,
start_position=start_ix,
):
examples, predictions = batch
for example, prediction in tqdm.tqdm(zip(examples, predictions)):
(
correctness_result,
relevancy_result,
faithfulness_result,
semantic_similarity_result,
) = self._evaluate_example_prediction(
judges=judges, example=example, prediction=prediction
)
self.evals["correctness"].append(correctness_result)
self.evals["relevancy"].append(relevancy_result)
self.evals["faithfulness"].append(faithfulness_result)
self.evals["context_similarity"].append(semantic_similarity_result)
time.sleep(sleep_time_in_seconds)
self._save_evaluations()
return self._prepare_and_save_benchmark_results()
def _batch_examples_and_preds(
self,
examples: List[Any],
predictions: List[Any],
batch_size: int = 10,
start_position: int = 0,
):
"""Batches examples and predictions with a given batch_size."""
assert self._num_examples == len(predictions)
for ndx in range(start_position, self._num_examples, batch_size):
yield (
examples[ndx : min(ndx + batch_size, self._num_examples)],
predictions[ndx : min(ndx + batch_size, self._num_examples)],
)
async def _amake_evaluations(self, batch_size, sleep_time_in_seconds):
"""Async make evaluations."""
judges = self._prepare_judges()
ix = self.eval_queue[0]
batch_iterator = self._batch_examples_and_preds(
self.rag_dataset.examples,
self.prediction_dataset.predictions,
batch_size=batch_size,
start_position=ix,
)
total_batches = (self._num_examples - ix + 1) / batch_size + (
(self._num_examples - ix + 1) % batch_size != 0
)
if self.show_progress:
batch_iterator = tqdm_asyncio(
batch_iterator,
desc="Batch processing of evaluations",
total=total_batches,
)
for batch in batch_iterator:
examples, predictions = batch
tasks = []
for example, prediction in zip(examples, predictions):
(
correctness_task,
relevancy_task,
faithfulness_task,
semantic_similarity_task,
) = self._create_async_evaluate_example_prediction_tasks(
judges=judges,
example=example,
prediction=prediction,
sleep_time_in_seconds=sleep_time_in_seconds,
)
tasks += [
correctness_task,
relevancy_task,
faithfulness_task,
semantic_similarity_task,
]
# do this in batches to avoid RateLimitError
try:
eval_results: List[EvaluationResult] = await asyncio.gather(*tasks)
except RateLimitError as err:
if self.show_progress:
batch_iterator.close()
raise ValueError(
"You've hit rate limits on your OpenAI subscription. This"
" `RagEvaluatorPack` maintains state of evaluations. Simply"
" re-invoke .arun() in order to continue from where you left"
" off."
) from err
# store in memory
# since final result of eval_results respects order of inputs
# just take appropriate slices
self.evals["correctness"] += eval_results[::4]
self.evals["relevancy"] += eval_results[1::4]
self.evals["faithfulness"] += eval_results[2::4]
self.evals["context_similarity"] += eval_results[3::4]
# update queue
for _ in range(batch_size):
if self.eval_queue:
self.eval_queue.popleft()
ix += 1
if self.show_progress:
batch_iterator.update()
batch_iterator.refresh()
self._save_evaluations()
return self._prepare_and_save_benchmark_results()
def run(self, batch_size: int = 10, sleep_time_in_seconds: int = 1):
if batch_size > 10:
warnings.warn(
"You've set a large batch_size (>10). If using OpenAI GPT-4 as "
" `judge_llm` (which is the default judge_llm),"
" you may experience a RateLimitError. Previous successful eval "
" responses are cached per batch. So hitting a RateLimitError"
" would mean you'd lose all of the current batches successful "
" GPT-4 calls."
)
if self.prediction_dataset is None:
self._make_predictions(batch_size, sleep_time_in_seconds)
# evaluate predictions
eval_sleep_time_in_seconds = (
sleep_time_in_seconds * 2
) # since we make 3 evaluator llm calls
eval_batch_size = int(max(batch_size / 4, 1))
return self._make_evaluations(
batch_size=eval_batch_size, sleep_time_in_seconds=eval_sleep_time_in_seconds
)
async def arun(
self,
batch_size: int = 10,
sleep_time_in_seconds: int = 1,
):
if batch_size > 10:
warnings.warn(
"You've set a large batch_size (>10). If using OpenAI GPT-4 as "
" `judge_llm` (which is the default judge_llm),"
" you may experience a RateLimitError. Previous successful eval "
" responses are cached per batch. So hitting a RateLimitError"
" would mean you'd lose all of the current batches successful "
" GPT-4 calls."
)
# make predictions
if self.prediction_dataset is None:
await self._amake_predictions(batch_size, sleep_time_in_seconds)
# evaluate predictions
eval_sleep_time_in_seconds = (
sleep_time_in_seconds * 2
) # since we make 3 evaluator llm calls and default is gpt-4
# which is heavily rate-limited
eval_batch_size = int(max(batch_size / 4, 1))
return await self._amake_evaluations(
batch_size=eval_batch_size, sleep_time_in_seconds=eval_sleep_time_in_seconds
)
| RagEvaluatorPack |
python | ansible__ansible | test/lib/ansible_test/_internal/diff.py | {
"start": 391,
"end": 1004
} | class ____:
"""Parsed diff for a single file."""
def __init__(self, old_path: str, new_path: str) -> None:
self.old = DiffSide(old_path, new=False)
self.new = DiffSide(new_path, new=True)
self.headers: list[str] = []
self.binary = False
def append_header(self, line: str) -> None:
"""Append the given line to the list of headers for this file."""
self.headers.append(line)
@property
def is_complete(self) -> bool:
"""True if the diff is complete, otherwise False."""
return self.old.is_complete and self.new.is_complete
| FileDiff |
python | allegroai__clearml | clearml/backend_api/services/v2_20/projects.py | {
"start": 41858,
"end": 42719
} | class ____(Response):
"""
Response of projects.create endpoint.
:param id: Project id
:type id: str
"""
_service = "projects"
_action = "create"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {"id": {"description": "Project id", "type": ["string", "null"]}},
"type": "object",
}
def __init__(self, id: Optional[str] = None, **kwargs: Any) -> None:
super(CreateResponse, self).__init__(**kwargs)
self.id = id
@schema_property("id")
def id(self) -> Optional[str]:
return self._property_id
@id.setter
def id(self, value: Optional[str]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
| CreateResponse |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictReadOnly2.py | {
"start": 3764,
"end": 3894
} | class ____(TypedDict):
x: NotRequired[Never]
y: ReadOnly[int]
def update_a(a: TD18, b: TD19) -> None:
a.update(b)
| TD19 |
python | falconry__falcon | falcon/media/handlers.py | {
"start": 917,
"end": 1652
} | class ____(BinaryBaseHandlerWS):
"""Placeholder handler that always raises an error.
This handler is used by the framework for media types that require an
external dependency that can not be found.
"""
def __init__(self, handler: str, library: str) -> None:
self._msg = ('The {} requires the {} library, which is not installed.').format(
handler, library
)
def _raise(self, *args: Any, **kwargs: Any) -> NoReturn:
raise RuntimeError(self._msg)
# TODO(kgriffs): Add support for async later if needed.
serialize = deserialize = _raise
_ResolverMethodReturnTuple = tuple[
BaseHandler, Optional[SerializeSync], Optional[DeserializeSync]
]
| MissingDependencyHandler |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/errors.py | {
"start": 12901,
"end": 13152
} | class ____(DagsterUserCodeExecutionError):
"""Indicates an error in the op type system at runtime. E.g. a op receives an
unexpected input, or produces an output that does not match the type of the output definition.
"""
| DagsterTypeCheckError |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol4.py | {
"start": 506,
"end": 648
} | class ____:
x: int
# This should generate an error because x is not a ClassVar in B
# but is a ClassVar in the protocol.
b: ProtoB = B()
| B |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_types.py | {
"start": 7265,
"end": 8875
} | class ____(_LiteralRoundTripFixture, fixtures.TablesTest):
"""Add ARRAY test suite, #8138.
This only works on PostgreSQL right now.
"""
__requires__ = ("array_type",)
__backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"array_table",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("single_dim", ARRAY(Integer)),
Column("multi_dim", ARRAY(String, dimensions=2)),
)
def test_array_roundtrip(self, connection):
array_table = self.tables.array_table
connection.execute(
array_table.insert(),
{
"id": 1,
"single_dim": [1, 2, 3],
"multi_dim": [["one", "two"], ["thr'ee", "réve🐍 illé"]],
},
)
row = connection.execute(
select(array_table.c.single_dim, array_table.c.multi_dim)
).first()
eq_(row, ([1, 2, 3], [["one", "two"], ["thr'ee", "réve🐍 illé"]]))
def test_literal_simple(self, literal_round_trip):
literal_round_trip(
ARRAY(Integer),
([1, 2, 3],),
([1, 2, 3],),
support_whereclause=False,
)
def test_literal_complex(self, literal_round_trip):
literal_round_trip(
ARRAY(String, dimensions=2),
([["one", "two"], ["thr'ee", "réve🐍 illé"]],),
([["one", "two"], ["thr'ee", "réve🐍 illé"]],),
support_whereclause=False,
)
| ArrayTest |
python | allegroai__clearml | clearml/backend_api/services/v2_20/events.py | {
"start": 135952,
"end": 136292
} | class ____(Response):
"""
Response of events.multi_task_scalar_metrics_iter_histogram endpoint.
"""
_service = "events"
_action = "multi_task_scalar_metrics_iter_histogram"
_version = "2.20"
_schema = {"additionalProperties": True, "definitions": {}, "type": "object"}
| MultiTaskScalarMetricsIterHistogramResponse |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/shopify_graphql/bulk/query.py | {
"start": 125859,
"end": 130981
} | class ____(DeliveryZoneList):
"""
query DeliveryZoneList {
deliveryProfiles(
first: 1
) {
pageInfo {
hasNextPage
endCursor
}
nodes {
profileLocationGroups(
locationGroupId: "<locationGroupId>"
) {
locationGroupZones(
first: 100
) {
nodes {
zone {
id
name
countries {
id
name
translatedName
code {
countryCode
restOfWorld
}
provinces {
id
translatedName
name
code
}
}
}
}
pageInfo {
hasNextPage
endCursor
}
}
}
}
}
}
"""
page_size = 1
sub_page_size = 100
def __init__(self, location_group_id: str, location_group_zones_cursor: str = None):
self.location_group_id = location_group_id
self.location_group_zones_cursor = location_group_zones_cursor
@property
def query_nodes(self) -> Optional[Union[List[Field], List[str]]]:
location_group_id = '"' + self.location_group_id + '"'
location_group_zones_arguments = [Argument(name="first", value=self.sub_page_size)]
if self.location_group_zones_cursor:
cursor = '"' + self.location_group_zones_cursor + '"'
location_group_zones_arguments.append(Argument(name="after", value=cursor))
query_nodes: List[Field] = [
Field(name="pageInfo", fields=["hasNextPage", "endCursor"]),
Field(
name="nodes",
fields=[
Field(
name="profileLocationGroups",
arguments=[Argument(name="locationGroupId", value=location_group_id)],
fields=[
Field(
name="locationGroupZones",
arguments=location_group_zones_arguments,
fields=[
Field(
name="nodes",
fields=[
Field(
name="zone",
fields=[
"id",
"name",
Field(
name="countries",
fields=[
"id",
"name",
Field(name="translatedName", alias="translated_name"),
Field(
name="code",
fields=[
Field(name="countryCode", alias="country_code"),
Field(name="restOfWorld", alias="rest_of_world"),
],
),
Field(
name="provinces",
fields=[
"id",
"name",
"code",
Field(name="translatedName", alias="translated_name"),
],
),
],
),
],
)
],
),
Field(name="pageInfo", fields=["hasNextPage", "endCursor"]),
],
),
],
),
],
),
]
return query_nodes
| DeliveryProfile |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 1646,
"end": 1795
} | class ____(ShowFieldTypeAndContent, PolymorphicModel):
field1 = models.CharField(max_length=30)
m2m = models.ManyToManyField("self")
| ModelShow3 |
python | keras-team__keras | keras/src/utils/progbar.py | {
"start": 188,
"end": 10354
} | class ____:
"""Displays a progress bar.
Args:
target: Total number of steps expected, None if unknown.
width: Progress bar width on screen.
verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
stateful_metrics: Iterable of string names of metrics that should *not*
be averaged over time. Metrics in this list will be displayed as-is.
All others will be averaged by the progbar before display.
interval: Minimum visual progress update interval (in seconds).
unit_name: Display name for step counts (usually "step" or "sample").
"""
def __init__(
self,
target,
width=20,
verbose=1,
interval=0.05,
stateful_metrics=None,
unit_name="step",
):
self.target = target
self.width = width
self.verbose = verbose
self.interval = interval
self.unit_name = unit_name
if stateful_metrics:
self.stateful_metrics = set(stateful_metrics)
else:
self.stateful_metrics = set()
self._dynamic_display = (
(hasattr(sys.stdout, "isatty") and sys.stdout.isatty())
or "ipykernel" in sys.modules
or "posix" in sys.modules
or "PYCHARM_HOSTED" in os.environ
)
self._seen_so_far = 0
# We use a dict + list to avoid garbage collection
# issues found in OrderedDict
self._values = {}
self._values_order = []
self._start = time.time()
self._last_update = 0
self._time_at_epoch_start = self._start
self._time_after_first_step = None
self._prev_total_width = 0
def update(self, current, values=None, finalize=None):
"""Updates the progress bar.
Args:
current: Index of current step.
values: List of tuples: `(name, value_for_last_step)`. If `name` is
in `stateful_metrics`, `value_for_last_step` will be displayed
as-is. Else, an average of the metric over time will be
displayed.
finalize: Whether this is the last update for the progress bar. If
`None`, defaults to `current >= self.target`.
"""
if finalize is None:
if self.target is None:
finalize = False
else:
finalize = current >= self.target
values = values or []
for k, v in values:
if k not in self._values_order:
self._values_order.append(k)
if k not in self.stateful_metrics:
# In the case that progress bar doesn't have a target value in
# the first epoch, both on_batch_end and on_epoch_end will be
# called, which will cause 'current' and 'self._seen_so_far' to
# have the same value. Force the minimal value to 1 here,
# otherwise stateful_metric will be 0s.
if finalize:
self._values[k] = [v, 1]
else:
value_base = max(current - self._seen_so_far, 1)
if k not in self._values:
self._values[k] = [v * value_base, value_base]
else:
self._values[k][0] += v * value_base
self._values[k][1] += value_base
else:
# Stateful metrics output a numeric value. This representation
# means "take an average from a single value" but keeps the
# numeric formatting.
self._values[k] = [v, 1]
self._seen_so_far = current
message = ""
special_char_len = 0
now = time.time()
time_per_unit = self._estimate_step_duration(current, now)
if self.verbose == 1:
if now - self._last_update < self.interval and not finalize:
return
if self._dynamic_display:
message += "\b" * self._prev_total_width
message += "\r"
else:
message += "\n"
if self.target is not None:
numdigits = int(math.log10(self.target)) + 1
bar = (f"%{numdigits}d/%d") % (current, self.target)
bar = f"\x1b[1m{bar}\x1b[0m "
special_char_len += 8
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += f"\33[32m{'━' * prog_width}\x1b[0m"
special_char_len += 9
bar += f"\33[37m{'━' * (self.width - prog_width)}\x1b[0m"
special_char_len += 9
else:
bar = "%7d/Unknown" % current
message += bar
# Add ETA if applicable
if self.target is not None and not finalize:
eta = time_per_unit * (self.target - current)
if eta > 3600:
eta_format = "%d:%02d:%02d" % (
eta // 3600,
(eta % 3600) // 60,
eta % 60,
)
elif eta > 60:
eta_format = "%d:%02d" % (eta // 60, eta % 60)
else:
eta_format = "%ds" % eta
info = f" \x1b[1m{eta_format}\x1b[0m"
else:
# Time elapsed since start, in seconds
info = f" \x1b[1m{now - self._start:.0f}s\x1b[0m"
special_char_len += 8
# Add time/step
info += self._format_time(time_per_unit, self.unit_name)
# Add metrics
for k in self._values_order:
info += f" - {k}:"
if isinstance(self._values[k], list):
values, count = self._values[k]
if not isinstance(values, float):
values = np.mean(values)
avg = values / max(1, count)
if abs(avg) > 1e-3:
info += f" {avg:.4f}"
else:
info += f" {avg:.4e}"
else:
info += f" {self._values[k]}"
message += info
total_width = len(bar) + len(info) - special_char_len
if self._prev_total_width > total_width:
message += " " * (self._prev_total_width - total_width)
if finalize:
message += "\n"
io_utils.print_msg(message, line_break=False)
self._prev_total_width = total_width
message = ""
elif self.verbose == 2:
if finalize:
numdigits = int(math.log10(self.target)) + 1
count = f"%{numdigits}d/%d" % (current, self.target)
info = f"{count} - {now - self._start:.0f}s"
info += f" -{self._format_time(time_per_unit, self.unit_name)}"
for k in self._values_order:
info += f" - {k}:"
values, count = self._values[k]
if not isinstance(values, float):
values = np.mean(values)
avg = values / max(1, count)
if avg > 1e-3:
info += f" {avg:.4f}"
else:
info += f" {avg:.4e}"
info += "\n"
message += info
io_utils.print_msg(message, line_break=False)
message = ""
self._last_update = now
def add(self, n, values=None):
self.update(self._seen_so_far + n, values)
def _format_time(self, time_per_unit, unit_name):
"""format a given duration to display to the user.
Given the duration, this function formats it in either milliseconds
or seconds and displays the unit (i.e. ms/step or s/epoch).
Args:
time_per_unit: the duration to display
unit_name: the name of the unit to display
Returns:
A string with the correctly formatted duration and units
"""
formatted = ""
if time_per_unit >= 1 or time_per_unit == 0:
formatted += f" {time_per_unit:.0f}s/{unit_name}"
elif time_per_unit >= 1e-3:
formatted += f" {time_per_unit * 1000.0:.0f}ms/{unit_name}"
else:
formatted += f" {time_per_unit * 1000000.0:.0f}us/{unit_name}"
return formatted
def _estimate_step_duration(self, current, now):
"""Estimate the duration of a single step.
Given the step number `current` and the corresponding time `now` this
function returns an estimate for how long a single step takes. If this
is called before one step has been completed (i.e. `current == 0`) then
zero is given as an estimate. The duration estimate ignores the duration
of the (assumed to be non-representative) first step for estimates when
more steps are available (i.e. `current>1`).
Args:
current: Index of current step.
now: The current time.
Returns: Estimate of the duration of a single step.
"""
if current:
# there are a few special scenarios here:
# 1) somebody is calling the progress bar without ever supplying
# step 1
# 2) somebody is calling the progress bar and supplies step one
# multiple times, e.g. as part of a finalizing call
# in these cases, we just fall back to the simple calculation
if self._time_after_first_step is not None and current > 1:
time_per_unit = (now - self._time_after_first_step) / (
current - 1
)
else:
time_per_unit = (now - self._start) / current
if current == 1:
self._time_after_first_step = now
return time_per_unit
else:
return 0
| Progbar |
python | apache__airflow | airflow-core/src/airflow/ti_deps/deps/prev_dagrun_dep.py | {
"start": 1689,
"end": 8878
} | class ____(BaseTIDep):
"""
Is the past dagrun in a state that allows this task instance to run.
For example, did this task instance's task in the previous dagrun complete
if we are depending on past?
"""
NAME = "Previous Dagrun State"
IGNORABLE = True
IS_TASK_DEP = True
@staticmethod
def _push_past_deps_met_xcom_if_needed(ti: TI, dep_context):
if dep_context.wait_for_past_depends_before_skipping:
ti.xcom_push(key=PAST_DEPENDS_MET, value=True)
@staticmethod
def _has_tis(dagrun: DagRun, task_id: str, *, session: Session) -> bool:
"""
Check if a task has presence in the specified DAG run.
This function exists for easy mocking in tests.
"""
return exists_query(
TI.dag_id == dagrun.dag_id,
TI.task_id == task_id,
TI.run_id == dagrun.run_id,
session=session,
)
@staticmethod
def _has_any_prior_tis(ti: TI, *, session: Session) -> bool:
"""
Check if a task has ever been run before.
This function exists for easy mocking in tests.
"""
query = exists_query(
TI.dag_id == ti.dag_id,
TI.task_id == ti.task_id,
TI.logical_date < ti.logical_date,
session=session,
)
return query
@staticmethod
def _count_unsuccessful_tis(dagrun: DagRun, task_id: str, *, session: Session) -> int:
"""
Get a count of unsuccessful task instances in a given run.
Due to historical design considerations, "unsuccessful" here means the
task instance is not in either SUCCESS or SKIPPED state. This means that
unfinished states such as RUNNING are considered unsuccessful.
This function exists for easy mocking in tests.
"""
unsuccessful_tis_count = session.scalar(
select(func.count()).where(
TI.dag_id == dagrun.dag_id,
TI.task_id == task_id,
TI.run_id == dagrun.run_id,
or_(TI.state.is_(None), TI.state.not_in(_SUCCESSFUL_STATES)),
)
)
return 0 if unsuccessful_tis_count is None else unsuccessful_tis_count
@staticmethod
def _has_unsuccessful_dependants(
dagrun: DagRun,
task: SdkOperator | SchedulerOperator,
*,
session: Session,
) -> bool:
"""
Check if any of the task's dependants are unsuccessful in a given run.
Due to historical design considerations, "unsuccessful" here means the
task instance is not in either SUCCESS or SKIPPED state. This means that
unfinished states such as RUNNING are considered unsuccessful.
This function exists for easy mocking in tests.
"""
if not task.downstream_task_ids:
return False
return exists_query(
TI.dag_id == dagrun.dag_id,
TI.task_id.in_(task.downstream_task_ids),
TI.run_id == dagrun.run_id,
or_(TI.state.is_(None), TI.state.not_in(_SUCCESSFUL_STATES)),
session=session,
)
@provide_session
def _get_dep_statuses(self, ti: TI, session: Session, dep_context):
if TYPE_CHECKING:
assert ti.task
if dep_context.ignore_depends_on_past:
self._push_past_deps_met_xcom_if_needed(ti, dep_context)
reason = "The context specified that the state of past DAGs could be ignored."
yield self._passing_status(reason=reason)
return
if not ti.task.depends_on_past:
self._push_past_deps_met_xcom_if_needed(ti, dep_context)
yield self._passing_status(reason="The task did not have depends_on_past set.")
return
dr = ti.get_dagrun(session=session)
if dr.backfill_id:
sort_ordinal = session.scalar(
select(BackfillDagRun.sort_ordinal).where(
BackfillDagRun.backfill_id == dr.backfill_id,
BackfillDagRun.dag_run_id == dr.id,
)
)
if sort_ordinal == 1:
yield self._passing_status(reason="Task instance is first run in a backfill.")
return
if not dr:
self._push_past_deps_met_xcom_if_needed(ti, dep_context)
yield self._passing_status(reason="This task instance does not belong to a DAG.")
return
# Don't depend on the previous task instance if we are the first task.
catchup = ti.task.dag and ti.task.dag.catchup
if catchup:
last_dagrun = DagRun.get_previous_scheduled_dagrun(dr.id, session)
else:
last_dagrun = DagRun.get_previous_dagrun(dr, session=session)
# First ever run for this DAG.
if not last_dagrun:
self._push_past_deps_met_xcom_if_needed(ti, dep_context)
yield self._passing_status(reason="This task instance was the first task instance for its task.")
return
# There was a DAG run, but the task wasn't active back then.
if (
catchup
and ti.task.start_date is not None
and last_dagrun.logical_date is not None
and last_dagrun.logical_date < ti.task.start_date
):
self._push_past_deps_met_xcom_if_needed(ti, dep_context)
yield self._passing_status(reason="This task instance was the first task instance for its task.")
return
if not self._has_tis(last_dagrun, ti.task_id, session=session):
if ti.task.ignore_first_depends_on_past:
if not self._has_any_prior_tis(ti, session=session):
self._push_past_deps_met_xcom_if_needed(ti, dep_context)
yield self._passing_status(
reason="ignore_first_depends_on_past is true for this task "
"and it is the first task instance for its task."
)
return
yield self._failing_status(
reason="depends_on_past is true for this task's DAG, but the previous "
"task instance has not run yet."
)
return
unsuccessful_tis_count = self._count_unsuccessful_tis(last_dagrun, ti.task_id, session=session)
if unsuccessful_tis_count > 0:
reason = (
f"depends_on_past is true for this task, but {unsuccessful_tis_count} "
f"previous task instance(s) are not in a successful state."
)
yield self._failing_status(reason=reason)
return
if ti.task.wait_for_downstream and self._has_unsuccessful_dependants(
last_dagrun, ti.task, session=session
):
yield self._failing_status(
reason=(
"The tasks downstream of the previous task instance(s) "
"haven't completed, and wait_for_downstream is True."
)
)
return
self._push_past_deps_met_xcom_if_needed(ti, dep_context)
| PrevDagrunDep |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/utils.py | {
"start": 12075,
"end": 14368
} | class ____:
"""
This is the placeholder for the tmp stream state for each incremental stream,
It's empty, once the sync has started and is being updated while sync operation takes place,
It holds the `temporary stream state values` before they are updated to have the opportunity to reuse this state.
"""
cached_state: Dict = {}
@staticmethod
def stream_state_to_tmp(*args, state_object: Dict = cached_state, **kwargs) -> Dict:
"""
Method to save the current stream state for future re-use within slicing.
The method requires having the temporary `state_object` as placeholder.
Because of the specific of Shopify's entities relations, we have the opportunity to fetch the updates,
for particular stream using the `Incremental Refresh`, inside slicing.
For example:
if `order refund` records were updated, then the `orders` is updated as well.
if 'transaction` was added to the order, then the `orders` is updated as well.
etc.
"""
# Map the input *args, the sequece should be always keeped up to the input function
# change the mapping if needed
stream: object = args[0] # the self instance of the stream
current_stream_state: Dict = kwargs.get("stream_state") or {}
# get the current tmp_state_value
tmp_stream_state_value = state_object.get(stream.name, {}).get(stream.cursor_field, "")
# Save the curent stream value for current sync, if present.
if current_stream_state:
state_object[stream.name] = {stream.cursor_field: current_stream_state.get(stream.cursor_field, "")}
# Check if we have the saved state and keep the minimun value
if tmp_stream_state_value:
state_object[stream.name] = {
stream.cursor_field: min(current_stream_state.get(stream.cursor_field, ""), tmp_stream_state_value)
}
return state_object
def cache_stream_state(func) -> Callable[..., Any]:
@wraps(func)
def decorator(*args, **kwargs) -> Any:
EagerlyCachedStreamState.stream_state_to_tmp(*args, **kwargs)
return func(*args, **kwargs)
return decorator
| EagerlyCachedStreamState |
python | fluentpython__example-code-2e | 21-async/mojifinder/bottle.py | {
"start": 70305,
"end": 70851
} | class ____(HTTPResponse):
default_status = 500
def __init__(self, status=None, body=None, exception=None, traceback=None,
**options):
self.exception = exception
self.traceback = traceback
super(HTTPError, self).__init__(body, status, **options)
###############################################################################
# Plugins ######################################################################
###############################################################################
| HTTPError |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1008339,
"end": 1008832
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of UnfollowOrganization"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "organization")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
organization = sgqlc.types.Field("Organization", graphql_name="organization")
"""The organization that was unfollowed."""
| UnfollowOrganizationPayload |
python | python-visualization__folium | folium/raster_layers.py | {
"start": 431,
"end": 5597
} | class ____(Layer):
"""
Create a tile layer to append on a Map.
Parameters
----------
tiles: str or :class:`xyzservices.TileProvider`, default 'OpenStreetMap'
Map tileset to use. Folium has built-in all tilesets
available in the ``xyzservices`` package. For example, you can pass
any of the following to the "tiles" keyword:
- "OpenStreetMap"
- "CartoDB Positron"
- "CartoDB Voyager"
Explore more provider names available in ``xyzservices`` here:
https://leaflet-extras.github.io/leaflet-providers/preview/.
You can also pass a custom tileset by passing a
:class:`xyzservices.TileProvider` or a Leaflet-style
URL to the tiles parameter: ``https://{s}.yourtiles.com/{z}/{x}/{y}.png``.
min_zoom: int, optional, default 0
Minimum allowed zoom level for this tile layer. Filled by xyzservices by default.
max_zoom: int, optional, default 18
Maximum allowed zoom level for this tile layer. Filled by xyzservices by default.
max_native_zoom: int, default None
The highest zoom level at which the tile server can provide tiles.
Filled by xyzservices by default.
By setting max_zoom higher than max_native_zoom, you can zoom in
past max_native_zoom, tiles will be autoscaled.
attr: string, default None
Map tile attribution; only required if passing custom tile URL.
detect_retina: bool, default False
If true and user is on a retina display, it will request four
tiles of half the specified size and a bigger zoom level in place
of one to utilize the high resolution.
name : string, default None
The name of the Layer, as it will appear in LayerControls
overlay : bool, default False
Adds the layer as an optional overlay (True) or the base layer (False).
control : bool, default True
Whether the Layer will be included in LayerControls.
show: bool, default True
Whether the layer will be shown on opening.
When adding multiple base layers, use this parameter to select which one
should be shown when opening the map, by not showing the others.
subdomains: list of strings, default ['abc']
Subdomains of the tile service.
tms: bool, default False
If true, inverses Y axis numbering for tiles (turn this on for TMS
services).
opacity: float, default 1
Sets the opacity for the layer.
**kwargs : additional keyword arguments
Other keyword arguments are passed as options to the Leaflet tileLayer
object.
"""
_template = Template(
"""
{% macro script(this, kwargs) %}
var {{ this.get_name() }} = L.tileLayer(
{{ this.tiles|tojson }},
{{ this.options|tojavascript }}
);
{% endmacro %}
"""
)
def __init__(
self,
tiles: Union[str, xyzservices.TileProvider] = "OpenStreetMap",
min_zoom: Optional[int] = None,
max_zoom: Optional[int] = None,
max_native_zoom: Optional[int] = None,
attr: Optional[str] = None,
detect_retina: bool = False,
name: Optional[str] = None,
overlay: bool = False,
control: bool = True,
show: bool = True,
no_wrap: bool = False,
subdomains: str = "abc",
tms: bool = False,
opacity: float = 1,
**kwargs,
):
if isinstance(tiles, str):
if tiles.lower() == "openstreetmap":
tiles = "OpenStreetMap Mapnik"
if name is None:
name = "openstreetmap"
try:
tiles = xyzservices.providers.query_name(tiles)
except ValueError:
# no match, likely a custom URL
pass
if isinstance(tiles, xyzservices.TileProvider):
attr = attr if attr else tiles.html_attribution # type: ignore
min_zoom = min_zoom or tiles.get("min_zoom")
max_zoom = max_zoom or tiles.get("max_zoom")
subdomains = tiles.get("subdomains", subdomains)
if name is None:
name = tiles.name.replace(".", "").lower()
tiles = tiles.build_url(fill_subdomain=False, scale_factor="{r}") # type: ignore
self.tile_name = (
name if name is not None else "".join(tiles.lower().strip().split())
)
super().__init__(
name=self.tile_name, overlay=overlay, control=control, show=show
)
self._name = "TileLayer"
self.tiles = tiles
if not attr:
raise ValueError("Custom tiles must have an attribution.")
self.options = remove_empty(
min_zoom=min_zoom or 0,
max_zoom=max_zoom or 18,
max_native_zoom=max_native_zoom or max_zoom or 18,
no_wrap=no_wrap,
attribution=attr,
subdomains=subdomains,
detect_retina=detect_retina,
tms=tms,
opacity=opacity,
**kwargs,
)
| TileLayer |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_events.py | {
"start": 1643,
"end": 4432
} | class ____(
APITransactionTestCase,
SnubaTestCase,
SpanTestCase,
OurLogTestCase,
TraceMetricsTestCase,
ProfileFunctionsTestCase,
):
viewname = "sentry-api-0-organization-events"
referrer = "api.organization-events"
def setUp(self) -> None:
super().setUp()
self.nine_mins_ago = before_now(minutes=9)
self.ten_mins_ago = before_now(minutes=10)
self.ten_mins_ago_iso = self.ten_mins_ago.replace(microsecond=0).isoformat()
self.eleven_mins_ago = before_now(minutes=11)
self.eleven_mins_ago_iso = self.eleven_mins_ago.isoformat()
self.transaction_data = load_data("transaction", timestamp=self.ten_mins_ago)
self.features: dict[str, bool] = {}
def client_get(self, *args, **kwargs):
return self.client.get(*args, **kwargs)
def reverse_url(self):
return reverse(
self.viewname,
kwargs={"organization_id_or_slug": self.organization.slug},
)
def do_request(self, query, features=None, **kwargs):
if features is None:
features = {"organizations:discover-basic": True}
features.update(self.features)
self.login_as(user=self.user)
with self.feature(features):
return self.client_get(self.reverse_url(), query, format="json", **kwargs)
def _setup_user_misery(
self, per_transaction_threshold: bool = False, project: Project | None = None
) -> None:
_project = project or self.project
# If duration is > 300 * 4 then the user is frustrated
# There's a total of 4 users and three of them reach the frustration threshold
events = [
("one", 300),
("two", 300),
("one", 3000), # Frustrated
("two", 3000), # Frustrated
("three", 400),
("four", 4000), # Frustrated
]
for idx, event in enumerate(events):
data = self.load_data(
timestamp=before_now(minutes=(10 + idx)),
duration=timedelta(milliseconds=event[1]),
)
data["event_id"] = f"{idx}" * 32
data["transaction"] = f"/count_miserable/horribilis/{idx}"
data["user"] = {"email": f"{event[0]}@example.com"}
self.store_event(data, project_id=_project.id)
if per_transaction_threshold and idx % 2:
ProjectTransactionThresholdOverride.objects.create(
transaction=f"/count_miserable/horribilis/{idx}",
project=_project,
organization=_project.organization,
threshold=100 * idx,
metric=TransactionMetric.DURATION.value,
)
| OrganizationEventsEndpointTestBase |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF023.py | {
"start": 3700,
"end": 4529
} | class ____(object):
__slots__ = (
# name of descriptor record, also a module global name; a string
'name',
# length of argument, in bytes; an int; UP_TO_NEWLINE and
# TAKEN_FROM_ARGUMENT{1,4,8} are negative values for variable-length
# cases
'n',
# a function taking a file-like object, reading this kind of argument
# from the object at the current position, advancing the current
# position by n bytes, and returning the value of the argument
'reader',
# human-readable docs for this arg descriptor; a string
'doc',
)
####################################
# Should be flagged, but not fixed
####################################
# from cpython/Lib/test/test_inspect.py.
# Multiline dicts are out of scope.
| ArgumentDescriptor |
python | joblib__joblib | joblib/externals/loky/backend/resource_tracker.py | {
"start": 3251,
"end": 15403
} | class ____(_ResourceTracker):
"""Resource tracker with refcounting scheme.
This class is an extension of the multiprocessing ResourceTracker class
which implements a reference counting scheme to avoid unlinking shared
resources still in use in other processes.
This feature is notably used by `joblib.Parallel` to share temporary
folders and memory mapped files between the main process and the worker
processes.
The actual implementation of the refcounting scheme is in the main
function, which is run in a dedicated process.
"""
def maybe_unlink(self, name, rtype):
"""Decrement the refcount of a resource, and delete it if it hits 0"""
self._send("MAYBE_UNLINK", name, rtype)
def ensure_running(self):
"""Make sure that resource tracker process is running.
This can be run from any process. Usually a child process will use
the resource created by its parent.
This function is necessary for backward compatibility with python
versions before 3.13.7.
"""
return self._ensure_running_and_write()
def _teardown_dead_process(self):
# Override this function for compatibility with windows and
# for python version before 3.13.7
# At this point, the resource_tracker process has been killed
# or crashed.
os.close(self._fd)
# Let's remove the process entry from the process table on POSIX system
# to avoid zombie processes.
if os.name == "posix":
try:
# _pid can be None if this process is a child from another
# python process, which has started the resource_tracker.
if self._pid is not None:
os.waitpid(self._pid, 0)
except OSError:
# The resource_tracker has already been terminated.
pass
self._fd = None
self._pid = None
warnings.warn(
"resource_tracker: process died unexpectedly, relaunching. "
"Some folders/semaphores might leak."
)
def _launch(self):
# This is the overridden part of the resource tracker, which launches
# loky's version, which is compatible with windows and allow to track
# folders with external ref counting.
fds_to_pass = []
try:
fds_to_pass.append(sys.stderr.fileno())
except Exception:
pass
# Create a pipe for posix and windows
r, w = os.pipe()
if sys.platform == "win32":
_r = duplicate(msvcrt.get_osfhandle(r), inheritable=True)
os.close(r)
r = _r
cmd = f"from {main.__module__} import main; main({r}, {VERBOSE})"
try:
fds_to_pass.append(r)
# process will out live us, so no need to wait on pid
exe = spawn.get_executable()
args = [exe, *util._args_from_interpreter_flags(), "-c", cmd]
util.debug(f"launching resource tracker: {args}")
# bpo-33613: Register a signal mask that will block the
# signals. This signal mask will be inherited by the child
# that is going to be spawned and will protect the child from a
# race condition that can make the child die before it
# registers signal handlers for SIGINT and SIGTERM. The mask is
# unregistered after spawning the child.
try:
if _HAVE_SIGMASK:
signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS)
pid = spawnv_passfds(exe, args, fds_to_pass)
finally:
if _HAVE_SIGMASK:
signal.pthread_sigmask(
signal.SIG_UNBLOCK, _IGNORED_SIGNALS
)
except BaseException:
os.close(w)
raise
else:
self._fd = w
self._pid = pid
finally:
if sys.platform == "win32":
_winapi.CloseHandle(r)
else:
os.close(r)
def _ensure_running_and_write(self, msg=None):
"""Make sure that resource tracker process is running.
This can be run from any process. Usually a child process will use
the resource created by its parent.
This function is added for compatibility with python version before 3.13.7.
"""
with self._lock:
if (
self._fd is not None
): # resource tracker was launched before, is it still running?
if msg is None:
to_send = b"PROBE:0:noop\n"
else:
to_send = msg
try:
self._write(to_send)
except OSError:
self._teardown_dead_process()
self._launch()
msg = None # message was sent in probe
else:
self._launch()
if msg is not None:
self._write(msg)
def _write(self, msg):
nbytes = os.write(self._fd, msg)
assert nbytes == len(msg), f"{nbytes=} != {len(msg)=}"
def __del__(self):
# ignore error due to trying to clean up child process which has already been
# shutdown on windows. See https://github.com/joblib/loky/pull/450
# This is only required if __del__ is defined
if not hasattr(_ResourceTracker, "__del__"):
return
try:
super().__del__()
except ChildProcessError:
pass
_resource_tracker = ResourceTracker()
ensure_running = _resource_tracker.ensure_running
register = _resource_tracker.register
maybe_unlink = _resource_tracker.maybe_unlink
unregister = _resource_tracker.unregister
getfd = _resource_tracker.getfd
def main(fd, verbose=0):
"""Run resource tracker."""
if verbose:
util.log_to_stderr(level=util.DEBUG)
# protect the process from ^C and "killall python" etc
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
if _HAVE_SIGMASK:
signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)
for f in (sys.stdin, sys.stdout):
try:
f.close()
except Exception:
pass
if verbose:
util.debug("Main resource tracker is running")
registry = {rtype: {} for rtype in _CLEANUP_FUNCS.keys()}
try:
if sys.platform == "win32":
fd = msvcrt.open_osfhandle(fd, os.O_RDONLY)
# keep track of registered/unregistered resources
with open(fd, "rb") as f:
for line in f:
try:
splitted = line.strip().decode("ascii").split(":")
# name can potentially contain separator symbols (for
# instance folders on Windows)
cmd, name, rtype = (
splitted[0],
":".join(splitted[1:-1]),
splitted[-1],
)
if rtype not in _CLEANUP_FUNCS:
raise ValueError(
f"Cannot register {name} for automatic cleanup: "
f"unknown resource type ({rtype}). Resource type "
"should be one of the following: "
f"{list(_CLEANUP_FUNCS.keys())}"
)
if cmd == "PROBE":
pass
elif cmd == "REGISTER":
if name not in registry[rtype]:
registry[rtype][name] = 1
else:
registry[rtype][name] += 1
if verbose:
util.debug(
"[ResourceTracker] incremented refcount of "
f"{rtype} {name} "
f"(current {registry[rtype][name]})"
)
elif cmd == "UNREGISTER":
del registry[rtype][name]
if verbose:
util.debug(
f"[ResourceTracker] unregister {name} {rtype}: "
f"registry({len(registry)})"
)
elif cmd == "MAYBE_UNLINK":
registry[rtype][name] -= 1
if verbose:
util.debug(
"[ResourceTracker] decremented refcount of "
f"{rtype} {name} "
f"(current {registry[rtype][name]})"
)
if registry[rtype][name] == 0:
del registry[rtype][name]
try:
if verbose:
util.debug(
f"[ResourceTracker] unlink {name}"
)
_CLEANUP_FUNCS[rtype](name)
except Exception as e:
warnings.warn(
f"resource_tracker: {name}: {e!r}"
)
else:
raise RuntimeError(f"unrecognized command {cmd!r}")
except BaseException:
try:
sys.excepthook(*sys.exc_info())
except BaseException:
pass
finally:
# all processes have terminated; cleanup any remaining resources
def _unlink_resources(rtype_registry, rtype):
if rtype_registry:
try:
warnings.warn(
"resource_tracker: There appear to be "
f"{len(rtype_registry)} leaked {rtype} objects to "
"clean up at shutdown"
)
except Exception:
pass
for name in rtype_registry:
# For some reason the process which created and registered this
# resource has failed to unregister it. Presumably it has
# died. We therefore clean it up.
try:
_CLEANUP_FUNCS[rtype](name)
if verbose:
util.debug(f"[ResourceTracker] unlink {name}")
except Exception as e:
warnings.warn(f"resource_tracker: {name}: {e!r}")
for rtype, rtype_registry in registry.items():
if rtype == "folder":
continue
else:
_unlink_resources(rtype_registry, rtype)
# The default cleanup routine for folders deletes everything inside
# those folders recursively, which can include other resources tracked
# by the resource tracker). To limit the risk of the resource tracker
# attempting to delete twice a resource (once as part of a tracked
# folder, and once as a resource), we delete the folders after all
# other resource types.
if "folder" in registry:
_unlink_resources(registry["folder"], "folder")
if verbose:
util.debug("resource tracker shut down")
def spawnv_passfds(path, args, passfds):
if sys.platform != "win32":
args = [arg.encode("utf-8") for arg in args]
path = path.encode("utf-8")
return util.spawnv_passfds(path, args, passfds)
else:
passfds = sorted(passfds)
cmd = " ".join(f'"{x}"' for x in args)
try:
_, ht, pid, _ = _winapi.CreateProcess(
path, cmd, None, None, True, 0, None, None, None
)
_winapi.CloseHandle(ht)
except BaseException:
pass
return pid
| ResourceTracker |
python | getsentry__sentry | src/sentry/roles/manager.py | {
"start": 2061,
"end": 3882
} | class ____(Generic[R]):
"""Represent the set of all roles at one level (org or team)."""
def __init__(self, roles: Iterable[R], default_id: str | None = None) -> None:
self._priority_seq = tuple(sorted(roles, key=lambda r: r.priority))
self._id_map = {r.id: r for r in self._priority_seq}
self._choices = tuple((r.id, r.name) for r in self._priority_seq)
self._descriptions = tuple((r.id, r.desc) for r in self._priority_seq)
self._default = self._id_map[default_id] if default_id else self._priority_seq[0]
self._top_dog = self._priority_seq[-1]
def __iter__(self) -> Iterable[R]:
yield from self._priority_seq
def can_manage(self, role: str, other: str) -> bool:
return self.get(role).priority >= self.get(other).priority
def get(self, id: str) -> R:
return self._id_map[id]
def get_all(self) -> Sequence[R]:
return self._priority_seq
def get_choices(self) -> Sequence[tuple[str, str]]:
return self._choices
def get_descriptions(self) -> Sequence[tuple[str, str]]:
return self._descriptions
def get_default(self) -> R:
return self._default
def get_top_dog(self) -> R:
return self._top_dog
def with_scope(self, scope: str) -> Iterable[R]:
for role in self.get_all():
if role.has_scope(scope):
yield role
def with_any_scope(self, scopes: Iterable[str]) -> Iterable[R]:
for role in self.get_all():
if any(role.has_scope(scope) for scope in scopes):
yield role
def get_sorted_roles(self, roles: Iterable[str]) -> list[R]:
return sorted(
[self.get(role) for role in roles],
key=lambda r: r.priority,
reverse=True,
)
| RoleLevel |
python | python-openxml__python-docx | src/docx/shared.py | {
"start": 12517,
"end": 13506
} | class ____:
"""Accepts `str` fragments and joins them together, in order, on `.pop().
Handy when text in a stream is broken up arbitrarily and you want to join it back
together within certain bounds. The optional `separator` argument determines how
the text fragments are punctuated, defaulting to the empty string.
"""
def __init__(self, separator: str = ""):
self._separator = separator
self._texts: List[str] = []
def push(self, text: str) -> None:
"""Add a text fragment to the accumulator."""
self._texts.append(text)
def pop(self) -> Iterator[str]:
"""Generate sero-or-one str from those accumulated.
Using `yield from accum.pop()` in a generator setting avoids producing an empty
string when no text is in the accumulator.
"""
if not self._texts:
return
text = self._separator.join(self._texts)
self._texts.clear()
yield text
| TextAccumulator |
python | huggingface__transformers | src/transformers/models/glm4v_moe/modular_glm4v_moe.py | {
"start": 21860,
"end": 22369
} | class ____(Glm4MoePreTrainedModel):
config: Glm4vMoeConfig
base_model_prefix = "model"
input_modalities = ("text", "image", "video")
_no_split_modules = ["Glm4vMoeTextDecoderLayer", "Glm4vMoeVisionBlock"]
_skip_keys_device_placement = "past_key_values"
_can_record_outputs = {
"hidden_states": Glm4vMoeTextDecoderLayer,
"attentions": Glm4vMoeTextAttention,
"router_logits": OutputRecorder(nn.Linear, layer_name="mlp.gate", index=0),
}
| Glm4vMoePreTrainedModel |
python | kamyu104__LeetCode-Solutions | Python/find-the-winner-of-the-circular-game.py | {
"start": 50,
"end": 315
} | class ____(object):
def findTheWinner(self, n, k):
"""
:type n: int
:type k: int
:rtype: int
"""
return reduce(lambda idx, n:(idx+k)%(n+1), xrange(1, n), 0)+1
# Time: O(n)
# Space: O(n)
# top-down solution
| Solution |
python | sqlalchemy__sqlalchemy | test/engine/test_reconnect.py | {
"start": 42750,
"end": 44174
} | class ____(fixtures.TestBase):
__backend__ = True
def test_pre_ping_db_is_restarted(self):
engine = engines.reconnecting_engine(options={"pool_pre_ping": True})
conn = engine.connect()
eq_(conn.execute(select(1)).scalar(), 1)
stale_connection = conn.connection.dbapi_connection
conn.close()
engine.test_shutdown()
engine.test_restart()
conn = engine.connect()
eq_(conn.execute(select(1)).scalar(), 1)
conn.close()
with expect_raises(engine.dialect.dbapi.Error, check_context=False):
curs = stale_connection.cursor()
curs.execute("select 1")
def test_pre_ping_db_stays_shutdown(self):
engine = engines.reconnecting_engine(options={"pool_pre_ping": True})
if isinstance(engine.pool, pool.QueuePool):
eq_(engine.pool.checkedin(), 0)
eq_(engine.pool._overflow, -5)
conn = engine.connect()
eq_(conn.execute(select(1)).scalar(), 1)
conn.close()
if isinstance(engine.pool, pool.QueuePool):
eq_(engine.pool.checkedin(), 1)
eq_(engine.pool._overflow, -4)
engine.test_shutdown(stop=True)
assert_raises(exc.DBAPIError, engine.connect)
if isinstance(engine.pool, pool.QueuePool):
eq_(engine.pool.checkedin(), 1)
eq_(engine.pool._overflow, -4)
| PrePingRealTest |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_basic.py | {
"start": 111267,
"end": 114340
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"base",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("type", String(50), nullable=False),
)
@classmethod
def setup_classes(cls):
class A(cls.Comparable):
pass
class B(A):
pass
class C(B):
pass
class D(B):
pass
class E(A):
pass
@classmethod
def setup_mappers(cls):
A, C, B, E, D, base = (
cls.classes.A,
cls.classes.C,
cls.classes.B,
cls.classes.E,
cls.classes.D,
cls.tables.base,
)
cls.mapper_registry.map_imperatively(
A, base, polymorphic_on=base.c.type
)
with expect_warnings(
r"Mapper\[B\(base\)\] does not indicate a "
"'polymorphic_identity',"
):
cls.mapper_registry.map_imperatively(B, inherits=A)
cls.mapper_registry.map_imperatively(
C, inherits=B, polymorphic_identity="c"
)
cls.mapper_registry.map_imperatively(
D, inherits=B, polymorphic_identity="d"
)
cls.mapper_registry.map_imperatively(
E, inherits=A, polymorphic_identity="e"
)
cls.mapper_registry.configure()
def test_warning(self, decl_base):
"""test #7545"""
class A(decl_base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
type = Column(String)
__mapper_args__ = {"polymorphic_on": type}
class B(A):
__mapper_args__ = {"polymorphic_identity": "b"}
with expect_warnings(
r"Mapper\[C\(a\)\] does not indicate a 'polymorphic_identity',"
):
class C(A):
__mapper_args__ = {}
def test_load_from_middle(self):
C, B = self.classes.C, self.classes.B
s = fixture_session()
s.add(C())
o = s.query(B).first()
eq_(o.type, "c")
assert isinstance(o, C)
def test_load_from_base(self):
A, C = self.classes.A, self.classes.C
s = fixture_session()
s.add(C())
o = s.query(A).first()
eq_(o.type, "c")
assert isinstance(o, C)
def test_discriminator(self):
C, B, base = (self.classes.C, self.classes.B, self.tables.base)
assert class_mapper(B).polymorphic_on is base.c.type
assert class_mapper(C).polymorphic_on is base.c.type
def test_load_multiple_from_middle(self):
C, B, E, D, base = (
self.classes.C,
self.classes.B,
self.classes.E,
self.classes.D,
self.tables.base,
)
s = fixture_session()
s.add_all([C(), D(), E()])
eq_(s.query(B).order_by(base.c.type).all(), [C(), D()])
| NoPolyIdentInMiddleTest |
python | python-poetry__poetry | src/poetry/console/commands/run.py | {
"start": 360,
"end": 3364
} | class ____(EnvCommand):
name = "run"
description = "Runs a command in the appropriate environment."
arguments: ClassVar[list[Argument]] = [
argument("args", "The command and arguments/options to run.", multiple=True)
]
def handle(self) -> int:
args = self.argument("args")
script = args[0]
scripts = self.poetry.local_config.get("scripts")
if scripts and script in scripts:
return self.run_script(scripts[script], args)
try:
return self.env.execute(*args)
except FileNotFoundError:
self.line_error(f"<error>Command not found: <c1>{script}</c1></error>")
return 1
@property
def _module(self) -> Module:
from poetry.core.masonry.utils.module import Module
poetry = self.poetry
package = poetry.package
path = poetry.file.path.parent
module = Module(package.name, path.as_posix(), package.packages)
return module
def run_script(self, script: str | dict[str, str], args: list[str]) -> int:
"""Runs an entry point script defined in the section ``[tool.poetry.scripts]``.
When a script exists in the venv bin folder, i.e. after ``poetry install``,
then ``sys.argv[0]`` must be set to the full path of the executable, so
``poetry run foo`` and ``poetry shell``, ``foo`` have the same ``sys.argv[0]``
that points to the full path.
Otherwise (when an entry point script does not exist), ``sys.argv[0]`` is the
script name only, i.e. ``poetry run foo`` has ``sys.argv == ['foo']``.
"""
for script_dir in self.env.script_dirs:
script_path = script_dir / args[0]
if WINDOWS:
script_path = script_path.with_suffix(".cmd")
if script_path.exists():
args = [str(script_path), *args[1:]]
break
else:
# If we reach this point, the script is not installed
self._warning_not_installed_script(args[0])
if isinstance(script, dict):
script = script["callable"]
module, callable_ = script.split(":")
src_in_sys_path = "sys.path.append('src'); " if self._module.is_in_src() else ""
cmd = ["python", "-c"]
cmd += [
"import sys; "
"from importlib import import_module; "
f"sys.argv = {args!r}; {src_in_sys_path}"
f"sys.exit(import_module('{module}').{callable_}())"
]
return self.env.execute(*cmd)
def _warning_not_installed_script(self, script: str) -> None:
message = f"""\
Warning: '{script}' is an entry point defined in pyproject.toml, but it's not \
installed as a script. You may get improper `sys.argv[0]`.
The support to run uninstalled scripts will be removed in a future release.
Run `poetry install` to resolve and get rid of this message.
"""
self.line_error(message, style="warning")
| RunCommand |
python | pytorch__pytorch | torch/profiler/_memory_profiler.py | {
"start": 11315,
"end": 11780
} | class ____:
def __init__(self, result: _ProfilerResult) -> None:
self._root_nodes = result.experimental_event_tree()
self._sorted_nodes = tuple(sorted(self.dfs(), key=lambda x: x.start_time_ns))
def dfs(self, *args, **kwargs) -> Iterator[_ProfilerEvent]:
yield from _utils.traverse_dfs(self._root_nodes, *args, **kwargs)
@property
def sorted_nodes(self) -> tuple[_ProfilerEvent, ...]:
return self._sorted_nodes
| OpTree |
python | google__pytype | pytype/tests/test_fiddle_overlay.py | {
"start": 649,
"end": 805
} | class ____:
cwam: cwam.ClassWithAnnotatedMethod
fiddle.Config(
Dataclass,
cwam=fiddle.Config(cwam.ClassWithAnnotatedMethod.method),
)
"""
| Dataclass |
python | getsentry__sentry | src/sentry/metrics/precise_dogstatsd.py | {
"start": 171,
"end": 4519
} | class ____(MetricsBackend):
def __init__(self, prefix: str | None = None, **kwargs: Any) -> None:
self.tags = kwargs.pop("tags", None)
instance_kwargs: dict[str, Any] = {
"disable_telemetry": True,
"disable_buffering": False,
# When enabled, a background thread will be used to send metric payloads to the Agent.
"disable_background_sender": False,
}
if socket_path := kwargs.get("statsd_socket_path"):
instance_kwargs["socket_path"] = socket_path
else:
if host := kwargs.get("statsd_host"):
instance_kwargs["host"] = host
if port := kwargs.get("statsd_port"):
instance_kwargs["port"] = int(port)
self.statsd = DogStatsd(**instance_kwargs)
# Origin detection is enabled after 0.45 by default.
# Disable it since it silently fails.
# Ref: https://github.com/DataDog/datadogpy/issues/764
self.statsd._container_id = None
# Applications should call wait_for_pending() before exiting to make sure all pending payloads are sent.
atexit.register(self.statsd.wait_for_pending)
super().__init__(prefix=prefix)
def incr(
self,
key: str,
instance: str | None = None,
tags: Tags | None = None,
amount: float | int = 1,
sample_rate: float = 1,
unit: str | None = None,
stacklevel: int = 0,
) -> None:
tags = dict(tags or ())
if self.tags:
tags.update(self.tags)
if instance:
tags["instance"] = instance
tags_list = [f"{k}:{v}" for k, v in tags.items()]
self.statsd.increment(self._get_key(key), amount, sample_rate=sample_rate, tags=tags_list)
def timing(
self,
key: str,
value: float,
instance: str | None = None,
tags: Tags | None = None,
sample_rate: float = 1,
stacklevel: int = 0,
) -> None:
tags = dict(tags or ())
if self.tags:
tags.update(self.tags)
if instance:
tags["instance"] = instance
tags_list = [f"{k}:{v}" for k, v in tags.items()]
self.statsd.distribution(self._get_key(key), value, sample_rate=sample_rate, tags=tags_list)
def gauge(
self,
key: str,
value: float,
instance: str | None = None,
tags: Tags | None = None,
sample_rate: float = 1,
unit: str | None = None,
stacklevel: int = 0,
) -> None:
tags = dict(tags or ())
if self.tags:
tags.update(self.tags)
if instance:
tags["instance"] = instance
tags_list = [f"{k}:{v}" for k, v in tags.items()]
self.statsd.gauge(self._get_key(key), value, sample_rate=sample_rate, tags=tags_list)
def distribution(
self,
key: str,
value: float,
instance: str | None = None,
tags: Tags | None = None,
sample_rate: float = 1,
unit: str | None = None,
stacklevel: int = 0,
) -> None:
tags = dict(tags or ())
if self.tags:
tags.update(self.tags)
if instance:
tags["instance"] = instance
tags_list = [f"{k}:{v}" for k, v in tags.items()]
self.statsd.distribution(self._get_key(key), value, sample_rate=sample_rate, tags=tags_list)
def event(
self,
title: str,
message: str,
alert_type: str | None = None,
aggregation_key: str | None = None,
source_type_name: str | None = None,
priority: str | None = None,
instance: str | None = None,
tags: Tags | None = None,
stacklevel: int = 0,
) -> None:
tags = dict(tags or ())
if self.tags:
tags.update(self.tags)
if instance:
tags["instance"] = instance
tags_list = [f"{k}:{v}" for k, v in tags.items()]
self.statsd.event(
title=title,
message=message,
alert_type=alert_type,
aggregation_key=aggregation_key,
source_type_name=source_type_name,
priority=priority,
tags=tags_list,
hostname=self.host,
)
| PreciseDogStatsdMetricsBackend |
python | gevent__gevent | src/greentest/3.13/test_socket.py | {
"start": 200061,
"end": 206890
} | class ____(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
self.event = threading.Event()
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def assert_sock_timeout(self, sock, timeout):
self.assertEqual(self.serv.gettimeout(), timeout)
blocking = (timeout != 0.0)
self.assertEqual(sock.getblocking(), blocking)
if fcntl is not None:
# When a Python socket has a non-zero timeout, it's switched
# internally to a non-blocking mode. Later, sock.sendall(),
# sock.recv(), and other socket operations use a select() call and
# handle EWOULDBLOCK/EGAIN on all socket operations. That's how
# timeouts are enforced.
fd_blocking = (timeout is None)
flag = fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK)
self.assertEqual(not bool(flag & os.O_NONBLOCK), fd_blocking)
def testSetBlocking(self):
# Test setblocking() and settimeout() methods
self.serv.setblocking(True)
self.assert_sock_timeout(self.serv, None)
self.serv.setblocking(False)
self.assert_sock_timeout(self.serv, 0.0)
self.serv.settimeout(None)
self.assert_sock_timeout(self.serv, None)
self.serv.settimeout(0)
self.assert_sock_timeout(self.serv, 0)
self.serv.settimeout(10)
self.assert_sock_timeout(self.serv, 10)
self.serv.settimeout(0)
self.assert_sock_timeout(self.serv, 0)
def _testSetBlocking(self):
pass
@support.cpython_only
@unittest.skipIf(_testcapi is None, "requires _testcapi")
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# create a socket with SOCK_NONBLOCK
self.serv.close()
self.serv = socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
self.assert_sock_timeout(self.serv, 0)
def _testInitNonBlocking(self):
pass
def testInheritFlagsBlocking(self):
# bpo-7995: accept() on a listening socket with a timeout and the
# default timeout is None, the resulting socket must be blocking.
with socket_setdefaulttimeout(None):
self.serv.settimeout(10)
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testInheritFlagsBlocking(self):
self.cli.connect((HOST, self.port))
def testInheritFlagsTimeout(self):
# bpo-7995: accept() on a listening socket with a timeout and the
# default timeout is None, the resulting socket must inherit
# the default timeout.
default_timeout = 20.0
with socket_setdefaulttimeout(default_timeout):
self.serv.settimeout(10)
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertEqual(conn.gettimeout(), default_timeout)
def _testInheritFlagsTimeout(self):
self.cli.connect((HOST, self.port))
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(False)
# connect() didn't start: non-blocking accept() fails
start_time = time.monotonic()
with self.assertRaises(BlockingIOError):
conn, addr = self.serv.accept()
dt = time.monotonic() - start_time
self.assertLess(dt, 1.0)
self.event.set()
read, write, err = select.select([self.serv], [], [], support.LONG_TIMEOUT)
if self.serv not in read:
self.fail("Error trying to do accept after select.")
# connect() completed: non-blocking accept() doesn't block
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testAccept(self):
# don't connect before event is set to check
# that non-blocking accept() raises BlockingIOError
self.event.wait()
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
conn.setblocking(False)
# the server didn't send data yet: non-blocking recv() fails
with self.assertRaises(BlockingIOError):
msg = conn.recv(len(MSG))
self.event.set()
read, write, err = select.select([conn], [], [], support.LONG_TIMEOUT)
if conn not in read:
self.fail("Error during select call to non-blocking socket.")
# the server sent data yet: non-blocking recv() doesn't block
msg = conn.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.connect((HOST, self.port))
# don't send anything before event is set to check
# that non-blocking recv() raises BlockingIOError
self.event.wait()
# send data: recv() will no longer block
self.cli.sendall(MSG)
def testLargeTimeout(self):
# gh-126876: Check that a timeout larger than INT_MAX is replaced with
# INT_MAX in the poll() code path. The following assertion must not
# fail: assert(INT_MIN <= ms && ms <= INT_MAX).
if _testcapi is not None:
large_timeout = _testcapi.INT_MAX + 1
else:
large_timeout = 2147483648
# test recv() with large timeout
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
try:
conn.settimeout(large_timeout)
except OverflowError:
# On Windows, settimeout() fails with OverflowError, whereas
# we want to test recv(). Just give up silently.
return
msg = conn.recv(len(MSG))
def _testLargeTimeout(self):
# test sendall() with large timeout
if _testcapi is not None:
large_timeout = _testcapi.INT_MAX + 1
else:
large_timeout = 2147483648
self.cli.connect((HOST, self.port))
try:
self.cli.settimeout(large_timeout)
except OverflowError:
return
self.cli.sendall(MSG)
| NonBlockingTCPTests |
python | kamyu104__LeetCode-Solutions | Python/delete-node-in-a-bst.py | {
"start": 29,
"end": 937
} | class ____(object):
def deleteNode(self, root, key):
"""
:type root: TreeNode
:type key: int
:rtype: TreeNode
"""
if not root:
return root
if root.val > key:
root.left = self.deleteNode(root.left, key)
elif root.val < key:
root.right = self.deleteNode(root.right, key)
else:
if not root.left:
right = root.right
del root
return right
elif not root.right:
left = root.left
del root
return left
else:
successor = root.right
while successor.left:
successor = successor.left
root.val = successor.val
root.right = self.deleteNode(root.right, successor.val)
return root
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramSpec3.py | {
"start": 1723,
"end": 2614
} | class ____:
def __call__(self, *args, **kwargs) -> None: ...
def func7(f1: Callable[P, R], f2: Callable[P, R]) -> Callable[P, R]: ...
def func8(cb1: Callback1, cb2: Callback2, cb3: Callback3):
v1 = func7(cb1, cb2)
reveal_type(v1, expected_text="(x: int, /) -> None")
v2 = func7(cb1, cb3)
reveal_type(v2, expected_text="(x: int | str, y: int = 3) -> None")
def func9(f: Callable[P, object], *args: P.args, **kwargs: P.kwargs) -> object:
# This should generate an error because "name" doesn't exist.
return f(*args, **kwargs, name="")
def func10(data: int = 1) -> None:
pass
def func11[**P](
cls: Callable[P, None], data: str, *args: P.args, **kwargs: P.kwargs
) -> None: ...
func11(func10, "")
func11(func10, "", 0)
# This should generate an error because one of the two "data" parameters
# does not have a default value.
func11(func10)
| Callback3 |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 43342,
"end": 43562
} | class ____(BaseModel, extra="forbid"):
type: "GeoIndexType" = Field(..., description="")
on_disk: Optional[bool] = Field(default=None, description="If true, store the index on disk. Default: false.")
| GeoIndexParams |
python | scipy__scipy | tools/ninjatracing.py | {
"start": 1004,
"end": 2313
} | class ____:
"""Represents a single line read for a .ninja_log file. Start and end times
are milliseconds."""
def __init__(self, start, end):
self.start = int(start)
self.end = int(end)
self.targets = []
def read_targets(log, show_all):
"""Reads all targets from .ninja_log file |log_file|, sorted by start
time"""
header = log.readline()
m = re.search(r'^# ninja log v(\d+)\n$', header)
assert m, f"unrecognized ninja log version {header!r}"
version = int(m.group(1))
assert 5 <= version <= 6, f"unsupported ninja log version {version}"
if version == 6:
# Skip header line
next(log)
targets = {}
last_end_seen = 0
for line in log:
start, end, _, name, cmdhash = line.strip().split('\t') # Ignore restat.
if not show_all and int(end) < last_end_seen:
# An earlier time stamp means that this step is the first in a new
# build, possibly an incremental build. Throw away the previous data
# so that this new build will be displayed independently.
targets = {}
last_end_seen = int(end)
targets.setdefault(cmdhash, Target(start, end)).targets.append(name)
return sorted(targets.values(), key=lambda job: job.end, reverse=True)
| Target |
python | google__pytype | pytype/abstract/_instances.py | {
"start": 13118,
"end": 17819
} | class ____( # pytype: disable=signature-mismatch
_instance_base.Instance, mixin.HasSlots, mixin.PythonConstant
):
"""Representation of Python 'list' objects."""
def __init__(self, content, ctx: "context.Context") -> None:
super().__init__(ctx.convert.list_type, ctx)
self._instance_cache = {}
combined_content = ctx.convert.build_content(content)
self.merge_instance_type_parameter(
None, abstract_utils.T, combined_content
) # pytype: disable=wrong-arg-types
mixin.PythonConstant.init_mixin(self, content)
mixin.HasSlots.init_mixin(self)
self.set_native_slot("__getitem__", self.getitem_slot)
self.set_native_slot("__getslice__", self.getslice_slot)
def str_of_constant(self, printer: Callable[[_base.BaseValue], str]) -> str:
return "[%s]" % ", ".join(
" or ".join(_var_map(printer, val)) for val in self.pyval
)
def __repr__(self) -> str:
if self.is_concrete:
return mixin.PythonConstant.__repr__(self)
else:
return _instance_base.Instance.__repr__(self)
def get_fullhash(self, seen: set[int] | None = None):
if self.is_concrete:
return _get_concrete_sequence_fullhash(self, seen)
return super().get_fullhash(seen)
def merge_instance_type_parameter(
self, node: cfg.CFGNode, name: str, value: cfg.Variable
) -> None:
self.is_concrete = False
super().merge_instance_type_parameter(node, name, value)
def getitem_slot(
self, node: cfg.CFGNode, index_var: cfg.Variable
) -> tuple[cfg.CFGNode, cfg.Variable]:
"""Implements __getitem__ for List.
Arguments:
node: The current CFG node.
index_var: The Variable containing the index value, the i in lst[i].
Returns:
Tuple of (node, return_variable). node may be the same as the argument.
return_variable is a Variable with bindings of the possible return values.
"""
results = []
unresolved = False
node, ret = self.call_pytd(node, "__getitem__", index_var)
if self.is_concrete:
for val in index_var.bindings:
try:
index = self.ctx.convert.value_to_constant(val.data, int)
except abstract_utils.ConversionError:
unresolved = True
else:
self_len = len(self.pyval)
if -self_len <= index < self_len:
results.append(self.pyval[index])
else:
unresolved = True
if unresolved or not self.is_concrete:
results.append(ret)
return node, self.ctx.join_variables(node, results)
def _get_index(
self, data: _instance_base.Instance | ConcreteValue
) -> int | None:
"""Helper function for getslice_slot that extracts int or None from data.
If data is an Instance of int, None is returned.
Args:
data: The object to extract from. Usually a ConcreteValue or an Instance.
Returns:
The value (an int or None) of the index.
Raises:
abstract_utils.ConversionError: If the data could not be converted.
"""
if isinstance(data, ConcreteValue):
return self.ctx.convert.value_to_constant(data, (int, type(None)))
elif isinstance(data, _instance_base.Instance):
if data.cls != self.ctx.convert.int_type:
raise abstract_utils.ConversionError()
else:
return None
else:
raise abstract_utils.ConversionError()
def getslice_slot(
self, node: cfg.CFGNode, start_var: cfg.Variable, end_var: cfg.Variable
) -> tuple[cfg.CFGNode, cfg.Variable]:
"""Implements __getslice__ for List.
Arguments:
node: The current CFG node.
start_var: A Variable containing the i in lst[i:j].
end_var: A Variable containing the j in lst[i:j].
Returns:
Tuple of (node, return_variable). node may be the same as the argument.
return_variable is a Variable with bindings of the possible return values.
"""
# call_pytd will typecheck start_var and end_var.
node, ret = self.call_pytd(node, "__getslice__", start_var, end_var)
results = []
unresolved = False
if self.is_concrete:
for start_val, end_val in cfg_utils.variable_product(
[start_var, end_var]
):
try:
start = self._get_index(
start_val.data
) # pytype: disable=wrong-arg-types
end = self._get_index(end_val.data) # pytype: disable=wrong-arg-types
except abstract_utils.ConversionError:
unresolved = True
else:
results.append(
List(self.pyval[start:end], self.ctx).to_variable(node)
)
if unresolved or not self.is_concrete:
results.append(ret)
return node, self.ctx.join_variables(node, results)
| List |
python | arrow-py__arrow | arrow/locales.py | {
"start": 31272,
"end": 33373
} | class ____(SlavicBaseLocale):
names = ["pl", "pl-pl"]
past = "{0} temu"
future = "za {0}"
# The nouns should be in genitive case (Polish: "dopełniacz")
# in order to correctly form `past` & `future` expressions.
timeframes: ClassVar[Mapping[TimeFrameLiteral, Union[str, Mapping[str, str]]]] = {
"now": "teraz",
"second": "sekundę",
"seconds": {
"singular": "{0} sekund",
"dual": "{0} sekundy",
"plural": "{0} sekund",
},
"minute": "minutę",
"minutes": {
"singular": "{0} minut",
"dual": "{0} minuty",
"plural": "{0} minut",
},
"hour": "godzinę",
"hours": {
"singular": "{0} godzin",
"dual": "{0} godziny",
"plural": "{0} godzin",
},
"day": "dzień",
"days": "{0} dni",
"week": "tydzień",
"weeks": {
"singular": "{0} tygodni",
"dual": "{0} tygodnie",
"plural": "{0} tygodni",
},
"month": "miesiąc",
"months": {
"singular": "{0} miesięcy",
"dual": "{0} miesiące",
"plural": "{0} miesięcy",
},
"year": "rok",
"years": {"singular": "{0} lat", "dual": "{0} lata", "plural": "{0} lat"},
}
month_names = [
"",
"styczeń",
"luty",
"marzec",
"kwiecień",
"maj",
"czerwiec",
"lipiec",
"sierpień",
"wrzesień",
"październik",
"listopad",
"grudzień",
]
month_abbreviations = [
"",
"sty",
"lut",
"mar",
"kwi",
"maj",
"cze",
"lip",
"sie",
"wrz",
"paź",
"lis",
"gru",
]
day_names = [
"",
"poniedziałek",
"wtorek",
"środa",
"czwartek",
"piątek",
"sobota",
"niedziela",
]
day_abbreviations = ["", "Pn", "Wt", "Śr", "Czw", "Pt", "So", "Nd"]
| PolishLocale |
python | sympy__sympy | sympy/polys/orderings.py | {
"start": 1107,
"end": 1316
} | class ____(MonomialOrder):
"""Graded lexicographic order of monomials. """
alias = 'grlex'
is_global = True
def __call__(self, monomial):
return (sum(monomial), monomial)
| GradedLexOrder |
python | tiangolo__fastapi | docs_src/sql_databases/tutorial002_an_py310.py | {
"start": 485,
"end": 2567
} | class ____(HeroBase):
name: str | None = None
age: int | None = None
secret_name: str | None = None
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
connect_args = {"check_same_thread": False}
engine = create_engine(sqlite_url, connect_args=connect_args)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def get_session():
with Session(engine) as session:
yield session
SessionDep = Annotated[Session, Depends(get_session)]
app = FastAPI()
@app.on_event("startup")
def on_startup():
create_db_and_tables()
@app.post("/heroes/", response_model=HeroPublic)
def create_hero(hero: HeroCreate, session: SessionDep):
db_hero = Hero.model_validate(hero)
session.add(db_hero)
session.commit()
session.refresh(db_hero)
return db_hero
@app.get("/heroes/", response_model=list[HeroPublic])
def read_heroes(
session: SessionDep,
offset: int = 0,
limit: Annotated[int, Query(le=100)] = 100,
):
heroes = session.exec(select(Hero).offset(offset).limit(limit)).all()
return heroes
@app.get("/heroes/{hero_id}", response_model=HeroPublic)
def read_hero(hero_id: int, session: SessionDep):
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
return hero
@app.patch("/heroes/{hero_id}", response_model=HeroPublic)
def update_hero(hero_id: int, hero: HeroUpdate, session: SessionDep):
hero_db = session.get(Hero, hero_id)
if not hero_db:
raise HTTPException(status_code=404, detail="Hero not found")
hero_data = hero.model_dump(exclude_unset=True)
hero_db.sqlmodel_update(hero_data)
session.add(hero_db)
session.commit()
session.refresh(hero_db)
return hero_db
@app.delete("/heroes/{hero_id}")
def delete_hero(hero_id: int, session: SessionDep):
hero = session.get(Hero, hero_id)
if not hero:
raise HTTPException(status_code=404, detail="Hero not found")
session.delete(hero)
session.commit()
return {"ok": True}
| HeroUpdate |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.