language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | encode__django-rest-framework | tests/test_renderers.py | {
"start": 22771,
"end": 24537
} | class ____(TestCase):
"""
Test rendering MultipleChoiceField with HTMLFormRenderer.
"""
def setUp(self):
self.renderer = HTMLFormRenderer()
def test_render_selected_option_with_string_option_ids(self):
choices = (('1', 'Option1'), ('2', 'Option2'), ('12', 'Option12'),
('}', 'OptionBrace'))
class TestSerializer(serializers.Serializer):
test_field = serializers.MultipleChoiceField(choices=choices)
serializer = TestSerializer(data={'test_field': ['12']})
serializer.is_valid()
result = self.renderer.render(serializer.data)
self.assertIsInstance(result, SafeText)
self.assertInHTML('<option value="12" selected>Option12</option>',
result)
self.assertInHTML('<option value="1">Option1</option>', result)
self.assertInHTML('<option value="2">Option2</option>', result)
self.assertInHTML('<option value="}">OptionBrace</option>', result)
def test_render_selected_option_with_integer_option_ids(self):
choices = ((1, 'Option1'), (2, 'Option2'), (12, 'Option12'))
class TestSerializer(serializers.Serializer):
test_field = serializers.MultipleChoiceField(choices=choices)
serializer = TestSerializer(data={'test_field': ['12']})
serializer.is_valid()
result = self.renderer.render(serializer.data)
self.assertIsInstance(result, SafeText)
self.assertInHTML('<option value="12" selected>Option12</option>',
result)
self.assertInHTML('<option value="1">Option1</option>', result)
self.assertInHTML('<option value="2">Option2</option>', result)
| TestMultipleChoiceFieldHTMLFormRenderer |
python | doocs__leetcode | solution/2500-2599/2591.Distribute Money to Maximum Children/Solution.py | {
"start": 0,
"end": 358
} | class ____:
def distMoney(self, money: int, children: int) -> int:
if money < children:
return -1
if money > 8 * children:
return children - 1
if money == 8 * children - 4:
return children - 2
# money-8x >= children-x, x <= (money-children)/7
return (money - children) // 7
| Solution |
python | optuna__optuna | optuna/artifacts/_backoff.py | {
"start": 306,
"end": 3716
} | class ____:
"""An artifact store's middleware for exponential backoff.
Example:
.. code-block:: python
import optuna
from optuna.artifacts import upload_artifact
from optuna.artifacts import Boto3ArtifactStore
from optuna.artifacts import Backoff
artifact_store = Backoff(Boto3ArtifactStore("my-bucket"))
def objective(trial: optuna.Trial) -> float:
... = trial.suggest_float("x", -10, 10)
file_path = generate_example(...)
upload_artifact(
artifact_store=artifact_store,
file_path=file_path,
study_or_trial=trial,
)
return ...
"""
def __init__(
self,
backend: ArtifactStore,
*,
max_retries: int = 10,
multiplier: float = 2,
min_delay: float = 0.1,
max_delay: float = 30,
) -> None:
# Default sleep seconds:
# 0.1, 0.2, 0.4, 0.8, 1.6, 3.2, 6.4, 12.8, 25.6, 30
self._backend = backend
assert max_retries > 0
assert multiplier > 0
assert min_delay > 0
assert max_delay > min_delay
self._max_retries = max_retries
self._multiplier = multiplier
self._min_delay = min_delay
self._max_delay = max_delay
def _get_sleep_secs(self, n_retry: int) -> float:
return min(self._min_delay * self._multiplier**n_retry, self._max_delay)
def open_reader(self, artifact_id: str) -> BinaryIO:
for i in range(self._max_retries):
try:
return self._backend.open_reader(artifact_id)
except ArtifactNotFound:
raise
except Exception as e:
if i == self._max_retries - 1:
raise
else:
_logger.error(f"Failed to open artifact={artifact_id} n_retry={i}", exc_info=e)
time.sleep(self._get_sleep_secs(i))
assert False, "must not reach here"
def write(self, artifact_id: str, content_body: BinaryIO) -> None:
for i in range(self._max_retries):
try:
self._backend.write(artifact_id, content_body)
break
except ArtifactNotFound:
raise
except Exception as e:
if i == self._max_retries - 1:
raise
else:
_logger.error(f"Failed to open artifact={artifact_id} n_retry={i}", exc_info=e)
content_body.seek(0)
time.sleep(self._get_sleep_secs(i))
def remove(self, artifact_id: str) -> None:
for i in range(self._max_retries):
try:
self._backend.remove(artifact_id)
except ArtifactNotFound:
raise
except Exception as e:
if i == self._max_retries - 1:
raise
else:
_logger.error(f"Failed to delete artifact={artifact_id}", exc_info=e)
time.sleep(self._get_sleep_secs(i))
if TYPE_CHECKING:
# A mypy-runtime assertion to ensure that the Backoff middleware implements
# all abstract methods in ArtifactStore.
from optuna.artifacts import FileSystemArtifactStore
_: ArtifactStore = Backoff(FileSystemArtifactStore("."))
| Backoff |
python | walkccc__LeetCode | solutions/3107. Minimum Operations to Make Median of Array Equal to K/3107.py | {
"start": 0,
"end": 282
} | class ____:
def minOperationsToMakeMedianK(self, nums: list[int], k: int) -> int:
n = len(nums)
ans = 0
nums.sort()
for i in range(n // 2 + 1):
ans += max(0, nums[i] - k)
for i in range(n // 2, n):
ans += max(0, k - nums[i])
return ans
| Solution |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/proto/encode_proto_op_test_base.py | {
"start": 1371,
"end": 8378
} | class ____(test_base.ProtoOpTestBase, parameterized.TestCase):
"""Base class for testing proto encoding ops."""
def __init__(self, decode_module, encode_module, methodName='runTest'): # pylint: disable=invalid-name
"""EncodeProtoOpTestBase initializer.
Args:
decode_module: a module containing the `decode_proto_op` method
encode_module: a module containing the `encode_proto_op` method
methodName: the name of the test method (same as for test.TestCase)
"""
super(EncodeProtoOpTestBase, self).__init__(methodName)
self._decode_module = decode_module
self._encode_module = encode_module
def testBadSizesShape(self):
if context.executing_eagerly():
expected_error = (errors.InvalidArgumentError,
r'Invalid shape for field double_value.')
else:
expected_error = (ValueError,
r'Shape must be at least rank 2 but is rank 0')
with self.assertRaisesRegex(*expected_error):
self.evaluate(
self._encode_module.encode_proto(
sizes=1,
values=[np.double(1.0)],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value']))
def testBadInputs(self):
# Invalid field name
with self.assertRaisesOpError('Unknown field: non_existent_field'):
self.evaluate(
self._encode_module.encode_proto(
sizes=[[1]],
values=[np.array([[0.0]], dtype=np.int32)],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['non_existent_field']))
# Incorrect types.
with self.assertRaisesOpError('Incompatible type for field double_value.'):
self.evaluate(
self._encode_module.encode_proto(
sizes=[[1]],
values=[np.array([[0.0]], dtype=np.int32)],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value']))
# Incorrect shapes of sizes.
for sizes_value in 1, np.array([[[0, 0]]]):
with self.assertRaisesOpError(
r'sizes should be batch_size \+ \[len\(field_names\)\]'):
if context.executing_eagerly():
self.evaluate(
self._encode_module.encode_proto(
sizes=sizes_value,
values=[np.array([[0.0]])],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value']))
else:
with self.cached_session():
sizes = array_ops.placeholder(dtypes.int32)
values = array_ops.placeholder(dtypes.float64)
self._encode_module.encode_proto(
sizes=sizes,
values=[values],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value']).eval(feed_dict={
sizes: sizes_value,
values: [[0.0]]
})
# Inconsistent shapes of values.
with self.assertRaisesOpError('Values must match up to the last dimension'):
if context.executing_eagerly():
self.evaluate(
self._encode_module.encode_proto(
sizes=[[1, 1]],
values=[np.array([[0.0]]),
np.array([[0], [0]])],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value', 'int32_value']))
else:
with self.cached_session():
values1 = array_ops.placeholder(dtypes.float64)
values2 = array_ops.placeholder(dtypes.int32)
(self._encode_module.encode_proto(
sizes=[[1, 1]],
values=[values1, values2],
message_type='tensorflow.contrib.proto.TestValue',
field_names=['double_value', 'int32_value']).eval(feed_dict={
values1: [[0.0]],
values2: [[0], [0]]
}))
def _testRoundtrip(self, in_bufs, message_type, fields):
field_names = [f.name for f in fields]
out_types = [f.dtype for f in fields]
with self.cached_session() as sess:
sizes, field_tensors = self._decode_module.decode_proto(
in_bufs,
message_type=message_type,
field_names=field_names,
output_types=out_types)
out_tensors = self._encode_module.encode_proto(
sizes,
field_tensors,
message_type=message_type,
field_names=field_names)
out_bufs, = sess.run([out_tensors])
# Check that the re-encoded tensor has the same shape.
self.assertEqual(in_bufs.shape, out_bufs.shape)
# Compare the input and output.
for in_buf, out_buf in zip(in_bufs.flat, out_bufs.flat):
in_obj = test_example_pb2.TestValue()
in_obj.ParseFromString(in_buf)
out_obj = test_example_pb2.TestValue()
out_obj.ParseFromString(out_buf)
# Check that the deserialized objects are identical.
self.assertEqual(in_obj, out_obj)
# Check that the input and output serialized messages are identical.
# If we fail here, there is a difference in the serialized
# representation but the new serialization still parses. This could
# be harmless (a change in map ordering?) or it could be bad (e.g.
# loss of packing in the encoding).
self.assertEqual(in_buf, out_buf)
@parameterized.named_parameters(
*test_base.ProtoOpTestBase.named_parameters(extension=False))
def testRoundtrip(self, case):
in_bufs = [value.SerializeToString() for value in case.values]
# np.array silently truncates strings if you don't specify dtype=object.
in_bufs = np.reshape(np.array(in_bufs, dtype=object), list(case.shapes))
return self._testRoundtrip(
in_bufs, 'tensorflow.contrib.proto.TestValue', case.fields)
@parameterized.named_parameters(
*test_base.ProtoOpTestBase.named_parameters(extension=False))
def testRoundtripPacked(self, case):
# Now try with the packed serialization.
# We test the packed representations by loading the same test cases using
# PackedTestValue instead of TestValue. To do this we rely on the text
# format being the same for packed and unpacked fields, and reparse the test
# message using the packed version of the proto.
in_bufs = [
# Note: float_format='.17g' is necessary to ensure preservation of
# doubles and floats in text format.
text_format.Parse(
text_format.MessageToString(
value, float_format='.17g'),
test_example_pb2.PackedTestValue()).SerializeToString()
for value in case.values
]
# np.array silently truncates strings if you don't specify dtype=object.
in_bufs = np.reshape(np.array(in_bufs, dtype=object), list(case.shapes))
return self._testRoundtrip(
in_bufs, 'tensorflow.contrib.proto.PackedTestValue', case.fields)
| EncodeProtoOpTestBase |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 693952,
"end": 694703
} | class ____(sgqlc.types.relay.Connection):
"""Look up Marketplace Listings"""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("MarketplaceListingEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("MarketplaceListing"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| MarketplaceListingConnection |
python | huggingface__transformers | src/transformers/models/qwen3_omni_moe/processing_qwen3_omni_moe.py | {
"start": 3150,
"end": 19393
} | class ____(ProcessorMixin):
r"""
Constructs a Qwen2.5Omni processor.
[`Qwen3OmniMoeProcessor`] offers all the functionalities of [`Qwen2VLImageProcessor`], [`WhisperFeatureExtractor`], and [`Qwen2TokenizerFast`]. See the
[`~Qwen3OmniMoeProcessor.__call__`] and [`~Qwen3OmniMoeProcessor.decode`] for more information.
Args:
image_processor ([`Qwen2VLImageProcessor`], *optional*):
The image processor.
video_processor ([`Qwen2VLVideoProcessor`], *optional*):
The video processor.
feature_extractor ([`WhisperFeatureExtractor`], *optional*):
The audio feature extractor.
tokenizer ([`Qwen2TokenizerFast`], *optional*):
The text tokenizer.
chat_template (`Optional[str]`, *optional*):
The Jinja template to use for formatting the conversation. If not provided, the default chat template is used.
"""
def __init__(
self, image_processor=None, video_processor=None, feature_extractor=None, tokenizer=None, chat_template=None
):
super().__init__(image_processor, video_processor, feature_extractor, tokenizer, chat_template=chat_template)
self.image_token = self.tokenizer.image_token
self.audio_token = self.tokenizer.audio_token
self.video_token = self.tokenizer.video_token
self.vision_bos_token = self.tokenizer.vision_bos_token
self.vision_eos_token = self.tokenizer.vision_eos_token
self.audio_bos_token = self.tokenizer.audio_bos_token
self.audio_eos_token = self.tokenizer.audio_eos_token
def __call__(
self,
text: TextInput = None,
images: Optional[ImageInput] = None,
videos: Optional[VideoInput] = None,
audio: Optional[AudioInput] = None,
**kwargs,
) -> BatchFeature:
"""
Main method to prepare for the model one or several sequences(s) and audio(s). This method forwards the `text`
and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode
the text. To prepare the audio(s), this method forwards the `audio` and `kwargs` arguments to
WhisperFeatureExtractor's [`~WhisperFeatureExtractor.__call__`] if `audio` is not `None`. To prepare the vision inputs,
this method forwards the `vision_infos` and `kwargs` arguments to Qwen2VLImageProcessor's [`~Qwen2VLImageProcessor.__call__`]
if `vision_infos` is not `None`. Please refer to the doctsring
of the above two methods for more information.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
videos (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):
The image or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported.
audio (`np.ndarray`, `List[np.ndarray]`):
The audio or batch of audio to be prepared. Each audio can be a NumPy array.
"""
if text is None:
raise ValueError("You need to specify either a `text` input to process.")
output_kwargs = self._merge_kwargs(
Qwen3OmniMoeProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
seconds_per_chunk = output_kwargs["videos_kwargs"].pop("seconds_per_chunk")
position_id_per_seconds = output_kwargs["videos_kwargs"].pop("position_id_per_seconds")
use_audio_in_video = output_kwargs["videos_kwargs"].pop("use_audio_in_video")
fps = output_kwargs["videos_kwargs"].get("fps", 1.0)
if audio is not None:
audio_inputs = self.feature_extractor(audio, **output_kwargs["audio_kwargs"])
audio_inputs["feature_attention_mask"] = audio_inputs.pop(
"attention_mask"
) # rename feature_attention_mask to prevent conflicts later on
audio_inputs["input_features"] = audio_inputs.pop(
"input_features"
) # rename input_features to prevent conflicts later on
audio_lengths = iter(_get_feat_extract_output_lengths(audio_inputs["feature_attention_mask"].sum(-1)))
else:
audio_inputs = {}
audio_lengths = iter([])
if images is not None:
images_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
image_grid_thw = iter(images_inputs["image_grid_thw"])
else:
images_inputs = {}
image_grid_thw = iter([])
if videos is not None:
videos = make_batched_videos(videos)
videos_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"])
fps = [fps] * len(videos)
videos_inputs["video_second_per_grid"] = [
self.video_processor.temporal_patch_size / fps[i] for i in range(len(fps))
]
video_grid_thw = iter(videos_inputs["video_grid_thw"])
video_second_per_grid = iter(videos_inputs["video_second_per_grid"])
else:
videos_inputs = {}
video_grid_thw = iter([])
video_second_per_grid = iter([])
if not isinstance(text, list):
text = [text]
text = self.replace_multimodal_special_tokens(
text,
audio_lengths,
image_grid_thw,
video_grid_thw,
video_second_per_grid=video_second_per_grid,
use_audio_in_video=use_audio_in_video,
position_id_per_seconds=position_id_per_seconds,
seconds_per_chunk=seconds_per_chunk,
)
texts_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
return BatchFeature(
data={**texts_inputs, **images_inputs, **videos_inputs, **audio_inputs},
tensor_type=kwargs.get("return_tensors"),
)
def replace_multimodal_special_tokens(
self,
text,
audio_lengths,
image_grid_thw,
video_grid_thw,
video_second_per_grid,
use_audio_in_video,
position_id_per_seconds,
seconds_per_chunk,
):
# Extend mm token length
merge_length_image = self.image_processor.merge_size**2
merge_length_video = self.video_processor.merge_size**2
processed_text = []
for sample in text:
positions = []
special_tokens = [re.escape(tok) for tok in [self.audio_token, self.image_token, self.video_token]]
pattern = "|".join(special_tokens)
positions = sorted([(match.start(), match.group()) for match in re.finditer(pattern, sample)])
positions.sort(key=lambda x: x[0])
for _, special_token in positions:
if special_token == self.audio_token:
sample = sample.replace(self.audio_token, "<|audio_placeholder|>" * next(audio_lengths), 1)
elif special_token == self.image_token:
image_seq_length = next(image_grid_thw).prod() // merge_length_image
sample = sample.replace(self.image_token, "<|image_placeholder|>" * image_seq_length, 1)
elif special_token == self.video_token:
if not use_audio_in_video:
video_seq_length = next(video_grid_thw).prod() // merge_length_video
sample = sample.replace(self.video_token, "<|video_placeholder|>" * video_seq_length, 1)
else:
audio_token_indices = np.arange(next(audio_lengths))
curr_video_grid_thw = next(video_grid_thw)
height = curr_video_grid_thw[1] // self.video_processor.merge_size
width = curr_video_grid_thw[2] // self.video_processor.merge_size
video_token_indices = np.arange(curr_video_grid_thw[0]).reshape(-1, 1, 1)
video_token_indices = np.broadcast_to(
video_token_indices, (video_token_indices.shape[0], height, width)
).reshape(-1)
video_token_indices = (
video_token_indices * next(video_second_per_grid) * position_id_per_seconds
)
video_data_index, audio_data_index = 0, 0
placeholder_string = self.vision_bos_token + self.audio_bos_token
while video_data_index < len(video_token_indices) and audio_data_index < len(
audio_token_indices
):
if video_token_indices[video_data_index] <= audio_token_indices[audio_data_index]:
placeholder_string += "<|video_placeholder|>"
video_data_index += 1
else:
placeholder_string += "<|audio_placeholder|>"
audio_data_index += 1
if video_data_index < len(video_token_indices):
placeholder_string += "<|video_placeholder|>" * (
len(video_token_indices) - video_data_index
)
if audio_data_index < len(audio_token_indices):
placeholder_string += "<|audio_placeholder|>" * (
len(audio_token_indices) - audio_data_index
)
placeholder_string += self.audio_eos_token + self.vision_eos_token
sample = sample.replace(
self.vision_bos_token + self.video_token + self.vision_eos_token,
placeholder_string,
1,
)
sample = sample.replace("<|audio_placeholder|>", self.audio_token)
sample = sample.replace("<|image_placeholder|>", self.image_token)
sample = sample.replace("<|video_placeholder|>", self.video_token)
processed_text.append(sample)
return processed_text
def get_chunked_index(self, token_indices: np.ndarray, tokens_per_chunk: int) -> list[tuple[int, int]]:
"""
Splits token index list into chunks based on token value ranges.
Given a list of token indices, returns a list of (start, end) index tuples representing
slices of the list where the token values fall within successive ranges of `t_ntoken_per_chunk`.
For example, if `t_ntoken_per_chunk` is 1000, the function will create chunks such that:
- the first chunk contains token values < 1000,
- the second chunk contains values >= 1000 and < 2000, and so on.
Parameters:
token_indices (`np.ndarray`): A monotonically increasing list of token index values.
t_ntoken_per_chunk (`int`): Number of tokens per chunk (used as the chunk size threshold).
Returns:
`list[tuple[int, int]]`: A list of tuples, each representing the start (inclusive)
and end (exclusive) indices of a chunk in `token_indices`.
"""
def _iter():
i, start_idx = 0, 0 # skip bos token
current_chunk = 1
while i < len(token_indices): # skip eos token
if token_indices[i] >= current_chunk * tokens_per_chunk:
yield (start_idx, i)
start_idx = i
current_chunk += 1
i += 1
yield (start_idx, len(token_indices))
return list(_iter())
def apply_chat_template(self, conversations, chat_template=None, **kwargs):
return super().apply_chat_template(conversations, chat_template, **kwargs)
def post_process_image_text_to_text(self, generated_outputs, skip_special_tokens=True, **kwargs):
"""
Post-process the output of a vlm to decode the text.
Args:
generated_outputs (`torch.Tensor` or `np.ndarray`):
The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
or `(sequence_length,)`.
skip_special_tokens (`bool`, *optional*, defaults to `True`):
Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
**kwargs:
Additional arguments to be passed to the tokenizer's `batch_decode method`.
Returns:
`list[str]`: The decoded text.
"""
return self.tokenizer.batch_decode(generated_outputs[0], skip_special_tokens=skip_special_tokens, **kwargs)
def post_process_multimodal_output(
self, generated_outputs, skip_special_tokens=True, generation_mode=None, **kwargs
):
"""
Post-process the output of a multimodal model to return the requested modality output.
If the model cannot generated the requested modality, an error will be raised.
Args:
generated_outputs (`torch.Tensor` or `np.ndarray`):
The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
or `(sequence_length,)`.
skip_special_tokens (`bool`, *optional*, defaults to `True`):
Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
generation_mode (`str`, *optional*):
Generation mode indicated which modality to output and can be one of `["text", "image", "audio"]`.
**kwargs:
Additional arguments to be passed to the tokenizer's `batch_decode method`.
Returns:
`list[Inion[str, np.ndarray]]`: The decoded text or generated audio.
"""
if generation_mode is None or generation_mode == "text":
return self.post_process_image_text_to_text(
generated_outputs, skip_special_tokens=skip_special_tokens, **kwargs
)
elif generation_mode == "audio":
# model supports only bs=1, so we will never get several audio outputs
audio = generated_outputs[1].reshape(-1).detach().cpu().numpy()
return [audio]
else:
raise ValueError(
f"{self.__class__.__name__} got an unexpected generation_mode={generation_mode}. Supported options are only `text` and `audio"
)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
feature_extractor_input_names = self.feature_extractor.model_input_names
image_processor_input_names = self.image_processor.model_input_names
video_processor_input_names = self.video_processor.model_input_names
return list(
dict.fromkeys(
tokenizer_input_names
+ feature_extractor_input_names
+ image_processor_input_names
+ video_processor_input_names
+ ["feature_attention_mask"]
+ ["video_second_per_grid"]
)
)
__all__ = ["Qwen3OmniMoeProcessor"]
| Qwen3OmniMoeProcessor |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_header_image11.py | {
"start": 315,
"end": 985
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("header_image11.xlsx")
self.ignore_elements = {
"xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"]
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_header("&L&G", {"image_left": self.image_dir + "black_300.jpg"})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | tensorflow__tensorflow | tensorflow/compiler/tests/data_format_ops_test.py | {
"start": 986,
"end": 3211
} | class ____(xla_test.XLATestCase):
def _test(self, input_data, src_format, dst_format, expected):
for dtype in {np.int32, np.int64}:
x = np.array(input_data, dtype=dtype)
with self.session() as session:
with self.test_scope():
placeholder = array_ops.placeholder(dtypes.as_dtype(x.dtype), x.shape)
param = {placeholder: x}
output = nn_ops.data_format_dim_map(
placeholder, src_format=src_format, dst_format=dst_format)
result = session.run(output, param)
self.assertAllEqual(result, expected)
def test(self):
self._test(0, "NHWC", "NCHW", 0)
self._test(1, "NHWC", "NCHW", 2)
self._test(2, "NHWC", "NCHW", 3)
self._test(3, "NHWC", "NCHW", 1)
self._test(-1, "NHWC", "NCHW", 1)
self._test(-2, "NHWC", "NCHW", 3)
self._test(-3, "NHWC", "NCHW", 2)
self._test(-4, "NHWC", "NCHW", 0)
self._test([1, 3], "NHWC", "NCHW", [2, 1])
self._test([1, 3, -2], "NHWC", "NCHW", [2, 1, 3])
self._test([1, -3, -2], "NHWC", "NCHW", [2, 2, 3])
self._test([[1, -3], [1, -1]], "NHWC", "NCHW", [[2, 2], [2, 1]])
self._test([1, -3, -2], "NHWC", "NCHW", [2, 2, 3])
self._test([-4, -3, -2, -1, 0, 1, 2, 3], "NHWC", "HWNC",
[2, 0, 1, 3, 2, 0, 1, 3])
self._test([-4, -3, -2, -1, 0, 1, 2, 3], "NHWC", "WHCN",
[3, 1, 0, 2, 3, 1, 0, 2])
self._test([-4, -3, -2, -1, 0, 1, 2, 3], "qwer", "rewq",
[3, 2, 1, 0, 3, 2, 1, 0])
self._test(0, "NDHWC", "NCDHW", 0)
self._test(1, "NDHWC", "NCDHW", 2)
self._test(2, "NDHWC", "NCDHW", 3)
self._test(3, "NDHWC", "NCDHW", 4)
self._test(4, "NDHWC", "NCDHW", 1)
self._test([1, 4], "NDHWC", "NCDHW", [2, 1])
self._test([1, 4, -2], "NDHWC", "NCDHW", [2, 1, 4])
self._test([1, -3, -2], "NDHWC", "NCDHW", [2, 3, 4])
self._test([[1, -4], [1, -1]], "NDHWC", "NCDHW", [[2, 2], [2, 1]])
self._test([1, -3, -2], "NDHWC", "NCDHW", [2, 3, 4])
self._test([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4], "NDHWC", "DHWNC",
[3, 0, 1, 2, 4, 3, 0, 1, 2, 4])
self._test([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4], "NDHWC", "WHDCN",
[4, 2, 1, 0, 3, 4, 2, 1, 0, 3])
| XlaDataFormatDimMapTest |
python | getsentry__sentry | tests/sentry/models/test_organizationoption.py | {
"start": 119,
"end": 1601
} | class ____(TestCase):
def test_set_value(self) -> None:
OrganizationOption.objects.set_value(self.organization, "foo", "bar")
assert (
OrganizationOption.objects.get(organization=self.organization, key="foo").value == "bar"
)
def test_get_value(self) -> None:
result = OrganizationOption.objects.get_value(self.organization, "foo")
assert result is None
OrganizationOption.objects.create(organization=self.organization, key="foo", value="bar")
result = OrganizationOption.objects.get_value(self.organization, "foo")
assert result == "bar"
def test_unset_value(self) -> None:
OrganizationOption.objects.unset_value(self.organization, "foo")
OrganizationOption.objects.create(organization=self.organization, key="foo", value="bar")
OrganizationOption.objects.unset_value(self.organization, "foo")
assert not OrganizationOption.objects.filter(
organization=self.organization, key="foo"
).exists()
def test_get_value_bulk(self) -> None:
result = OrganizationOption.objects.get_value_bulk([self.organization], "foo")
assert result == {self.organization: None}
OrganizationOption.objects.create(organization=self.organization, key="foo", value="bar")
result = OrganizationOption.objects.get_value_bulk([self.organization], "foo")
assert result == {self.organization: "bar"}
| OrganizationOptionManagerTest |
python | realpython__materials | asterioids-pygame-project/source_code_final/space_rocks/models.py | {
"start": 2038,
"end": 2715
} | class ____(GameObject):
def __init__(self, position, create_asteroid_callback, size=3):
self.create_asteroid_callback = create_asteroid_callback
self.size = size
size_to_scale = {3: 1.0, 2: 0.5, 1: 0.25}
scale = size_to_scale[size]
sprite = rotozoom(load_sprite("asteroid"), 0, scale)
super().__init__(position, sprite, get_random_velocity(1, 3))
def split(self):
if self.size > 1:
for _ in range(2):
asteroid = Asteroid(
self.position, self.create_asteroid_callback, self.size - 1
)
self.create_asteroid_callback(asteroid)
| Asteroid |
python | PrefectHQ__prefect | tests/test_futures.py | {
"start": 17673,
"end": 21914
} | class ____:
async def test_wait_with_timeout(self, prefect_client: PrefectClient):
@flow
async def my_flow():
return 42
flow_run = await prefect_client.create_flow_run(
flow=my_flow,
parameters={},
)
asyncio.create_task(
run_flow_async(
flow=my_flow,
flow_run=flow_run,
parameters={},
return_type="state",
)
)
future = PrefectFlowRunFuture(flow_run_id=flow_run.id)
future.wait(timeout=0.25)
assert future.state.is_pending()
async def test_wait_without_timeout(
self, events_pipeline: EventsPipeline, prefect_client: PrefectClient
):
@flow
def my_flow():
return 42
flow_run = await prefect_client.create_flow_run(
flow=my_flow,
parameters={},
)
future = PrefectFlowRunFuture(flow_run_id=flow_run.id)
state = run_flow_sync(
flow=my_flow,
flow_run=flow_run,
parameters={},
return_type="state",
)
assert state.is_completed()
await events_pipeline.process_events()
future.wait()
assert future.state.is_completed()
async def test_result_with_final_state(
self, events_pipeline: EventsPipeline, prefect_client: PrefectClient
):
@flow(persist_result=True)
def my_flow():
return 42
flow_run = await prefect_client.create_flow_run(
flow=my_flow,
parameters={},
)
future = PrefectFlowRunFuture(flow_run_id=flow_run.id)
state = run_flow_sync(
flow=my_flow,
flow_run=flow_run,
parameters={},
return_type="state",
)
assert state.is_completed()
await events_pipeline.process_events()
assert await state.result() == 42
assert future.result() == 42
async def test_final_state_without_result(
self, events_pipeline: EventsPipeline, prefect_client: PrefectClient
):
@flow(persist_result=False)
def my_flow():
return 42
flow_run = await prefect_client.create_flow_run(
flow=my_flow,
parameters={},
)
future = PrefectFlowRunFuture(flow_run_id=flow_run.id)
state = run_flow_sync(
flow=my_flow,
flow_run=flow_run,
parameters={},
return_type="state",
)
assert state.is_completed()
await events_pipeline.process_events()
with pytest.raises(MissingResult):
future.result()
async def test_result_with_final_state_and_raise_on_failure(
self, events_pipeline: EventsPipeline, prefect_client: PrefectClient
):
@flow(persist_result=True)
def my_flow():
raise ValueError("oops")
flow_run = await prefect_client.create_flow_run(
flow=my_flow,
parameters={},
)
future = PrefectFlowRunFuture(flow_run_id=flow_run.id)
state = run_flow_sync(
flow=my_flow,
flow_run=flow_run,
parameters={},
return_type="state",
)
assert state.is_failed()
await events_pipeline.process_events()
with pytest.raises(ValueError, match="oops"):
future.result(raise_on_failure=True)
async def test_final_state_missing_result(
self, events_pipeline: EventsPipeline, prefect_client: PrefectClient
):
@flow(persist_result=False)
def my_flow():
return 42
flow_run = await prefect_client.create_flow_run(
flow=my_flow,
parameters={},
)
future = PrefectFlowRunFuture(flow_run_id=flow_run.id)
state = run_flow_sync(
flow=my_flow,
flow_run=flow_run,
parameters={},
return_type="state",
)
assert state.is_completed()
await events_pipeline.process_events()
with pytest.raises(MissingResult):
future.result()
| TestPrefectFlowRunFuture |
python | econchick__interrogate | tests/functional/sample/full.py | {
"start": 1946,
"end": 2064
} | class ____:
"""a private class"""
pass
# Coverage % for InitDocs should be the same as ClassDocs
| __PrivateClass |
python | gevent__gevent | src/gevent/_fileobjectcommon.py | {
"start": 420,
"end": 590
} | class ____(IOError):
def __init__(self):
IOError.__init__(
self,
EBADF, 'File descriptor was closed in another greenlet')
| cancel_wait_ex |
python | allegroai__clearml | clearml/backend_api/services/v2_23/datasets.py | {
"start": 100522,
"end": 115769
} | class ____(Request):
"""
Gets a list of datasets information matching a query
:param id: List of IDs to filter by
:type id: Sequence[str]
:param name: Get only datasets whose name matches this pattern (python regular
expression syntax)
:type name: str
:param tags: User-defined tags filter. Use '-' for exclusion
:type tags: Sequence[str]
:param system_tags: System tags filter. Use '-' for exclusion (e.g.
['-archived'] for all non-hidden datasets)
:type system_tags: Sequence[str]
:param page: Page number, returns a specific page out of the result list of
datasets.
:type page: int
:param page_size: Page size, specifies the number of results returned in each
page (last page may contain fewer results)
:type page_size: int
:param order_by: List of field names to order by. When search_text is used,
'@text_score' can be used as a field representing the text score of returned
documents. Use '-' prefix to specify descending order. Optional, recommended
when using page
:type order_by: Sequence[str]
:param search_text: Free text search query
:type search_text: str
:param only_fields: List of document's field names (nesting is supported using
'.', e.g. execution.model_labels). If provided, this list defines the query's
projection (only these fields will be returned for each result entry)
:type only_fields: Sequence[str]
:param _all_: Multi-field pattern condition (all fields match pattern)
:type _all_: MultiFieldPatternData
:param _any_: Multi-field pattern condition (any field matches pattern)
:type _any_: MultiFieldPatternData
:param allow_public: Allow public datasets to be returned in the results
:type allow_public: bool
:param resolve_head: If set then dataset paradigm and head version are
calculated and returned. Note: do not use it with queries that are supposed to
return multiple datasets.
:type resolve_head: bool
:param scroll_id: Scroll ID returned from the previos calls to get_all
:type scroll_id: str
:param refresh_scroll: If set then all the data received with this scroll will
be requeried
:type refresh_scroll: bool
:param size: The number of datasets to retrieve
:type size: int
"""
_service = "datasets"
_action = "get_all"
_version = "2.23"
_schema = {
"definitions": {
"multi_field_pattern_data": {
"properties": {
"fields": {
"description": "List of field names",
"items": {"type": "string"},
"type": ["array", "null"],
},
"pattern": {
"description": "Pattern string (regex)",
"type": ["string", "null"],
},
},
"type": "object",
}
},
"properties": {
"_all_": {
"description": "Multi-field pattern condition (all fields match pattern)",
"oneOf": [
{"$ref": "#/definitions/multi_field_pattern_data"},
{"type": "null"},
],
},
"_any_": {
"description": "Multi-field pattern condition (any field matches pattern)",
"oneOf": [
{"$ref": "#/definitions/multi_field_pattern_data"},
{"type": "null"},
],
},
"allow_public": {
"default": True,
"description": "Allow public datasets to be returned in the results",
"type": ["boolean", "null"],
},
"id": {
"description": "List of IDs to filter by",
"items": {"type": "string"},
"type": ["array", "null"],
},
"name": {
"description": "Get only datasets whose name matches this pattern (python regular expression syntax)",
"type": ["string", "null"],
},
"only_fields": {
"description": (
"List of document's field names (nesting is supported using '.', e.g. execution.model_labels). If"
" provided, this list defines the query's projection (only these fields will be returned for each"
" result entry)"
),
"items": {"type": "string"},
"type": ["array", "null"],
},
"order_by": {
"description": (
"List of field names to order by. When search_text is used, '@text_score' can be used as a field"
" representing the text score of returned documents. Use '-' prefix to specify descending order."
" Optional, recommended when using page"
),
"items": {"type": "string"},
"type": ["array", "null"],
},
"page": {
"description": "Page number, returns a specific page out of the result list of datasets.",
"minimum": 0,
"type": ["integer", "null"],
},
"page_size": {
"description": (
"Page size, specifies the number of results returned in each page (last page may contain fewer "
"results)"
),
"minimum": 1,
"type": ["integer", "null"],
},
"refresh_scroll": {
"description": "If set then all the data received with this scroll will be requeried",
"type": ["boolean", "null"],
},
"resolve_head": {
"default": False,
"description": (
"If set then dataset paradigm and head version are calculated and returned. Note: do not use it"
" with queries that are supposed to return multiple datasets."
),
"type": ["boolean", "null"],
},
"scroll_id": {
"description": "Scroll ID returned from the previos calls to get_all",
"type": ["string", "null"],
},
"search_text": {
"description": "Free text search query",
"type": ["string", "null"],
},
"size": {
"description": "The number of datasets to retrieve",
"minimum": 1,
"type": ["integer", "null"],
},
"system_tags": {
"description": (
"System tags filter. Use '-' for exclusion (e.g. ['-archived'] for all non-hidden datasets)"
),
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags filter. Use '-' for exclusion",
"items": {"type": "string"},
"type": ["array", "null"],
},
"project": {
"description": "Associated project ID",
"type": ["string", "null"],
},
},
"type": "object",
}
def __init__(
self,
id=None,
name=None,
tags=None,
system_tags=None,
page=None,
page_size=None,
order_by=None,
search_text=None,
only_fields=None,
_all_=None,
_any_=None,
allow_public=True,
resolve_head=False,
scroll_id=None,
refresh_scroll=None,
size=None,
project=None,
**kwargs
):
super(GetAllRequest, self).__init__(**kwargs)
self.id = id
self.name = name
self.tags = tags
self.system_tags = system_tags
self.page = page
self.page_size = page_size
self.order_by = order_by
self.search_text = search_text
self.only_fields = only_fields
self._all_ = _all_
self._any_ = _any_
self.allow_public = allow_public
self.resolve_head = resolve_head
self.scroll_id = scroll_id
self.refresh_scroll = refresh_scroll
self.size = size
self.project = project
@schema_property("id")
def id(self):
return self._property_id
@id.setter
def id(self, value):
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", (list, tuple))
self.assert_isinstance(value, "id", six.string_types, is_array=True)
self._property_id = value
@schema_property("name")
def name(self):
return self._property_name
@name.setter
def name(self, value):
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("tags")
def tags(self):
return self._property_tags
@tags.setter
def tags(self, value):
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self):
return self._property_system_tags
@system_tags.setter
def system_tags(self, value):
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("page")
def page(self):
return self._property_page
@page.setter
def page(self, value):
if value is None:
self._property_page = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "page", six.integer_types)
self._property_page = value
@schema_property("page_size")
def page_size(self):
return self._property_page_size
@page_size.setter
def page_size(self, value):
if value is None:
self._property_page_size = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "page_size", six.integer_types)
self._property_page_size = value
@schema_property("order_by")
def order_by(self):
return self._property_order_by
@order_by.setter
def order_by(self, value):
if value is None:
self._property_order_by = None
return
self.assert_isinstance(value, "order_by", (list, tuple))
self.assert_isinstance(value, "order_by", six.string_types, is_array=True)
self._property_order_by = value
@schema_property("search_text")
def search_text(self):
return self._property_search_text
@search_text.setter
def search_text(self, value):
if value is None:
self._property_search_text = None
return
self.assert_isinstance(value, "search_text", six.string_types)
self._property_search_text = value
@schema_property("only_fields")
def only_fields(self):
return self._property_only_fields
@only_fields.setter
def only_fields(self, value):
if value is None:
self._property_only_fields = None
return
self.assert_isinstance(value, "only_fields", (list, tuple))
self.assert_isinstance(value, "only_fields", six.string_types, is_array=True)
self._property_only_fields = value
@schema_property("_all_")
def _all_(self):
return self._property__all_
@_all_.setter
def _all_(self, value):
if value is None:
self._property__all_ = None
return
if isinstance(value, dict):
value = MultiFieldPatternData.from_dict(value)
else:
self.assert_isinstance(value, "_all_", MultiFieldPatternData)
self._property__all_ = value
@schema_property("_any_")
def _any_(self):
return self._property__any_
@_any_.setter
def _any_(self, value):
if value is None:
self._property__any_ = None
return
if isinstance(value, dict):
value = MultiFieldPatternData.from_dict(value)
else:
self.assert_isinstance(value, "_any_", MultiFieldPatternData)
self._property__any_ = value
@schema_property("allow_public")
def allow_public(self):
return self._property_allow_public
@allow_public.setter
def allow_public(self, value):
if value is None:
self._property_allow_public = None
return
self.assert_isinstance(value, "allow_public", (bool,))
self._property_allow_public = value
@schema_property("resolve_head")
def resolve_head(self):
return self._property_resolve_head
@resolve_head.setter
def resolve_head(self, value):
if value is None:
self._property_resolve_head = None
return
self.assert_isinstance(value, "resolve_head", (bool,))
self._property_resolve_head = value
@schema_property("scroll_id")
def scroll_id(self):
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value):
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
@schema_property("refresh_scroll")
def refresh_scroll(self):
return self._property_refresh_scroll
@refresh_scroll.setter
def refresh_scroll(self, value):
if value is None:
self._property_refresh_scroll = None
return
self.assert_isinstance(value, "refresh_scroll", (bool,))
self._property_refresh_scroll = value
@schema_property("size")
def size(self):
return self._property_size
@size.setter
def size(self, value):
if value is None:
self._property_size = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "size", six.integer_types)
self._property_size = value
@schema_property("project")
def project(self):
return self._property_project
@project.setter
def project(self, value):
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
| GetAllRequest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_cond_format05.py | {
"start": 315,
"end": 1211
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("cond_format05.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with conditional formatting."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format1 = workbook.add_format({"border": 1})
worksheet.write("A1", 10)
worksheet.write("A2", 20)
worksheet.write("A3", 30)
worksheet.write("A4", 40)
worksheet.conditional_format(
"A1",
{
"type": "cell",
"format": format1,
"criteria": "==",
"value": 7,
},
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | getsentry__sentry | src/sentry/replays/endpoints/project_replay_clicks_index.py | {
"start": 1857,
"end": 1992
} | class ____(TypedDict):
data: list[ReplayClickResponseData]
@region_silo_endpoint
@extend_schema(tags=["Replays"])
| ReplayClickResponse |
python | pytorch__pytorch | torch/distributions/pareto.py | {
"start": 422,
"end": 2544
} | class ____(TransformedDistribution):
r"""
Samples from a Pareto Type 1 distribution.
Example::
>>> # xdoctest: +IGNORE_WANT("non-deterministic")
>>> m = Pareto(torch.tensor([1.0]), torch.tensor([1.0]))
>>> m.sample() # sample from a Pareto distribution with scale=1 and alpha=1
tensor([ 1.5623])
Args:
scale (float or Tensor): Scale parameter of the distribution
alpha (float or Tensor): Shape parameter of the distribution
"""
arg_constraints = {"alpha": constraints.positive, "scale": constraints.positive}
def __init__(
self,
scale: Union[Tensor, float],
alpha: Union[Tensor, float],
validate_args: Optional[bool] = None,
) -> None:
self.scale, self.alpha = broadcast_all(scale, alpha)
base_dist = Exponential(self.alpha, validate_args=validate_args)
transforms = [ExpTransform(), AffineTransform(loc=0, scale=self.scale)]
# pyrefly: ignore [bad-argument-type]
super().__init__(base_dist, transforms, validate_args=validate_args)
def expand(
self, batch_shape: _size, _instance: Optional["Pareto"] = None
) -> "Pareto":
new = self._get_checked_instance(Pareto, _instance)
new.scale = self.scale.expand(batch_shape)
new.alpha = self.alpha.expand(batch_shape)
return super().expand(batch_shape, _instance=new)
@property
def mean(self) -> Tensor:
# mean is inf for alpha <= 1
a = self.alpha.clamp(min=1)
return a * self.scale / (a - 1)
@property
def mode(self) -> Tensor:
return self.scale
@property
def variance(self) -> Tensor:
# var is inf for alpha <= 2
a = self.alpha.clamp(min=2)
return self.scale.pow(2) * a / ((a - 1).pow(2) * (a - 2))
@constraints.dependent_property(is_discrete=False, event_dim=0)
def support(self) -> constraints.Constraint:
return constraints.greater_than_eq(self.scale)
def entropy(self) -> Tensor:
return (self.scale / self.alpha).log() + (1 + self.alpha.reciprocal())
| Pareto |
python | ray-project__ray | python/ray/train/_internal/utils.py | {
"start": 5998,
"end": 6408
} | class ____(abc.ABCMeta):
"""Singleton Abstract Base Class
https://stackoverflow.com/questions/33364070/implementing
-singleton-as-metaclass-but-for-abstract-classes
"""
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
| Singleton |
python | plotly__plotly.py | plotly/graph_objs/parcoords/_labelfont.py | {
"start": 233,
"end": 9896
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "parcoords"
_path_str = "parcoords.labelfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Labelfont object
Sets the font for the `dimension` labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.parcoords.Labelfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Labelfont
"""
super().__init__("labelfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.parcoords.Labelfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.parcoords.Labelfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Labelfont |
python | doocs__leetcode | solution/2500-2599/2595.Number of Even and Odd Bits/Solution2.py | {
"start": 0,
"end": 188
} | class ____:
def evenOddBit(self, n: int) -> List[int]:
mask = 0x5555
even = (n & mask).bit_count()
odd = (n & ~mask).bit_count()
return [even, odd]
| Solution |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linalg_ops_test.py | {
"start": 5261,
"end": 5910
} | class ____(test.TestCase):
def test_compare_to_numpy(self):
for dtype in np.float64, np.float64, np.complex64, np.complex128:
with self.subTest(dtype=dtype):
matrix_np = np.array([[1 + 1j, 2 + 2j, 3 + 3j], [4 + 4j, 5 + 5j,
6 + 6j]]).astype(dtype)
expected_transposed = np.conj(matrix_np.T)
with self.session():
matrix = ops.convert_to_tensor(matrix_np)
transposed = linalg.adjoint(matrix)
self.assertEqual((3, 2), transposed.get_shape())
self.assertAllEqual(expected_transposed, self.evaluate(transposed))
| AdjointTest |
python | great-expectations__great_expectations | great_expectations/data_context/types/base.py | {
"start": 80909,
"end": 82534
} | class ____(AbstractConfigSchema):
class Meta:
unknown = INCLUDE
id = fields.String(required=False, allow_none=True)
@override
def dump(self, obj: dict, *, many: Optional[bool] = None) -> dict:
"""
Chetan - 20220803 - By design, Marshmallow accepts unknown fields through the
`unknown = INCLUDE` directive but only upon load. When dumping, it validates
each item against the declared fields and only includes explicitly named values.
As such, this override of parent behavior is meant to keep ALL values provided
to the config in the output dict. To get rid of this function, we need to
explicitly name all possible values in CheckpoingValidationDefinitionSchema as
schema fields.
"""
data = super().dump(obj, many=many)
for key, value in obj.items():
if key not in data and key not in self.declared_fields and value is not None:
data[key] = value
sorted_data = dict(sorted(data.items()))
return sorted_data
@pre_dump
def prepare_dump(self, data, **kwargs):
data = copy.deepcopy(data)
for key, value in data.items():
data[key] = convert_to_json_serializable(data=value)
return data
dataContextConfigSchema = DataContextConfigSchema()
dataConnectorConfigSchema = DataConnectorConfigSchema()
executionEngineConfigSchema = ExecutionEngineConfigSchema()
assetConfigSchema = AssetConfigSchema()
sorterConfigSchema = SorterConfigSchema()
progressBarsConfigSchema = ProgressBarsConfigSchema()
| CheckpointValidationDefinitionSchema |
python | huggingface__transformers | src/transformers/models/perceiver/modeling_perceiver.py | {
"start": 107475,
"end": 108923
} | class ____(PerceiverAbstractPositionEncoding):
"""Fourier (Sinusoidal) position encoding."""
def __init__(self, num_bands, max_resolution, concat_pos=True, sine_only=False):
super().__init__()
self.num_bands = num_bands
self.max_resolution = max_resolution
self.concat_pos = concat_pos
self.sine_only = sine_only
@property
def num_dimensions(self) -> int:
return len(self.max_resolution)
def output_size(self):
"""Returns size of positional encodings last dimension."""
num_dims = len(self.max_resolution)
encoding_size = self.num_bands * num_dims
if not self.sine_only:
encoding_size *= 2
if self.concat_pos:
encoding_size += self.num_dimensions
return encoding_size
def forward(
self,
index_dims: list[int],
batch_size: int,
device: torch.device,
dtype: torch.dtype,
pos: Optional[torch.FloatTensor] = None,
) -> torch.FloatTensor:
pos = _check_or_build_spatial_positions(pos, index_dims, batch_size)
fourier_pos_enc = generate_fourier_features(
pos,
num_bands=self.num_bands,
max_resolution=self.max_resolution,
concat_pos=self.concat_pos,
sine_only=self.sine_only,
).to(device=device, dtype=dtype)
return fourier_pos_enc
| PerceiverFourierPositionEncoding |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/integrations/airbyte_cloud/customize_airbyte_cloud_translator_asset_spec.py | {
"start": 517,
"end": 1191
} | class ____(DagsterAirbyteTranslator):
def get_asset_spec(self, props: AirbyteConnectionTableProps) -> dg.AssetSpec:
# We create the default asset spec using super()
default_spec = super().get_asset_spec(props)
# We customize the metadata and asset key prefix for all assets
return default_spec.replace_attributes(
key=default_spec.key.with_prefix("prefix"),
).merge_attributes(metadata={"custom": "metadata"})
airbyte_cloud_specs = load_airbyte_cloud_asset_specs(
airbyte_workspace, dagster_airbyte_translator=MyCustomAirbyteTranslator()
)
defs = dg.Definitions(assets=airbyte_cloud_specs)
| MyCustomAirbyteTranslator |
python | pyca__cryptography | src/cryptography/hazmat/primitives/asymmetric/ec.py | {
"start": 8821,
"end": 8998
} | class ____(EllipticCurve):
name = "secp256k1"
key_size = 256
group_order = (
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
)
| SECP256K1 |
python | Textualize__textual | tests/snapshot_tests/language_snippets.py | {
"start": 10755,
"end": 15671
} | class ____ {
constructor(width, height) {
this.width = width;
this.height = height;
}
getArea() {
return this.width * this.height;
}
}
const rectangle = new Rectangle(5, 3);
console.log("Rectangle area:", rectangle.getArea());
// Async/Await and Promises
async function fetchData() {
try {
const response = await fetch("https://api.example.com/data");
const data = await response.json();
console.log("Fetched data:", data);
} catch (error) {
console.error("Error:", error);
}
}
fetchData();
// Arrow functions
const greet = (name) => {
console.log(`Hello, ${name}!`);
};
greet("Alice");
// Destructuring assignment
const [a, b, ...rest] = [1, 2, 3, 4, 5];
console.log(a, b, rest);
// Spread operator
const arr1 = [1, 2, 3];
const arr2 = [4, 5, 6];
const combinedArr = [...arr1, ...arr2];
console.log("Combined array:", combinedArr);
// Ternary operator
const message = age >= 18 ? "You are an adult." : "You are a minor.";
console.log(message);
"""
BASH = """\
#!/bin/bash
# Variables
name="John"
age=30
is_student=true
# Printing variables
echo "Hello, $name! You are $age years old."
# Conditional statements
if [[ $age -ge 18 && $is_student == true ]]; then
echo "You are an adult student."
elif [[ $age -ge 18 ]]; then
echo "You are an adult."
else
echo "You are a minor."
fi
# Arrays
numbers=(1 2 3 4 5)
echo "Numbers: ${numbers[@]}"
# Loops
for num in "${numbers[@]}"; do
echo "Number: $num"
done
# Functions
greet() {
local name=$1
echo "Hello, $name!"
}
greet "Alice"
# Command substitution
current_date=$(date +%Y-%m-%d)
echo "Current date: $current_date"
# File operations
touch file.txt
echo "Some content" > file.txt
cat file.txt
# Conditionals with file checks
if [[ -f file.txt ]]; then
echo "file.txt exists."
else
echo "file.txt does not exist."
fi
# Case statement
case $age in
18)
echo "You are 18 years old."
;;
30)
echo "You are 30 years old."
;;
*)
echo "You are neither 18 nor 30 years old."
;;
esac
# While loop
counter=0
while [[ $counter -lt 5 ]]; do
echo "Counter: $counter"
((counter++))
done
# Until loop
until [[ $counter -eq 0 ]]; do
echo "Counter: $counter"
((counter--))
done
# Heredoc
cat << EOF
This is a heredoc.
It allows you to write multiple lines of text.
EOF
# Redirection
ls > file_list.txt
grep "file" file_list.txt > filtered_list.txt
# Pipes
cat file_list.txt | wc -l
# Arithmetic operations
result=$((10 + 5))
echo "Result: $result"
# Exporting variables
export DB_PASSWORD="secret"
# Sourcing external files
source config.sh
"""
RUST = """\
use std::collections::HashMap;
// Constants
const PI: f64 = 3.14159;
// Structs
struct Rectangle {
width: u32,
height: u32,
}
impl Rectangle {
fn area(&self) -> u32 {
self.width * self.height
}
}
// Enums
enum Result<T, E> {
Ok(T),
Err(E),
}
// Functions
fn greet(name: &str) {
println!("Hello, {}!", name);
}
fn main() {
// Variables
let name = "John";
let mut age = 30;
let is_student = true;
// Printing variables
println!("Hello, {}! You are {} years old.", name, age);
// Conditional statements
if age >= 18 && is_student {
println!("You are an adult student.");
} else if age >= 18 {
println!("You are an adult.");
} else {
println!("You are a minor.");
}
// Arrays
let numbers = [1, 2, 3, 4, 5];
println!("Numbers: {:?}", numbers);
// Vectors
let mut fruits = vec!["apple", "banana", "orange"];
fruits.push("grape");
println!("Fruits: {:?}", fruits);
// Loops
for num in &numbers {
println!("Number: {}", num);
}
// Pattern matching
let result = Result::Ok(42);
match result {
Result::Ok(value) => println!("Value: {}", value),
Result::Err(error) => println!("Error: {:?}", error),
}
// Ownership and borrowing
let s1 = String::from("hello");
let s2 = s1.clone();
println!("s1: {}, s2: {}", s1, s2);
// References
let rect = Rectangle {
width: 10,
height: 20,
};
println!("Rectangle area: {}", rect.area());
// Hash maps
let mut scores = HashMap::new();
scores.insert("Alice", 100);
scores.insert("Bob", 80);
println!("Alice's score: {}", scores["Alice"]);
// Closures
let square = |num: i32| num * num;
println!("Square of 5: {}", square(5));
// Traits
trait Printable {
fn print(&self);
}
impl Printable for Rectangle {
fn print(&self) {
println!("Rectangle: width={}, height={}", self.width, self.height);
}
}
rect.print();
// Modules
greet("Alice");
}
"""
JAVA = """\
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
// Classes and interfaces
interface Shape {
double getArea();
}
| Rectangle |
python | django__django | tests/model_fields/models.py | {
"start": 3586,
"end": 3656
} | class ____(models.Model):
value = models.IntegerField()
| IntegerModel |
python | gevent__gevent | src/greentest/3.10/test_threading.py | {
"start": 56203,
"end": 56300
} | class ____(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
| BarrierTests |
python | doocs__leetcode | solution/1400-1499/1409.Queries on a Permutation With Key/Solution.py | {
"start": 0,
"end": 284
} | class ____:
def processQueries(self, queries: List[int], m: int) -> List[int]:
p = list(range(1, m + 1))
ans = []
for v in queries:
j = p.index(v)
ans.append(j)
p.pop(j)
p.insert(0, v)
return ans
| Solution |
python | numpy__numpy | benchmarks/benchmarks/bench_io.py | {
"start": 3113,
"end": 3929
} | class ____(Benchmark):
# benchmarks for np.loadtxt operating with
# different dtypes parsed / cast from CSV files
params = (['float32', 'float64', 'int32', 'int64',
'complex128', 'str', 'object'],
[10, int(1e2), int(1e4), int(1e5)])
param_names = ['dtype', 'num_lines']
def setup(self, dtype, num_lines):
data = ['5, 7, 888'] * num_lines
self.csv_data = StringIO('\n'.join(data))
def time_loadtxt_dtypes_csv(self, dtype, num_lines):
# benchmark loading arrays of various dtypes
# from csv files
# state-dependent timing benchmark requires
# rewind of StringIO object
np.loadtxt(self.csv_data,
delimiter=',',
dtype=dtype)
self.csv_data.seek(0)
| LoadtxtCSVdtypes |
python | etianen__django-reversion | tests/test_app/tests/test_api.py | {
"start": 10962,
"end": 11889
} | class ____(TestModelMixin, TestBase):
databases = {"default", "mysql", "postgres"}
def testAddMeta(self):
with reversion.create_revision():
reversion.add_meta(TestMeta, name="meta v1")
obj = TestModel.objects.create()
self.assertSingleRevision((obj,), meta_names=("meta v1",))
def testAddMetaNoBlock(self):
with self.assertRaises(reversion.RevisionManagementError):
reversion.add_meta(TestMeta, name="meta v1")
def testAddMetaMultDb(self):
with reversion.create_revision(using="mysql"), reversion.create_revision(using="postgres"):
obj = TestModel.objects.create()
reversion.add_meta(TestMeta, name="meta v1")
self.assertNoRevision()
self.assertSingleRevision((obj,), meta_names=("meta v1",), using="mysql")
self.assertSingleRevision((obj,), meta_names=("meta v1",), using="postgres")
| AddMetaTest |
python | walkccc__LeetCode | solutions/2478. Number of Beautiful Partitions/2478.py | {
"start": 0,
"end": 753
} | class ____:
def beautifulPartitions(self, s: str, k: int, minLength: int) -> int:
def isPrime(c: str) -> bool:
return c in '2357'
if not isPrime(s[0]) or isPrime(s[-1]):
return 0
MOD = 1_000_000_007
@lru_cache(None)
def dp(i: int, k: int) -> int:
"""
Returns the number of beautiful partitions of s[i..n) with k bars (|)
left.
"""
if i <= len(s) and k == 0:
return 1
if i >= len(s):
return 0
# Don't split between s[i - 1] and s[i].
ans = dp(i + 1, k) % MOD
# Split between s[i - 1] and s[i].
if isPrime(s[i]) and not isPrime(s[i - 1]):
ans += dp(i + minLength, k - 1)
return ans % MOD
return dp(minLength, k - 1)
| Solution |
python | django-haystack__django-haystack | haystack/backends/__init__.py | {
"start": 1736,
"end": 7239
} | class ____:
"""
Abstract search engine base class.
"""
# Backends should include their own reserved words/characters.
RESERVED_WORDS = []
RESERVED_CHARACTERS = []
def __init__(self, connection_alias, **connection_options):
self.connection_alias = connection_alias
self.timeout = connection_options.get("TIMEOUT", 10)
self.include_spelling = connection_options.get("INCLUDE_SPELLING", False)
self.batch_size = connection_options.get("BATCH_SIZE", 1000)
self.silently_fail = connection_options.get("SILENTLY_FAIL", True)
self.distance_available = connection_options.get("DISTANCE_AVAILABLE", False)
def update(self, index, iterable, commit=True):
"""
Updates the backend when given a SearchIndex and a collection of
documents.
This method MUST be implemented by each backend, as it will be highly
specific to each one.
"""
raise NotImplementedError
def remove(self, obj_or_string):
"""
Removes a document/object from the backend. Can be either a model
instance or the identifier (i.e. ``app_name.model_name.id``) in the
event the object no longer exists.
This method MUST be implemented by each backend, as it will be highly
specific to each one.
"""
raise NotImplementedError
def clear(self, models=None, commit=True):
"""
Clears the backend of all documents/objects for a collection of models.
This method MUST be implemented by each backend, as it will be highly
specific to each one.
"""
raise NotImplementedError
@log_query
def search(self, query_string, **kwargs):
"""
Takes a query to search on and returns dictionary.
The query should be a string that is appropriate syntax for the backend.
The returned dictionary should contain the keys 'results' and 'hits'.
The 'results' value should be an iterable of populated SearchResult
objects. The 'hits' should be an integer count of the number of matched
results the search backend found.
This method MUST be implemented by each backend, as it will be highly
specific to each one.
"""
raise NotImplementedError
def build_search_kwargs(
self,
query_string,
sort_by=None,
start_offset=0,
end_offset=None,
fields="",
highlight=False,
facets=None,
date_facets=None,
query_facets=None,
narrow_queries=None,
spelling_query=None,
within=None,
dwithin=None,
distance_point=None,
models=None,
limit_to_registered_models=None,
result_class=None,
**extra_kwargs
):
# A convenience method most backends should include in order to make
# extension easier.
raise NotImplementedError
def prep_value(self, value):
"""
Hook to give the backend a chance to prep an attribute value before
sending it to the search engine. By default, just force it to unicode.
"""
return force_str(value)
def more_like_this(
self, model_instance, additional_query_string=None, result_class=None
):
"""
Takes a model object and returns results the backend thinks are similar.
This method MUST be implemented by each backend, as it will be highly
specific to each one.
"""
raise NotImplementedError(
"Subclasses must provide a way to fetch similar record via the 'more_like_this' method if supported by the backend."
)
def extract_file_contents(self, file_obj):
"""
Hook to allow backends which support rich-content types such as PDF,
Word, etc. extraction to process the provided file object and return
the contents for indexing
Returns None if metadata cannot be extracted; otherwise returns a
dictionary containing at least two keys:
:contents:
Extracted full-text content, if applicable
:metadata:
key:value pairs of text strings
"""
raise NotImplementedError(
"Subclasses must provide a way to extract metadata via the 'extract' method if supported by the backend."
)
def build_schema(self, fields):
"""
Takes a dictionary of fields and returns schema information.
This method MUST be implemented by each backend, as it will be highly
specific to each one.
"""
raise NotImplementedError(
"Subclasses must provide a way to build their schema."
)
def build_models_list(self):
"""
Builds a list of models for searching.
The ``search`` method should use this and the ``django_ct`` field to
narrow the results (unless the user indicates not to). This helps ignore
any results that are not currently handled models and ensures
consistent caching.
"""
from haystack import connections
models = []
for model in (
connections[self.connection_alias].get_unified_index().get_indexed_models()
):
models.append(get_model_ct(model))
return models
# Alias for easy loading within SearchQuery objects.
SearchBackend = BaseSearchBackend
| BaseSearchBackend |
python | sqlalchemy__sqlalchemy | test/orm/test_selectin_relations.py | {
"start": 98979,
"end": 100425
} | class ____(
fixtures.DeclarativeMappedTest, testing.AssertsExecutionResults
):
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(ComparableEntity, Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
b_id = Column(Integer)
b = relationship("B", primaryjoin="foreign(A.b_id) == B.id")
q = Column(Integer)
class B(ComparableEntity, Base):
__tablename__ = "b"
id = Column(Integer, primary_key=True)
x = Column(Integer)
y = Column(Integer)
@classmethod
def insert_data(cls, connection):
A, B = cls.classes("A", "B")
s = Session(connection)
b1, b2 = B(id=1, x=5, y=9), B(id=2, x=10, y=8)
s.add_all(
[
A(id=1, b_id=1),
A(id=2, b_id=5),
A(id=3, b_id=2),
A(id=4, b=None),
b1,
b2,
]
)
s.commit()
def test_missing_rec(self):
A, B = self.classes("A", "B")
s = fixture_session()
eq_(
s.query(A).options(selectinload(A.b)).order_by(A.id).all(),
[
A(id=1, b=B(id=1)),
A(id=2, b=None, b_id=5),
A(id=3, b=B(id=2)),
A(id=4, b=None, b_id=None),
],
)
| MissingForeignTest |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/batch.py | {
"start": 17276,
"end": 22155
} | class ____(AwsBaseOperator[BatchClientHook]):
"""
Create an AWS Batch compute environment.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BatchCreateComputeEnvironmentOperator`
:param compute_environment_name: Name of the AWS batch compute
environment (templated).
:param environment_type: Type of the compute-environment.
:param state: State of the compute-environment.
:param compute_resources: Details about the resources managed by the
compute-environment (templated). More details:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/batch.html#Batch.Client.create_compute_environment
:param unmanaged_v_cpus: Maximum number of vCPU for an unmanaged compute
environment. This parameter is only supported when the ``type``
parameter is set to ``UNMANAGED``.
:param service_role: IAM role that allows Batch to make calls to other AWS
services on your behalf (templated).
:param tags: Tags that you apply to the compute-environment to help you
categorize and organize your resources.
:param poll_interval: How long to wait in seconds between 2 polls at the environment status.
Only useful when deferrable is True.
:param max_retries: How many times to poll for the environment status.
Only useful when deferrable is True.
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param deferrable: If True, the operator will wait asynchronously for the environment to be created.
This mode requires aiobotocore module to be installed. (default: False)
"""
aws_hook_class = BatchClientHook
template_fields: Sequence[str] = aws_template_fields(
"compute_environment_name",
"compute_resources",
"service_role",
)
template_fields_renderers = {"compute_resources": "json"}
def __init__(
self,
compute_environment_name: str,
environment_type: str,
state: str,
compute_resources: dict,
unmanaged_v_cpus: int | None = None,
service_role: str | None = None,
tags: dict | None = None,
poll_interval: int = 30,
max_retries: int | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(**kwargs)
self.compute_environment_name = compute_environment_name
self.environment_type = environment_type
self.state = state
self.unmanaged_v_cpus = unmanaged_v_cpus
self.compute_resources = compute_resources
self.service_role = service_role
self.tags = tags or {}
self.poll_interval = poll_interval
self.max_retries = max_retries or 120
self.deferrable = deferrable
def execute(self, context: Context):
"""Create an AWS batch compute environment."""
kwargs: dict[str, Any] = {
"computeEnvironmentName": self.compute_environment_name,
"type": self.environment_type,
"state": self.state,
"unmanagedvCpus": self.unmanaged_v_cpus,
"computeResources": self.compute_resources,
"serviceRole": self.service_role,
"tags": self.tags,
}
response = self.hook.client.create_compute_environment(**trim_none_values(kwargs))
arn = response["computeEnvironmentArn"]
if self.deferrable:
self.defer(
trigger=BatchCreateComputeEnvironmentTrigger(
arn, self.poll_interval, self.max_retries, self.aws_conn_id, self.region_name
),
method_name="execute_complete",
)
self.log.info("AWS Batch compute environment created successfully")
return arn
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> str:
validated_event = validate_execute_complete_event(event)
if validated_event["status"] != "success":
raise AirflowException(
f"Error while waiting for the compute environment to be ready: {validated_event}"
)
return validated_event["value"]
| BatchCreateComputeEnvironmentOperator |
python | pytorch__pytorch | test/distributed/tensor/experimental/test_tp_transform.py | {
"start": 480,
"end": 1147
} | class ____(torch.nn.Module):
"""
A dummy model with list of MLPs.
"""
def __init__(self, num_mlps=3, bias=True):
super().__init__()
self.mlps = torch.nn.ModuleList()
for _ in range(num_mlps):
self.mlps.append(
torch.nn.Sequential(
torch.nn.Linear(6, 18),
torch.nn.ReLU(),
torch.nn.Linear(18, 6, bias=bias),
)
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = torch.chunk(x, 2, dim=1)[0]
for mlp in self.mlps:
x = mlp(x)
return x + torch.ones_like(x)
| MLPListModule |
python | getsentry__sentry | src/sentry/utils/outcomes.py | {
"start": 4458,
"end": 8625
} | class ____(IntEnum):
ACCEPTED = 0
FILTERED = 1
RATE_LIMITED = 2
INVALID = 3
ABUSE = 4
CLIENT_DISCARD = 5
CARDINALITY_LIMITED = 6
def api_name(self) -> str:
return self.name.lower()
@classmethod
def parse(cls, name: str) -> Outcome:
return Outcome[name.upper()]
def is_billing(self) -> bool:
return self in (Outcome.ACCEPTED, Outcome.RATE_LIMITED)
outcomes_publisher: KafkaPublisher | None = None
billing_publisher: KafkaPublisher | None = None
LATE_OUTCOME_THRESHOLD = timedelta(days=1)
def track_outcome(
org_id: int,
project_id: int,
key_id: int | None,
outcome: Outcome,
reason: str | None = None,
timestamp: datetime | None = None,
event_id: str | None = None,
category: DataCategory | None = None,
quantity: int | None = None,
) -> None:
"""
This is a central point to track org/project counters per incoming event.
NB: This should only ever be called once per incoming event, which means
it should only be called at the point we know the final outcome for the
event (invalid, rate_limited, accepted, discarded, etc.)
This sends the "outcome" message to Kafka which is used by Snuba to serve
data for SnubaTSDB and RedisSnubaTSDB, such as # of rate-limited/filtered
events.
"""
global outcomes_publisher
global billing_publisher
if quantity is None:
quantity = 1
assert isinstance(org_id, int)
assert isinstance(project_id, int)
assert isinstance(key_id, (type(None), int))
assert isinstance(outcome, Outcome)
assert isinstance(timestamp, (type(None), datetime))
assert isinstance(category, (type(None), DataCategory))
assert isinstance(quantity, int)
outcomes_config = kafka_config.get_topic_definition(Topic.OUTCOMES)
billing_config = kafka_config.get_topic_definition(Topic.OUTCOMES_BILLING)
use_billing = outcome.is_billing()
# Create a second producer instance only if the cluster differs. Otherwise,
# reuse the same producer and just send to the other topic.
if use_billing and billing_config["cluster"] != outcomes_config["cluster"]:
if billing_publisher is None:
cluster_name = billing_config["cluster"]
billing_publisher = KafkaPublisher(
kafka_config.get_kafka_producer_cluster_options(cluster_name)
)
publisher = billing_publisher
else:
if outcomes_publisher is None:
cluster_name = outcomes_config["cluster"]
outcomes_publisher = KafkaPublisher(
kafka_config.get_kafka_producer_cluster_options(cluster_name)
)
publisher = outcomes_publisher
now = to_datetime(time.time())
timestamp = timestamp or now
# Send billing outcomes to a dedicated topic.
topic_name = (
billing_config["real_topic_name"] if use_billing else outcomes_config["real_topic_name"]
)
# Send a snuba metrics payload.
publisher.publish(
topic_name,
json.dumps(
{
"timestamp": timestamp,
"org_id": org_id,
"project_id": project_id,
"key_id": key_id,
"outcome": outcome.value,
"reason": reason,
"event_id": event_id,
"category": category,
"quantity": quantity,
}
),
)
if now - timestamp.replace(tzinfo=now.tzinfo) > LATE_OUTCOME_THRESHOLD:
metrics.incr(
"events.outcomes.late",
skip_internal=True,
tags={
"outcome": outcome.name.lower(),
"reason": reason,
"category": category.api_name() if category is not None else "null",
"topic": topic_name,
},
)
metrics.incr(
"events.outcomes",
skip_internal=True,
tags={
"outcome": outcome.name.lower(),
"reason": reason,
"category": category.api_name() if category is not None else "null",
"topic": topic_name,
},
)
| Outcome |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/layers.py | {
"start": 4745,
"end": 5819
} | class ____(torch.nn.Module):
"""
Linear layers.
"""
def __init__(
self,
input_size: int,
num_layers: int,
hidden_size: int,
kernel_init: Initialization = Initialization.KaimingHeNormal,
kernel_gain: float = 1.0,
):
super().__init__()
self.layers = [
linear_layer(
input_size,
hidden_size,
kernel_init=kernel_init,
kernel_gain=kernel_gain,
)
]
self.layers.append(Swish())
for _ in range(num_layers - 1):
self.layers.append(
linear_layer(
hidden_size,
hidden_size,
kernel_init=kernel_init,
kernel_gain=kernel_gain,
)
)
self.layers.append(Swish())
self.seq_layers = torch.nn.Sequential(*self.layers)
def forward(self, input_tensor: torch.Tensor) -> torch.Tensor:
return self.seq_layers(input_tensor)
| LinearEncoder |
python | python-pillow__Pillow | Tests/test_file_libtiff.py | {
"start": 586,
"end": 1219
} | class ____:
def _assert_noerr(self, tmp_path: Path, im: TiffImagePlugin.TiffImageFile) -> None:
"""Helper tests that assert basic sanity about the g4 tiff reading"""
# 1 bit
assert im.mode == "1"
# Does the data actually load
im.load()
im.getdata()
assert isinstance(im, TiffImagePlugin.TiffImageFile)
assert im._compression == "group4"
# can we write it back out, in a different form.
out = tmp_path / "temp.png"
im.save(out)
out_bytes = io.BytesIO()
im.save(out_bytes, format="tiff", compression="group4")
| LibTiffTestCase |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/conjecture/test_provider.py | {
"start": 15503,
"end": 17092
} | class ____(TrivialProvider):
def observe_test_case(self):
return {"msg_key": "some message", "data_key": [1, "2", {}]}
def observe_information_messages(self, *, lifetime):
if lifetime == "test_case":
yield {"type": "info", "title": "trivial-data", "content": {"k2": "v2"}}
else:
assert lifetime == "test_function"
yield {"type": "alert", "title": "Trivial alert", "content": "message here"}
yield {"type": "info", "title": "trivial-data", "content": {"k2": "v2"}}
def realize(self, value, *, for_failure=False):
# Get coverage of the can't-realize path for observability outputs
raise BackendCannotProceed
def test_custom_observations_from_backend():
with temp_register_backend("observable", ObservableProvider):
@given(st.booleans())
@settings(backend="observable", database=None)
def test_function(_):
pass
with capture_observations() as ls:
test_function()
assert len(ls) >= 3
cases = [t.metadata.backend for t in ls if t.type == "test_case"]
assert {"msg_key": "some message", "data_key": [1, "2", {}]} in cases
assert "<backend failed to realize symbolic arguments>" in repr(ls)
infos = [
{k: v for k, v in dataclasses.asdict(t).items() if k in ("title", "content")}
for t in ls
if t.type != "test_case"
]
assert {"title": "Trivial alert", "content": "message here"} in infos
assert {"title": "trivial-data", "content": {"k2": "v2"}} in infos
| ObservableProvider |
python | huggingface__transformers | src/transformers/models/mixtral/modular_mixtral.py | {
"start": 11085,
"end": 11890
} | class ____(MistralPreTrainedModel):
_can_compile_fullgraph = False # MoE models don't work with torch.compile (`torch.where(condition)` not supported)
_can_record_outputs = {
"router_logits": OutputRecorder(MixtralTopKRouter, index=0),
"hidden_states": MixtralDecoderLayer,
"attentions": MixtralAttention,
}
@torch.no_grad()
def _init_weights(self, module):
PreTrainedModel._init_weights(self, module)
std = self.config.initializer_range
if isinstance(module, MixtralExperts):
init.normal_(module.gate_up_proj, mean=0.0, std=std)
init.normal_(module.down_proj, mean=0.0, std=std)
elif isinstance(module, MixtralTopKRouter):
init.normal_(module.weight, mean=0.0, std=std)
| MixtralPreTrainedModel |
python | anthropics__anthropic-sdk-python | src/anthropic/resources/beta/messages/messages.py | {
"start": 154108,
"end": 154611
} | class ____:
def __init__(self, messages: AsyncMessages) -> None:
self._messages = messages
self.create = _legacy_response.async_to_raw_response_wrapper(
messages.create,
)
self.count_tokens = _legacy_response.async_to_raw_response_wrapper(
messages.count_tokens,
)
@cached_property
def batches(self) -> AsyncBatchesWithRawResponse:
return AsyncBatchesWithRawResponse(self._messages.batches)
| AsyncMessagesWithRawResponse |
python | pypa__pip | src/pip/_vendor/truststore/_windows.py | {
"start": 1845,
"end": 1973
} | class ____(Structure):
_fields_ = (
("dwErrorStatus", DWORD),
("dwInfoStatus", DWORD),
)
| CERT_TRUST_STATUS |
python | mwaskom__seaborn | seaborn/_base.py | {
"start": 21646,
"end": 56600
} | class ____:
"""Base class for objects underlying *plot functions."""
wide_structure = {
"x": "@index", "y": "@values", "hue": "@columns", "style": "@columns",
}
flat_structure = {"x": "@index", "y": "@values"}
_default_size_range = 1, 2 # Unused but needed in tests, ugh
def __init__(self, data=None, variables={}):
self._var_levels = {}
# var_ordered is relevant only for categorical axis variables, and may
# be better handled by an internal axis information object that tracks
# such information and is set up by the scale_* methods. The analogous
# information for numeric axes would be information about log scales.
self._var_ordered = {"x": False, "y": False} # alt., used DefaultDict
self.assign_variables(data, variables)
# TODO Lots of tests assume that these are called to initialize the
# mappings to default values on class initialization. I'd prefer to
# move away from that and only have a mapping when explicitly called.
for var in ["hue", "size", "style"]:
if var in variables:
getattr(self, f"map_{var}")()
@property
def has_xy_data(self):
"""Return True at least one of x or y is defined."""
return bool({"x", "y"} & set(self.variables))
@property
def var_levels(self):
"""Property interface to ordered list of variables levels.
Each time it's accessed, it updates the var_levels dictionary with the
list of levels in the current semantic mappers. But it also allows the
dictionary to persist, so it can be used to set levels by a key. This is
used to track the list of col/row levels using an attached FacetGrid
object, but it's kind of messy and ideally fixed by improving the
faceting logic so it interfaces better with the modern approach to
tracking plot variables.
"""
for var in self.variables:
if (map_obj := getattr(self, f"_{var}_map", None)) is not None:
self._var_levels[var] = map_obj.levels
return self._var_levels
def assign_variables(self, data=None, variables={}):
"""Define plot variables, optionally using lookup from `data`."""
x = variables.get("x", None)
y = variables.get("y", None)
if x is None and y is None:
self.input_format = "wide"
frame, names = self._assign_variables_wideform(data, **variables)
else:
# When dealing with long-form input, use the newer PlotData
# object (internal but introduced for the objects interface)
# to centralize / standardize data consumption logic.
self.input_format = "long"
plot_data = PlotData(data, variables)
frame = plot_data.frame
names = plot_data.names
self.plot_data = frame
self.variables = names
self.var_types = {
v: variable_type(
frame[v],
boolean_type="numeric" if v in "xy" else "categorical"
)
for v in names
}
return self
def _assign_variables_wideform(self, data=None, **kwargs):
"""Define plot variables given wide-form data.
Parameters
----------
data : flat vector or collection of vectors
Data can be a vector or mapping that is coerceable to a Series
or a sequence- or mapping-based collection of such vectors, or a
rectangular numpy array, or a Pandas DataFrame.
kwargs : variable -> data mappings
Behavior with keyword arguments is currently undefined.
Returns
-------
plot_data : :class:`pandas.DataFrame`
Long-form data object mapping seaborn variables (x, y, hue, ...)
to data vectors.
variables : dict
Keys are defined seaborn variables; values are names inferred from
the inputs (or None when no name can be determined).
"""
# Raise if semantic or other variables are assigned in wide-form mode
assigned = [k for k, v in kwargs.items() if v is not None]
if any(assigned):
s = "s" if len(assigned) > 1 else ""
err = f"The following variable{s} cannot be assigned with wide-form data: "
err += ", ".join(f"`{v}`" for v in assigned)
raise ValueError(err)
# Determine if the data object actually has any data in it
empty = data is None or not len(data)
# Then, determine if we have "flat" data (a single vector)
if isinstance(data, dict):
values = data.values()
else:
values = np.atleast_1d(np.asarray(data, dtype=object))
flat = not any(
isinstance(v, Iterable) and not isinstance(v, (str, bytes))
for v in values
)
if empty:
# Make an object with the structure of plot_data, but empty
plot_data = pd.DataFrame()
variables = {}
elif flat:
# Handle flat data by converting to pandas Series and using the
# index and/or values to define x and/or y
# (Could be accomplished with a more general to_series() interface)
flat_data = pd.Series(data).copy()
names = {
"@values": flat_data.name,
"@index": flat_data.index.name
}
plot_data = {}
variables = {}
for var in ["x", "y"]:
if var in self.flat_structure:
attr = self.flat_structure[var]
plot_data[var] = getattr(flat_data, attr[1:])
variables[var] = names[self.flat_structure[var]]
plot_data = pd.DataFrame(plot_data)
else:
# Otherwise assume we have some collection of vectors.
# Handle Python sequences such that entries end up in the columns,
# not in the rows, of the intermediate wide DataFrame.
# One way to accomplish this is to convert to a dict of Series.
if isinstance(data, Sequence):
data_dict = {}
for i, var in enumerate(data):
key = getattr(var, "name", i)
# TODO is there a safer/more generic way to ensure Series?
# sort of like np.asarray, but for pandas?
data_dict[key] = pd.Series(var)
data = data_dict
# Pandas requires that dict values either be Series objects
# or all have the same length, but we want to allow "ragged" inputs
if isinstance(data, Mapping):
data = {key: pd.Series(val) for key, val in data.items()}
# Otherwise, delegate to the pandas DataFrame constructor
# This is where we'd prefer to use a general interface that says
# "give me this data as a pandas DataFrame", so we can accept
# DataFrame objects from other libraries
wide_data = pd.DataFrame(data, copy=True)
# At this point we should reduce the dataframe to numeric cols
numeric_cols = [
k for k, v in wide_data.items() if variable_type(v) == "numeric"
]
wide_data = wide_data[numeric_cols]
# Now melt the data to long form
melt_kws = {"var_name": "@columns", "value_name": "@values"}
use_index = "@index" in self.wide_structure.values()
if use_index:
melt_kws["id_vars"] = "@index"
try:
orig_categories = wide_data.columns.categories
orig_ordered = wide_data.columns.ordered
wide_data.columns = wide_data.columns.add_categories("@index")
except AttributeError:
category_columns = False
else:
category_columns = True
wide_data["@index"] = wide_data.index.to_series()
plot_data = wide_data.melt(**melt_kws)
if use_index and category_columns:
plot_data["@columns"] = pd.Categorical(plot_data["@columns"],
orig_categories,
orig_ordered)
# Assign names corresponding to plot semantics
for var, attr in self.wide_structure.items():
plot_data[var] = plot_data[attr]
# Define the variable names
variables = {}
for var, attr in self.wide_structure.items():
obj = getattr(wide_data, attr[1:])
variables[var] = getattr(obj, "name", None)
# Remove redundant columns from plot_data
plot_data = plot_data[list(variables)]
return plot_data, variables
def map_hue(self, palette=None, order=None, norm=None, saturation=1):
mapping = HueMapping(self, palette, order, norm, saturation)
self._hue_map = mapping
def map_size(self, sizes=None, order=None, norm=None):
mapping = SizeMapping(self, sizes, order, norm)
self._size_map = mapping
def map_style(self, markers=None, dashes=None, order=None):
mapping = StyleMapping(self, markers, dashes, order)
self._style_map = mapping
def iter_data(
self, grouping_vars=None, *,
reverse=False, from_comp_data=False,
by_facet=True, allow_empty=False, dropna=True,
):
"""Generator for getting subsets of data defined by semantic variables.
Also injects "col" and "row" into grouping semantics.
Parameters
----------
grouping_vars : string or list of strings
Semantic variables that define the subsets of data.
reverse : bool
If True, reverse the order of iteration.
from_comp_data : bool
If True, use self.comp_data rather than self.plot_data
by_facet : bool
If True, add faceting variables to the set of grouping variables.
allow_empty : bool
If True, yield an empty dataframe when no observations exist for
combinations of grouping variables.
dropna : bool
If True, remove rows with missing data.
Yields
------
sub_vars : dict
Keys are semantic names, values are the level of that semantic.
sub_data : :class:`pandas.DataFrame`
Subset of ``plot_data`` for this combination of semantic values.
"""
# TODO should this default to using all (non x/y?) semantics?
# or define grouping vars somewhere?
if grouping_vars is None:
grouping_vars = []
elif isinstance(grouping_vars, str):
grouping_vars = [grouping_vars]
elif isinstance(grouping_vars, tuple):
grouping_vars = list(grouping_vars)
# Always insert faceting variables
if by_facet:
facet_vars = {"col", "row"}
grouping_vars.extend(
facet_vars & set(self.variables) - set(grouping_vars)
)
# Reduce to the semantics used in this plot
grouping_vars = [var for var in grouping_vars if var in self.variables]
if from_comp_data:
data = self.comp_data
else:
data = self.plot_data
if dropna:
data = data.dropna()
levels = self.var_levels.copy()
if from_comp_data:
for axis in {"x", "y"} & set(grouping_vars):
converter = self.converters[axis].iloc[0]
if self.var_types[axis] == "categorical":
if self._var_ordered[axis]:
# If the axis is ordered, then the axes in a possible
# facet grid are by definition "shared", or there is a
# single axis with a unique cat -> idx mapping.
# So we can just take the first converter object.
levels[axis] = converter.convert_units(levels[axis])
else:
# Otherwise, the mappings may not be unique, but we can
# use the unique set of index values in comp_data.
levels[axis] = np.sort(data[axis].unique())
else:
transform = converter.get_transform().transform
levels[axis] = transform(converter.convert_units(levels[axis]))
if grouping_vars:
grouped_data = data.groupby(
grouping_vars, sort=False, as_index=False, observed=False,
)
grouping_keys = []
for var in grouping_vars:
key = levels.get(var)
grouping_keys.append([] if key is None else key)
iter_keys = itertools.product(*grouping_keys)
if reverse:
iter_keys = reversed(list(iter_keys))
for key in iter_keys:
pd_key = (
key[0] if len(key) == 1 and _version_predates(pd, "2.2.0") else key
)
try:
data_subset = grouped_data.get_group(pd_key)
except KeyError:
# XXX we are adding this to allow backwards compatibility
# with the empty artists that old categorical plots would
# add (before 0.12), which we may decide to break, in which
# case this option could be removed
data_subset = data.loc[[]]
if data_subset.empty and not allow_empty:
continue
sub_vars = dict(zip(grouping_vars, key))
yield sub_vars, data_subset.copy()
else:
yield {}, data.copy()
@property
def comp_data(self):
"""Dataframe with numeric x and y, after unit conversion and log scaling."""
if not hasattr(self, "ax"):
# Probably a good idea, but will need a bunch of tests updated
# Most of these tests should just use the external interface
# Then this can be re-enabled.
# raise AttributeError("No Axes attached to plotter")
return self.plot_data
if not hasattr(self, "_comp_data"):
comp_data = (
self.plot_data
.copy(deep=False)
.drop(["x", "y"], axis=1, errors="ignore")
)
for var in "yx":
if var not in self.variables:
continue
parts = []
grouped = self.plot_data[var].groupby(self.converters[var], sort=False)
for converter, orig in grouped:
orig = orig.mask(orig.isin([np.inf, -np.inf]), np.nan)
orig = orig.dropna()
if var in self.var_levels:
# TODO this should happen in some centralized location
# it is similar to GH2419, but more complicated because
# supporting `order` in categorical plots is tricky
orig = orig[orig.isin(self.var_levels[var])]
comp = pd.to_numeric(converter.convert_units(orig)).astype(float)
transform = converter.get_transform().transform
parts.append(pd.Series(transform(comp), orig.index, name=orig.name))
if parts:
comp_col = pd.concat(parts)
else:
comp_col = pd.Series(dtype=float, name=var)
comp_data.insert(0, var, comp_col)
self._comp_data = comp_data
return self._comp_data
def _get_axes(self, sub_vars):
"""Return an Axes object based on existence of row/col variables."""
row = sub_vars.get("row", None)
col = sub_vars.get("col", None)
if row is not None and col is not None:
return self.facets.axes_dict[(row, col)]
elif row is not None:
return self.facets.axes_dict[row]
elif col is not None:
return self.facets.axes_dict[col]
elif self.ax is None:
return self.facets.ax
else:
return self.ax
def _attach(
self,
obj,
allowed_types=None,
log_scale=None,
):
"""Associate the plotter with an Axes manager and initialize its units.
Parameters
----------
obj : :class:`matplotlib.axes.Axes` or :class:'FacetGrid`
Structural object that we will eventually plot onto.
allowed_types : str or list of str
If provided, raise when either the x or y variable does not have
one of the declared seaborn types.
log_scale : bool, number, or pair of bools or numbers
If not False, set the axes to use log scaling, with the given
base or defaulting to 10. If a tuple, interpreted as separate
arguments for the x and y axes.
"""
from .axisgrid import FacetGrid
if isinstance(obj, FacetGrid):
self.ax = None
self.facets = obj
ax_list = obj.axes.flatten()
if obj.col_names is not None:
self.var_levels["col"] = obj.col_names
if obj.row_names is not None:
self.var_levels["row"] = obj.row_names
else:
self.ax = obj
self.facets = None
ax_list = [obj]
# Identify which "axis" variables we have defined
axis_variables = set("xy").intersection(self.variables)
# -- Verify the types of our x and y variables here.
# This doesn't really make complete sense being here here, but it's a fine
# place for it, given the current system.
# (Note that for some plots, there might be more complicated restrictions)
# e.g. the categorical plots have their own check that as specific to the
# non-categorical axis.
if allowed_types is None:
allowed_types = ["numeric", "datetime", "categorical"]
elif isinstance(allowed_types, str):
allowed_types = [allowed_types]
for var in axis_variables:
var_type = self.var_types[var]
if var_type not in allowed_types:
err = (
f"The {var} variable is {var_type}, but one of "
f"{allowed_types} is required"
)
raise TypeError(err)
# -- Get axis objects for each row in plot_data for type conversions and scaling
facet_dim = {"x": "col", "y": "row"}
self.converters = {}
for var in axis_variables:
other_var = {"x": "y", "y": "x"}[var]
converter = pd.Series(index=self.plot_data.index, name=var, dtype=object)
share_state = getattr(self.facets, f"_share{var}", True)
# Simplest cases are that we have a single axes, all axes are shared,
# or sharing is only on the orthogonal facet dimension. In these cases,
# all datapoints get converted the same way, so use the first axis
if share_state is True or share_state == facet_dim[other_var]:
converter.loc[:] = getattr(ax_list[0], f"{var}axis")
else:
# Next simplest case is when no axes are shared, and we can
# use the axis objects within each facet
if share_state is False:
for axes_vars, axes_data in self.iter_data():
ax = self._get_axes(axes_vars)
converter.loc[axes_data.index] = getattr(ax, f"{var}axis")
# In the more complicated case, the axes are shared within each
# "file" of the facetgrid. In that case, we need to subset the data
# for that file and assign it the first axis in the slice of the grid
else:
names = getattr(self.facets, f"{share_state}_names")
for i, level in enumerate(names):
idx = (i, 0) if share_state == "row" else (0, i)
axis = getattr(self.facets.axes[idx], f"{var}axis")
converter.loc[self.plot_data[share_state] == level] = axis
# Store the converter vector, which we use elsewhere (e.g comp_data)
self.converters[var] = converter
# Now actually update the matplotlib objects to do the conversion we want
grouped = self.plot_data[var].groupby(self.converters[var], sort=False)
for converter, seed_data in grouped:
if self.var_types[var] == "categorical":
if self._var_ordered[var]:
order = self.var_levels[var]
else:
order = None
seed_data = categorical_order(seed_data, order)
converter.update_units(seed_data)
# -- Set numerical axis scales
# First unpack the log_scale argument
if log_scale is None:
scalex = scaley = False
else:
# Allow single value or x, y tuple
try:
scalex, scaley = log_scale
except TypeError:
scalex = log_scale if self.var_types.get("x") == "numeric" else False
scaley = log_scale if self.var_types.get("y") == "numeric" else False
# Now use it
for axis, scale in zip("xy", (scalex, scaley)):
if scale:
for ax in ax_list:
set_scale = getattr(ax, f"set_{axis}scale")
if scale is True:
set_scale("log", nonpositive="mask")
else:
set_scale("log", base=scale, nonpositive="mask")
# For categorical y, we want the "first" level to be at the top of the axis
if self.var_types.get("y", None) == "categorical":
for ax in ax_list:
ax.yaxis.set_inverted(True)
# TODO -- Add axes labels
def _get_scale_transforms(self, axis):
"""Return a function implementing the scale transform (or its inverse)."""
if self.ax is None:
axis_list = [getattr(ax, f"{axis}axis") for ax in self.facets.axes.flat]
scales = {axis.get_scale() for axis in axis_list}
if len(scales) > 1:
# It is a simplifying assumption that faceted axes will always have
# the same scale (even if they are unshared and have distinct limits).
# Nothing in the seaborn API allows you to create a FacetGrid with
# a mixture of scales, although it's possible via matplotlib.
# This is constraining, but no more so than previous behavior that
# only (properly) handled log scales, and there are some places where
# it would be much too complicated to use axes-specific transforms.
err = "Cannot determine transform with mixed scales on faceted axes."
raise RuntimeError(err)
transform_obj = axis_list[0].get_transform()
else:
# This case is more straightforward
transform_obj = getattr(self.ax, f"{axis}axis").get_transform()
return transform_obj.transform, transform_obj.inverted().transform
def _add_axis_labels(self, ax, default_x="", default_y=""):
"""Add axis labels if not present, set visibility to match ticklabels."""
# TODO ax could default to None and use attached axes if present
# but what to do about the case of facets? Currently using FacetGrid's
# set_axis_labels method, which doesn't add labels to the interior even
# when the axes are not shared. Maybe that makes sense?
if not ax.get_xlabel():
x_visible = any(t.get_visible() for t in ax.get_xticklabels())
ax.set_xlabel(self.variables.get("x", default_x), visible=x_visible)
if not ax.get_ylabel():
y_visible = any(t.get_visible() for t in ax.get_yticklabels())
ax.set_ylabel(self.variables.get("y", default_y), visible=y_visible)
def add_legend_data(
self, ax, func, common_kws=None, attrs=None, semantic_kws=None,
):
"""Add labeled artists to represent the different plot semantics."""
verbosity = self.legend
if isinstance(verbosity, str) and verbosity not in ["auto", "brief", "full"]:
err = "`legend` must be 'auto', 'brief', 'full', or a boolean."
raise ValueError(err)
elif verbosity is True:
verbosity = "auto"
keys = []
legend_kws = {}
common_kws = {} if common_kws is None else common_kws.copy()
semantic_kws = {} if semantic_kws is None else semantic_kws.copy()
# Assign a legend title if there is only going to be one sub-legend,
# otherwise, subtitles will be inserted into the texts list with an
# invisible handle (which is a hack)
titles = {
title for title in
(self.variables.get(v, None) for v in ["hue", "size", "style"])
if title is not None
}
title = "" if len(titles) != 1 else titles.pop()
title_kws = dict(
visible=False, color="w", s=0, linewidth=0, marker="", dashes=""
)
def update(var_name, val_name, **kws):
key = var_name, val_name
if key in legend_kws:
legend_kws[key].update(**kws)
else:
keys.append(key)
legend_kws[key] = dict(**kws)
if attrs is None:
attrs = {"hue": "color", "size": ["linewidth", "s"], "style": None}
for var, names in attrs.items():
self._update_legend_data(
update, var, verbosity, title, title_kws, names, semantic_kws.get(var),
)
legend_data = {}
legend_order = []
# Don't allow color=None so we can set a neutral color for size/style legends
if common_kws.get("color", False) is None:
common_kws.pop("color")
for key in keys:
_, label = key
kws = legend_kws[key]
level_kws = {}
use_attrs = [
*self._legend_attributes,
*common_kws,
*[attr for var_attrs in semantic_kws.values() for attr in var_attrs],
]
for attr in use_attrs:
if attr in kws:
level_kws[attr] = kws[attr]
artist = func(label=label, **{"color": ".2", **common_kws, **level_kws})
if _version_predates(mpl, "3.5.0"):
if isinstance(artist, mpl.lines.Line2D):
ax.add_line(artist)
elif isinstance(artist, mpl.patches.Patch):
ax.add_patch(artist)
elif isinstance(artist, mpl.collections.Collection):
ax.add_collection(artist)
else:
ax.add_artist(artist)
legend_data[key] = artist
legend_order.append(key)
self.legend_title = title
self.legend_data = legend_data
self.legend_order = legend_order
def _update_legend_data(
self,
update,
var,
verbosity,
title,
title_kws,
attr_names,
other_props,
):
"""Generate legend tick values and formatted labels."""
brief_ticks = 6
mapper = getattr(self, f"_{var}_map", None)
if mapper is None:
return
brief = mapper.map_type == "numeric" and (
verbosity == "brief"
or (verbosity == "auto" and len(mapper.levels) > brief_ticks)
)
if brief:
if isinstance(mapper.norm, mpl.colors.LogNorm):
locator = mpl.ticker.LogLocator(numticks=brief_ticks)
else:
locator = mpl.ticker.MaxNLocator(nbins=brief_ticks)
limits = min(mapper.levels), max(mapper.levels)
levels, formatted_levels = locator_to_legend_entries(
locator, limits, self.plot_data[var].infer_objects().dtype
)
elif mapper.levels is None:
levels = formatted_levels = []
else:
levels = formatted_levels = mapper.levels
if not title and self.variables.get(var, None) is not None:
update((self.variables[var], "title"), self.variables[var], **title_kws)
other_props = {} if other_props is None else other_props
for level, formatted_level in zip(levels, formatted_levels):
if level is not None:
attr = mapper(level)
if isinstance(attr_names, list):
attr = {name: attr for name in attr_names}
elif attr_names is not None:
attr = {attr_names: attr}
attr.update({k: v[level] for k, v in other_props.items() if level in v})
update(self.variables[var], formatted_level, **attr)
# XXX If the scale_* methods are going to modify the plot_data structure, they
# can't be called twice. That means that if they are called twice, they should
# raise. Alternatively, we could store an original version of plot_data and each
# time they are called they operate on the store, not the current state.
def scale_native(self, axis, *args, **kwargs):
# Default, defer to matplotlib
raise NotImplementedError
def scale_numeric(self, axis, *args, **kwargs):
# Feels needed to completeness, what should it do?
# Perhaps handle log scaling? Set the ticker/formatter/limits?
raise NotImplementedError
def scale_datetime(self, axis, *args, **kwargs):
# Use pd.to_datetime to convert strings or numbers to datetime objects
# Note, use day-resolution for numeric->datetime to match matplotlib
raise NotImplementedError
def scale_categorical(self, axis, order=None, formatter=None):
"""
Enforce categorical (fixed-scale) rules for the data on given axis.
Parameters
----------
axis : "x" or "y"
Axis of the plot to operate on.
order : list
Order that unique values should appear in.
formatter : callable
Function mapping values to a string representation.
Returns
-------
self
"""
# This method both modifies the internal representation of the data
# (converting it to string) and sets some attributes on self. It might be
# a good idea to have a separate object attached to self that contains the
# information in those attributes (i.e. whether to enforce variable order
# across facets, the order to use) similar to the SemanticMapping objects
# we have for semantic variables. That object could also hold the converter
# objects that get used, if we can decouple those from an existing axis
# (cf. https://github.com/matplotlib/matplotlib/issues/19229).
# There are some interactions with faceting information that would need
# to be thought through, since the converts to use depend on facets.
# If we go that route, these methods could become "borrowed" methods similar
# to what happens with the alternate semantic mapper constructors, although
# that approach is kind of fussy and confusing.
# TODO this method could also set the grid state? Since we like to have no
# grid on the categorical axis by default. Again, a case where we'll need to
# store information until we use it, so best to have a way to collect the
# attributes that this method sets.
# TODO if we are going to set visual properties of the axes with these methods,
# then we could do the steps currently in CategoricalPlotter._adjust_cat_axis
# TODO another, and distinct idea, is to expose a cut= param here
_check_argument("axis", ["x", "y"], axis)
# Categorical plots can be "univariate" in which case they get an anonymous
# category label on the opposite axis.
if axis not in self.variables:
self.variables[axis] = None
self.var_types[axis] = "categorical"
self.plot_data[axis] = ""
# If the "categorical" variable has a numeric type, sort the rows so that
# the default result from categorical_order has those values sorted after
# they have been coerced to strings. The reason for this is so that later
# we can get facet-wise orders that are correct.
# XXX Should this also sort datetimes?
# It feels more consistent, but technically will be a default change
# If so, should also change categorical_order to behave that way
if self.var_types[axis] == "numeric":
self.plot_data = self.plot_data.sort_values(axis, kind="mergesort")
# Now get a reference to the categorical data vector and remove na values
cat_data = self.plot_data[axis].dropna()
# Get the initial categorical order, which we do before string
# conversion to respect the original types of the order list.
# Track whether the order is given explicitly so that we can know
# whether or not to use the order constructed here downstream
self._var_ordered[axis] = order is not None or cat_data.dtype.name == "category"
order = pd.Index(categorical_order(cat_data, order), name=axis)
# Then convert data to strings. This is because in matplotlib,
# "categorical" data really mean "string" data, so doing this artists
# will be drawn on the categorical axis with a fixed scale.
# TODO implement formatter here; check that it returns strings?
if formatter is not None:
cat_data = cat_data.map(formatter)
order = order.map(formatter)
else:
cat_data = cat_data.astype(str)
order = order.astype(str)
# Update the levels list with the type-converted order variable
self.var_levels[axis] = order
# Now ensure that seaborn will use categorical rules internally
self.var_types[axis] = "categorical"
# Put the string-typed categorical vector back into the plot_data structure
self.plot_data[axis] = cat_data
return self
| VectorPlotter |
python | google__pytype | pytype/pyc/opcodes.py | {
"start": 14243,
"end": 14332
} | class ____(OpcodeWithArg):
_FLAGS = HAS_FREE | HAS_ARGUMENT
__slots__ = ()
| LOAD_CLOSURE |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF009.py | {
"start": 232,
"end": 303
} | class ____(NamedTuple):
something: int = 8
@dataclass()
| ImmutableType |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/old_sbang/package.py | {
"start": 276,
"end": 780
} | class ____(Package):
"""Package for testing sbang relocation"""
homepage = "https://www.example.com"
url = "https://www.example.com/old-sbang.tar.gz"
version("1.0.0", md5="0123456789abcdef0123456789abcdef")
def install(self, spec, prefix):
mkdirp(prefix.bin)
contents = f"""\
{sbang_shebang_line()}
#!/usr/bin/env python3
{prefix.bin}
"""
with open(os.path.join(self.prefix.bin, "script.sh"), "w", encoding="utf-8") as f:
f.write(contents)
| OldSbang |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/asset_checks.py | {
"start": 8858,
"end": 9273
} | class ____(graphene.ObjectType):
message = graphene.NonNull(graphene.String)
class Meta:
interfaces = (GrapheneError,)
name = "AssetCheckNeedsUserCodeUpgrade"
AssetChecksOrErrorUnion = Union[
GrapheneAssetCheckNeedsMigrationError,
GrapheneAssetCheckNeedsUserCodeUpgrade,
GrapheneAssetCheckNeedsAgentUpgradeError,
GrapheneAssetChecks,
]
| GrapheneAssetCheckNeedsUserCodeUpgrade |
python | kubernetes-client__python | kubernetes/client/models/v1_namespace_condition.py | {
"start": 383,
"end": 7363
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'last_transition_time': 'datetime',
'message': 'str',
'reason': 'str',
'status': 'str',
'type': 'str'
}
attribute_map = {
'last_transition_time': 'lastTransitionTime',
'message': 'message',
'reason': 'reason',
'status': 'status',
'type': 'type'
}
def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
"""V1NamespaceCondition - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._last_transition_time = None
self._message = None
self._reason = None
self._status = None
self._type = None
self.discriminator = None
if last_transition_time is not None:
self.last_transition_time = last_transition_time
if message is not None:
self.message = message
if reason is not None:
self.reason = reason
self.status = status
self.type = type
@property
def last_transition_time(self):
"""Gets the last_transition_time of this V1NamespaceCondition. # noqa: E501
Last time the condition transitioned from one status to another. # noqa: E501
:return: The last_transition_time of this V1NamespaceCondition. # noqa: E501
:rtype: datetime
"""
return self._last_transition_time
@last_transition_time.setter
def last_transition_time(self, last_transition_time):
"""Sets the last_transition_time of this V1NamespaceCondition.
Last time the condition transitioned from one status to another. # noqa: E501
:param last_transition_time: The last_transition_time of this V1NamespaceCondition. # noqa: E501
:type: datetime
"""
self._last_transition_time = last_transition_time
@property
def message(self):
"""Gets the message of this V1NamespaceCondition. # noqa: E501
Human-readable message indicating details about last transition. # noqa: E501
:return: The message of this V1NamespaceCondition. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this V1NamespaceCondition.
Human-readable message indicating details about last transition. # noqa: E501
:param message: The message of this V1NamespaceCondition. # noqa: E501
:type: str
"""
self._message = message
@property
def reason(self):
"""Gets the reason of this V1NamespaceCondition. # noqa: E501
Unique, one-word, CamelCase reason for the condition's last transition. # noqa: E501
:return: The reason of this V1NamespaceCondition. # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this V1NamespaceCondition.
Unique, one-word, CamelCase reason for the condition's last transition. # noqa: E501
:param reason: The reason of this V1NamespaceCondition. # noqa: E501
:type: str
"""
self._reason = reason
@property
def status(self):
"""Gets the status of this V1NamespaceCondition. # noqa: E501
Status of the condition, one of True, False, Unknown. # noqa: E501
:return: The status of this V1NamespaceCondition. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1NamespaceCondition.
Status of the condition, one of True, False, Unknown. # noqa: E501
:param status: The status of this V1NamespaceCondition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
@property
def type(self):
"""Gets the type of this V1NamespaceCondition. # noqa: E501
Type of namespace controller condition. # noqa: E501
:return: The type of this V1NamespaceCondition. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1NamespaceCondition.
Type of namespace controller condition. # noqa: E501
:param type: The type of this V1NamespaceCondition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1NamespaceCondition):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1NamespaceCondition):
return True
return self.to_dict() != other.to_dict()
| V1NamespaceCondition |
python | kubernetes-client__python | kubernetes/client/models/v1_ip_address.py | {
"start": 383,
"end": 6511
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1IPAddressSpec'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
"""V1IPAddress - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
@property
def api_version(self):
"""Gets the api_version of this V1IPAddress. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1IPAddress. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1IPAddress.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1IPAddress. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1IPAddress. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1IPAddress. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1IPAddress.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1IPAddress. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1IPAddress. # noqa: E501
:return: The metadata of this V1IPAddress. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1IPAddress.
:param metadata: The metadata of this V1IPAddress. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1IPAddress. # noqa: E501
:return: The spec of this V1IPAddress. # noqa: E501
:rtype: V1IPAddressSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1IPAddress.
:param spec: The spec of this V1IPAddress. # noqa: E501
:type: V1IPAddressSpec
"""
self._spec = spec
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1IPAddress):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1IPAddress):
return True
return self.to_dict() != other.to_dict()
| V1IPAddress |
python | pytorch__pytorch | torch/distributed/fsdp/_optim_utils.py | {
"start": 54952,
"end": 90432
} | class ____:
# The key of these dictionaries are the state name, e.g., `exp_avg`.
tensors: dict[str, _PosDimTensorInfo]
scalar_tensors: dict[str, torch.Tensor]
non_tensors: dict[str, Any]
def _allgather_state_info(
fsdp_state: _FSDPState,
input_states: dict[str, Any],
) -> list[dict[str, StateInfo]]:
"""
Given the ``input_states``, allgather StateInfo for each state. The function
uses all_gather_object to gather StateInfo so no GPU tensors are sent.
"""
processed_state_dict: dict[str, StateInfo] = {}
gathered_state_info: list[dict[str, StateInfo]] = [
{} for _ in range(fsdp_state.world_size)
]
for fqn, optim_state in input_states.items():
# Allgather the scalar tensor state, non-tensor states and tensors metadata.
processed_state = StateInfo({}, {}, {})
for state_name, value in sorted_items(optim_state):
if torch.is_tensor(value):
if value.dim() == 0:
# Ensure that `step` is on CPU.
processed_state.scalar_tensors[state_name] = value.cpu()
else:
processed_state.tensors[state_name] = _PosDimTensorInfo(
value.shape, value.dtype
)
else:
processed_state.non_tensors[state_name] = value
processed_state_dict[fqn] = processed_state
dist.all_gather_object(
gathered_state_info,
processed_state_dict,
group=fsdp_state.process_group,
)
return gathered_state_info
def _convert_all_state_info(
fsdp_param_info: FSDPParamInfo,
gathered_state_info: list[dict[str, StateInfo]],
input_states: dict[str, Any],
output_states: dict[str, dict[str, Any]],
) -> tuple[Optional[torch.dtype], dict[str, list[Optional[torch.Tensor]]]]:
"""
Given the ``gathered_state_info`` and ``input_states``, the API converted
the StateInfo into the original state if the state is not a non-scalar
tensor. For a multi-dimensional tensor, the local state will be stored in
``state_buffer`` in a correct order for later allgather purpose.
"""
state_buffers: dict[str, list[Optional[torch.Tensor]]] = {}
for fqn, gathered_state in output_states.items():
state_info = [s[fqn] for s in gathered_state_info]
all_tensor_states = sorted({n for state in state_info for n in state.tensors})
empty_ranks: set[int] = set()
dtype: Optional[torch.dtype] = None
# First check all the non-scalar states and get the information of
# states on each rank.
for state_name in all_tensor_states:
numels = []
_empty_ranks: set[int] = set()
for rank, object_state in enumerate(state_info):
numels.append(0)
info = object_state.tensors.get(state_name, None)
if info is not None:
numels[-1] = info.shape.numel()
if not dtype:
dtype = info.dtype
else:
if dtype != info.dtype:
raise AssertionError(
f"Expected dtype == info.dtype, got {dtype} != {info.dtype}"
)
if numels[-1] == 0:
_empty_ranks.add(rank)
if not (not empty_ranks or empty_ranks == _empty_ranks):
raise AssertionError(
f"Expected empty_ranks to be empty or equal to _empty_ranks, got {empty_ranks} vs {_empty_ranks}"
)
empty_ranks = _empty_ranks
if state_name not in state_buffers:
state_buffers[state_name] = [
None for _ in fsdp_param_info.param_indices
]
local_state = input_states[fqn].get(state_name, None)
# N.B. We need to move the state to compute_device. The reason is
# not yet clear and we need to figure out why the state may be on a
# different device.
if local_state is not None:
local_state = local_state.to(fsdp_param_info.state.compute_device)
state_buffers[state_name][fsdp_param_info.param_indices[fqn]] = local_state
# Restoring the scalar and non-tensor states. If the corresponding
# non-scalar states do not exist on the rank, we also skip the scalar
# non-tensor states on that rank.
for rank, object_state in enumerate(state_info):
if rank in empty_ranks:
continue
for name, non_tensor_value in object_state.non_tensors.items():
curr_non_tensor_value = gathered_state.get(name, None)
if not (
curr_non_tensor_value is None
or curr_non_tensor_value == non_tensor_value
):
raise AssertionError(
f"Rank {rank} has different values for {name}: {non_tensor_value}."
+ f" Other ranks: {curr_non_tensor_value}"
)
gathered_state[name] = non_tensor_value
for name, scalar_tensor_value in object_state.scalar_tensors.items():
curr_scalar_tensor_value = gathered_state.get(name, None)
if not (
curr_scalar_tensor_value is None
or torch.equal(scalar_tensor_value, curr_scalar_tensor_value)
):
raise AssertionError(
f"Rank {rank} has different values for {name}: {scalar_tensor_value}."
+ f" Other ranks: {curr_scalar_tensor_value}"
)
gathered_state[name] = scalar_tensor_value
return dtype, state_buffers # type: ignore[possibly-undefined]
def _unflatten_orig_param_states(
    fsdp_param_info: FSDPParamInfo,
    output_states: dict[str, dict[str, Any]],
    state_name: str,
    shard_state: bool,
    to_save: bool,
    cpu_offload: bool,
) -> None:
    """
    Given a output state dict, ``output_states``, which the keys are FQNs to the
    original parameters (not FlatParameters nor parameter ID), and the values
    are gathered states, unflatten the states to the original dimensions.
    This function performs the unflattening process in-place.

    Args:
        fsdp_param_info: FSDP metadata (handle, state, and FQN->index mapping)
            for the parameters owned by one FlatParameter.
        output_states: maps each original-parameter FQN to its gathered
            optimizer state; the ``state_name`` entry is reshaped in place.
        state_name: name of the optimizer state entry being unflattened.
        shard_state: if True, re-chunk the unflattened value across ranks via
            ``_ext_chunk_dtensor`` or ``_ext_chunk_tensor`` (depending on the
            ``_use_dtensor`` flag of the optim state dict config).
        to_save: if False, this rank saves nothing and the function is a no-op.
        cpu_offload: if True, move the final value to CPU.
    """
    if not to_save:
        return
    flat_param = fsdp_param_info.handle.flat_param
    fsdp_state = fsdp_param_info.state
    for fqn, gathered_state in output_states.items():
        value = gathered_state[state_name]
        param_idx = fsdp_param_info.param_indices[fqn]
        # TODO: This solution is not general and only apply to PTD TP solution.
        if isinstance(value, DTensor):
            placement = value.placements[0]
            # If gathered state is a DTensor and its TP placement is not Replicate(), we need to
            # gather the tensor on its TP dimension before chunking them into DTensor again.
            if placement != Replicate():
                placement_dim = placement.dim  # type: ignore[attr-defined]
                # NOTE(review): ``DTensor.redistribute`` returns a new DTensor;
                # the result is discarded here, so the subsequent ``reshape``
                # still operates on the sharded ``value`` — confirm intended.
                value.redistribute(placements=(Replicate(),))
                # Expand the TP-sharded dim by the mesh size to get the global shape.
                reshape_size = list(flat_param._shapes[param_idx])
                reshape_size[placement_dim] *= value.device_mesh.size(0)
                reshape_size = torch.Size(reshape_size)
                value = value.reshape(reshape_size)
            # If gathered state is a replicate DTensor, we directly reshape it.
            else:
                value = value.reshape(flat_param._shapes[param_idx])
        else:
            # If gathered state is a tensor, we directly reshape it into unflatten state.
            value = value.reshape(flat_param._shapes[param_idx])
        if shard_state:
            osd_config = fsdp_state._optim_state_dict_config
            if getattr(osd_config, "_use_dtensor", False):
                if fsdp_state._device_mesh is None:
                    raise AssertionError(
                        f"Expected _device_mesh to be not None, got {fsdp_state._device_mesh}"
                    )
                value = _ext_chunk_dtensor(
                    value,
                    fsdp_state.rank,
                    fsdp_state._device_mesh,
                    fsdp_state._fsdp_extension,
                )
            else:
                if fsdp_state.process_group is None:
                    raise AssertionError(
                        f"Expected process_group to be not None, got {fsdp_state.process_group}"
                    )
                value = _ext_chunk_tensor(
                    value,
                    fsdp_state.rank,
                    fsdp_state.world_size,
                    fsdp_state._device_handle.device_count(),
                    fsdp_state.process_group,
                    fsdp_state._fsdp_extension,
                )
        elif not cpu_offload:
            # Not sharding and keeping on device: detach-clone so the saved
            # state does not alias the gathered buffer.
            with SimpleProfiler.profile("clone"):
                value = value.detach().clone()
        if cpu_offload:
            with SimpleProfiler.profile(SimpleProfiler.Type.D2H):
                value = value.cpu()
        gathered_state[state_name] = value
def _allgather_orig_param_states(
    fsdp_param_info: FSDPParamInfo,
    gathered_state_info: list[dict[str, StateInfo]],
    input_states: dict[str, Any],
    shard_state: bool,
    to_save: bool,
    cpu_offload: bool,
) -> dict[str, dict[str, Any]]:
    """
    Given the ``gathered_state_info`` and ``input_states``, the API allgathers
    all tensor states and restore non-tensor states from ``gathered_state_info``.

    Returns:
        A dict mapping each original-parameter FQN to a dict of its fully
        unflattened optimizer state entries.
    """
    fsdp_state = fsdp_param_info.state
    if fsdp_state.rank == 0 and dist.get_debug_level() == dist.DebugLevel.DETAIL:
        logger.info(
            "Memory Summary before calling to _allgather_orig_param_states %s",
            fsdp_state._device_handle.memory_summary(),
        )
    output_states: dict[str, dict[str, Any]] = {fqn: {} for fqn in input_states}
    # Restore non-tensor/scalar states and collect per-state tensor buffers;
    # also determines the common dtype used for the gather buffers.
    dtype, state_buffers = _convert_all_state_info(
        fsdp_param_info, gathered_state_info, input_states, output_states
    )
    if len(state_buffers) == 0:
        return output_states
    # Whether each parameter (indexed like param_indices) has any optimizer state.
    has_state_params: list[bool] = [
        fqn in output_states for fqn, idx in fsdp_param_info.param_indices.items()
    ]
    # Loop through the ``state_buffers`` and construct the flattened, concatenated,
    # sharded states. The size of the constructed state will be the same size as
    # flat_param (also sharded).
    # Then we perform an allgather_into_tensor to get the full flat_param state.
    # The full flat_param state is the result of concatenation of multiple states
    # the order of flat_param._fqns.
    # The final step is to split the flat_param state into original param states
    # and return the result.
    flat_param = fsdp_param_info.handle.flat_param
    empty_func = functools.partial(
        torch.empty, dtype=dtype, device=fsdp_state.compute_device
    )
    gathered_tensor = empty_func(flat_param._padded_unsharded_size)
    # Synchronize can be slow but this will be easier for us to debug.
    fsdp_state._device_handle.synchronize()
    for state_name, buffers in state_buffers.items():
        local_buffers: list[torch.Tensor] = []
        # [begin, end] is this rank's shard range within the padded flat_param.
        begin = fsdp_state.rank * flat_param._sharded_size.numel()
        # End is inclusive.
        end = begin + flat_param._sharded_size.numel() - 1
        # param_idx corresponds to the parameter index in the FlatParameter.
        mem_offset, param_idx = 0, 0
        for numel, is_padding in zip(
            flat_param._numels_with_padding, flat_param._is_padding_mask
        ):
            frozen_and_no_state = not is_padding and (
                not fsdp_param_info.param_requires_grad[param_idx]
                and not has_state_params[param_idx]
            )
            if is_padding or frozen_and_no_state:
                # This memory range is a padding or the param is frozen and does
                # not require gradient. For the later case, we treat it as a
                # padding and add empty values to the local_buffers.
                padding_begin, padding_end = mem_offset, mem_offset + numel - 1
                if padding_begin <= begin <= padding_end:
                    # The range is an align padding before the first parameter in
                    # the shard. The shard includes parts of this align padding.
                    padding_len = (
                        padding_end - begin + 1
                        if end >= padding_end
                        else end - begin + 1
                    )
                elif padding_begin <= end <= padding_end:
                    # The range is an align padding after the last parameter in
                    # the shard. The shard includes parts of this align padding.
                    padding_len = (
                        end - padding_begin + 1
                        if begin <= padding_begin
                        else end - begin + 1
                    )
                elif begin < padding_begin <= padding_end < end:
                    # The range is an align padding that is completely in the
                    # shard.
                    padding_len = numel
                else:
                    padding_len = 0
                if padding_len:
                    local_buffers.append(empty_func(padding_len))
            if not is_padding:
                # This memory range is a parameter in FlatParameter. So there
                # should be an corresponding state in the optimizer unless the
                # parameter is frozen, which we treat it as a padding above.
                # We need to check if this rank owns the buffer. If this is None:
                # 1.) the rank does not own any part of the original parameter.
                #     As a result, there is no corresponding optimizer state on
                #     the rank as well.
                # 2.) the parameter is frozen AND no optimizer state for the
                #     parameter. If a parameter is frozen, there can still be
                #     optimizer state if the parameter is not frozen in the
                #     previous steps.
                if buffers[param_idx] is not None:
                    local_buffers.append(cast(torch.Tensor, buffers[param_idx]))
                param_idx += 1
            mem_offset += numel
        shard_numel_padded = flat_param._sharded_size.numel() - (
            sum(t.numel() for t in local_buffers)
        )
        if flat_param._shard_numel_padded != shard_numel_padded:
            raise AssertionError(
                "Manually calculated _sharded_numel_padded is incorrect. "
                f"_shard_numel_padded={flat_param._shard_numel_padded}, "
                f"shard_numel_padded={shard_numel_padded}, "
                f"_sharded_size.numel={flat_param._sharded_size.numel()}, "
                f"_numels_with_padding={flat_param._numels_with_padding}, "
                f"begin={begin}, end={end},"
            )
        if shard_numel_padded > 0:
            # Add right-handed padding.
            local_buffers.append(empty_func(shard_numel_padded))
        local_shard = torch.cat(local_buffers)
        if local_shard.numel() * fsdp_state.world_size != gathered_tensor.numel():
            raise AssertionError(
                "The size of local shard times the world size should equal to the "
                "gathered tensor size. The inconsistency may be from a bug of "
                "FlatParameter's metadata or the reconstruction logic in optimizer "
                "state dict."
            )
        fsdp_state._device_handle.synchronize()
        with SimpleProfiler.profile(SimpleProfiler.Type.ALLGATHER):
            dist.all_gather_into_tensor(
                gathered_tensor, local_shard, group=fsdp_state.process_group
            )
        # Synchronize can be slow but this will be easier for us to debug.
        fsdp_state._device_handle.synchronize()
        # Drop trailing padding, then split the full flat state back into
        # per-parameter views using the FlatParameter's own view logic.
        unpadded_tensor = gathered_tensor[: flat_param._unpadded_unsharded_size.numel()]
        flat_param_handle = fsdp_param_info.handle
        orig_states = flat_param_handle._get_unflat_views_aligned(unpadded_tensor)
        if len(orig_states) != len(fsdp_param_info.param_indices):
            raise AssertionError(
                "The number of parameters from FlatParameter is not consistent to "
                "the number of states used by optimizer state dict reconstruction "
                "logic."
            )
        for fqn, idx in fsdp_param_info.param_indices.items():
            if fsdp_param_info.param_requires_grad[idx] or fqn in output_states:
                output_states[fqn][state_name] = orig_states[idx]
        _unflatten_orig_param_states(
            fsdp_param_info,
            output_states,
            state_name,
            shard_state,
            to_save,
            cpu_offload,
        )
    del gathered_tensor
    return output_states
def _gather_all_orig_param_state(
    fsdp_param_info: FSDPParamInfo,
    input_states: dict[str, Any],
    shard_state: bool,
    to_save: bool,
    cpu_offload: bool,
) -> dict[str, Any]:
    """
    Given a optimizer state dict, ``input_states``, which the keys are FQNs to the
    original parameters (not FlatParameters nor parameter ID), gather all the
    states and unflatten them to the original dimensions. Note that all the
    params referred by the ``input_states`` must be managed by FSDP.

    Returns ``input_states`` unchanged when no gathering is needed (single
    rank or NO_SHARD), and ``{}`` on ranks that do not save.
    """
    fsdp_state = fsdp_param_info.state
    # Fast path: nothing is sharded, so the local states are already complete.
    if (
        fsdp_state.world_size == 1
        or fsdp_state.sharding_strategy == ShardingStrategy.NO_SHARD
    ):
        return input_states if to_save else {}
    with SimpleProfiler.profile(SimpleProfiler.Type.RESHARDING):
        # First exchange lightweight metadata, then allgather the tensor data.
        with SimpleProfiler.profile(SimpleProfiler.Type.ALLGATHER_OBJ):
            gathered_state_info = _allgather_state_info(fsdp_state, input_states)
        output_states = _allgather_orig_param_states(
            fsdp_param_info,
            gathered_state_info,
            input_states,
            shard_state,
            to_save,
            cpu_offload,
        )
    if to_save:
        # Sanity check: every trainable FSDP-managed param must have state.
        for key, idx in fsdp_param_info.param_indices.items():
            if key in output_states:
                continue
            if not fsdp_param_info.param_requires_grad[idx]:
                continue
            raise RuntimeError(
                f"{key} is not in the output state. "
                "The FSDPParamInfo has the param keys "
                f"{sorted(fsdp_param_info.param_indices.keys())} while "
                "the output_states has the param keys "
                f"{sorted(output_states.keys())}."
            )
        return output_states
    else:
        return {}
def _convert_state_with_orig_params(
    all_optim_state_keys: list[_OptimStateKey],
    optim_state_key_to_param_key: dict[_OptimStateKey, Union[int, str]],
    fqn_to_fsdp_param_info: dict[str, FSDPParamInfo],
    optim_state_dict: dict[Union[str, int], Any],
    to_save: bool,
    shard_state: bool,
    cpu_offload: bool = True,
) -> dict[str, Any]:
    """
    Convert a param-key-indexed optimizer ``state`` dict into an FQN-indexed
    one for the ``use_orig_params=True`` path.

    FSDP-managed entries are first grouped per FlatParameter (via their shared
    ``FSDPParamInfo``) so that all of a FlatParameter's states can be gathered
    in one batch by ``_gather_all_orig_param_state``. Non-FSDP-managed entries
    are shallow-copied directly (and CPU-offloaded if requested).

    Raises:
        AssertionError: on inconsistent key metadata.
        RuntimeError: if a trainable FSDP-managed param has no optimizer state.
    """
    fsdp_osd_state: dict[str, Any] = {}
    # This variable is used to deduplicate the FSDPParamInfo as one FSDPParamInfo
    # usually corresponds to multiple parameters. We could not use FSDPParamInfo
    # as the key because FSDPParamInfo is not hashable. As a result, we fall back
    # to `id(FSDPParamInfo)`, which the type is an integer.
    all_states: dict[int, dict[str, Any]] = {}
    # Iterate in rank 0's flat parameter ID order to ensure aligned all-gathers
    # across ranks
    for optim_state_key in all_optim_state_keys:
        param_key: Union[str, int, None] = optim_state_key_to_param_key.get(
            optim_state_key
        )
        if param_key is None and not optim_state_key.is_fsdp_managed:
            continue
        if optim_state_key.is_fsdp_managed:
            fqn = optim_state_key.unflat_param_names[0]
            fsdp_param_info = fqn_to_fsdp_param_info.get(fqn)
            if fsdp_param_info is None:
                # This can happen if the not all FSDP instances have all the
                # parameters. This can happen with FSDP + some MPMD style
                # parallelism.
                # TODO: it is unclear if we need to do the same check with
                # non-FSDP managed keys.
                continue
            # param_key may be None when this rank has no state for the param;
            # an empty dict still participates in the batched gather below.
            state = {} if param_key is None else optim_state_dict[param_key]
            if id(fsdp_param_info) not in all_states:
                all_states[id(fsdp_param_info)] = {}
            all_states[id(fsdp_param_info)][fqn] = state
        elif to_save:
            if len(optim_state_key.unflat_param_names) != 1:
                raise AssertionError(
                    f"Expected len(optim_state_key.unflat_param_names) == 1, got {len(optim_state_key.unflat_param_names)}"
                )
            unflat_param_name = optim_state_key.unflat_param_names[0]
            with SimpleProfiler.profile("none_fsdp_managed_copy"):
                param_key = cast(Union[str, int], param_key)
                fsdp_osd_state[unflat_param_name] = copy.copy(
                    optim_state_dict[param_key]
                )
                if cpu_offload:
                    for state_name, value in sorted_items(
                        fsdp_osd_state[unflat_param_name]
                    ):
                        if not torch.is_tensor(value):
                            continue
                        fsdp_osd_state[unflat_param_name][state_name] = value.cpu()
    # Instead of gathering the state of each parameter individually, we perform
    # the gathering all at once to speed up the process.
    for _all_states in all_states.values():
        fqn = next(iter(_all_states.keys()))
        fsdp_param_info = fqn_to_fsdp_param_info[fqn]
        if len(fsdp_param_info.param_requires_grad) <= 0:
            raise AssertionError(
                "With use_orig_params, FSDPParamInfo should have requires_grad "
                "information. However, the length is zero."
            )
        # Verify every trainable param of this FlatParameter has an entry.
        for key, idx in fsdp_param_info.param_indices.items():
            if key in _all_states:
                continue
            if not fsdp_param_info.param_requires_grad[idx]:
                continue
            raise RuntimeError(
                f"{key} is not in the optimizer state. "
                "The FSDPParamInfo has the param keys "
                f"{sorted(fsdp_param_info.param_indices.keys())} while "
                "the optimizer has the param keys "
                f"{sorted(_all_states.keys())}."
            )
        fsdp_osd_state.update(
            _gather_all_orig_param_state(
                fsdp_param_info,
                _all_states,
                shard_state,
                to_save,
                cpu_offload,
            )
        )
    return fsdp_osd_state
def _convert_state_with_flat_params(
    all_optim_state_keys: list[_OptimStateKey],
    optim_state_key_to_param_key: dict[_OptimStateKey, Union[int, str]],
    fqn_to_fsdp_param_info: dict[str, FSDPParamInfo],
    optim_state_dict: dict[Union[str, int], Any],
    to_save: bool,
    shard_state: bool,
    cpu_offload: bool = True,
) -> dict[str, Any]:
    """
    Convert a param-key-indexed optimizer ``state`` dict into an FQN-indexed
    one for the ``use_orig_params=False`` (FlatParameter) path.

    Each FSDP-managed key's flat state is expanded by ``_unflatten_optim_state``
    into one entry per original (unflat) parameter name; non-FSDP-managed
    entries are shallow-copied (and CPU-offloaded if requested).

    Raises:
        AssertionError: if a key has no param mapping or the unflattened
            state count does not match the unflat param names.
    """
    fsdp_osd_state: dict[str, Any] = {}
    # Iterate in rank 0's flat parameter ID order to ensure aligned all-gathers
    # across ranks
    for optim_state_key in all_optim_state_keys:
        param_key: Union[str, int, None] = optim_state_key_to_param_key.get(
            optim_state_key
        )
        if param_key is None:
            raise AssertionError(
                "If use_orig_params is False, we must be able to find the "
                f"corresponding param id. {optim_state_key} {param_key}"
            )
        if optim_state_key.is_fsdp_managed:
            # If there are multiple unflat_param_names (not use_orig_params),
            # they share the same FSDPParamInfo. So the first unflat_param_name
            # is sufficient to fetch the FSDPParamInfo.
            fqn = optim_state_key.unflat_param_names[0]
            fsdp_param_info = fqn_to_fsdp_param_info[fqn]
            unflat_state = _unflatten_optim_state(
                fsdp_param_info,
                optim_state_dict[param_key],
                to_save,
                shard_state,
                cpu_offload,
            )
            if to_save:
                if len(unflat_state) != len(optim_state_key.unflat_param_names):
                    raise AssertionError(
                        f"Expected len(unflat_state) == len(optim_state_key.unflat_param_names), "
                        f"got {len(unflat_state)} != {len(optim_state_key.unflat_param_names)}"
                    )
                fsdp_osd_state.update(
                    zip(
                        optim_state_key.unflat_param_names,
                        unflat_state,
                    )
                )
        elif to_save:
            if len(optim_state_key.unflat_param_names) != 1:
                raise AssertionError(
                    f"Expected len(optim_state_key.unflat_param_names) == 1, got {len(optim_state_key.unflat_param_names)}"
                )
            unflat_param_name = optim_state_key.unflat_param_names[0]
            fsdp_osd_state[unflat_param_name] = copy.copy(optim_state_dict[param_key])
            if cpu_offload:
                for state_name, value in sorted_items(
                    fsdp_osd_state[unflat_param_name]
                ):
                    if not torch.is_tensor(value):
                        continue
                    fsdp_osd_state[unflat_param_name][state_name] = value.cpu()
    return fsdp_osd_state
@torch.no_grad()
def _optim_state_dict(
    model: nn.Module,
    optim: torch.optim.Optimizer,
    optim_state_dict: dict[str, Any],
    optim_input: Optional[
        Union[
            list[dict[str, Any]],
            Iterable[nn.Parameter],
        ]
    ],
    rank0_only: bool,
    shard_state: bool,
    group: Optional[dist.ProcessGroup],
    using_optim_input: bool,
    use_orig_params: bool = False,
    cpu_offload: bool = True,
) -> dict[str, Any]:
    """
    Consolidates the optimizer state and returns it as a :class:`dict`
    following the convention of :meth:`torch.optim.Optimizer.state_dict`,
    i.e. with keys ``"state"`` and ``"param_groups"``.
    The flat parameters in ``FSDP`` modules contained in ``model`` are mapped
    back to their unflattened parameters.

    Parameter keys are not well-defined. For a regular optimizer, the optimizer
    state_dict contains a mapping from parameter IDs to parameter states.
    Parameter IDs are the order of parameters in ``optim.param_groups()`` across
    all the groups. This API also allows user to pass ``optim_input`` for the
    mapping between parameters and parameter IDs. Using ``optim_input`` is being
    deprecated.

    If the optimizer is a ``NamedOptimizer``, the optimizer state_dict does not
    contain parameter IDs mapping but a mapping from parameter FQNs to parameter
    states. This API finds the mapping from FQNs to parameters if the optimizer
    is a ``NamedOptimizer``.

    If ``use_orig_params`` is True, each rank will have all FSDP-managed
    parameters but some of these parameters may be empty due to the sharding.
    For a regular optim.Optimizer, states for those empty parameters will
    not be initialized. So, when aggregating the FQNs across ranks, no assert
    will be raised on a rank even if it does not have all the states -- it is
    valid and FSDP knows how to aggregate them. However, FSDP has to ignore
    handling those parameters that are not managed by FSDP and do not exist on
    the local rank -- those are managed by other parallelisms and FSDP does not
    know how to handle/aggregate them.

    Args:
        model (nn.Module): Root module (which may or may not be a
            :class:`FullyShardedDataParallel` instance) whose parameters
            were passed into the optimizer ``optim``.
        optim (torch.optim.Optimizer): Optimizer for ``model`` 's
            parameters.
        optim_state_dict (Dict[str, Any]): The optimizer state dict to
            consolidate; must contain the key ``"state"``.
        optim_input: Deprecated way of specifying the parameter-to-ID
            mapping; only used when ``using_optim_input`` is ``True``.
        rank0_only (bool): If ``True``, saves the populated :class:`dict`
            only on rank 0; if ``False``, saves it on all ranks. (Default:
            ``True``)
        shard_state (bool): If ``True``, shard and distribute all
            non-zero-dimension states.
        group (Optional[dist.ProcessGroup]): Process group used for rank
            queries and key alignment.
        using_optim_input (bool): If ``True``, derive the param mapping from
            ``optim_input`` instead of ``optim``.
        use_orig_params (bool): Selects the conversion path; must match how
            the FSDP instances were constructed.
        cpu_offload (bool): If ``True``, move saved tensor states to CPU.

    Returns:
        Dict[str, Any]: A :class:`dict` containing the optimizer state for
        ``model`` 's original unflattened parameters and including keys
        "state" and "param_groups" following the convention of
        :meth:`torch.optim.Optimizer.state_dict`. If ``rank0_only=False``,
        then nonzero ranks return an empty :class:`dict`.
    """
    SimpleProfiler.reset()
    cm = ExitStack()
    cm.enter_context(SimpleProfiler.profile(SimpleProfiler.Type.ALL))
    _reset_flat_param_grad_info_if_needed(traversal_utils._get_fsdp_handles(model))
    # This rank materializes results if saving on all ranks, it is rank 0,
    # or sharded saving is requested.
    to_save = not rank0_only or dist.get_rank(group) == 0 or shard_state

    with SimpleProfiler.profile("preprocessing"):
        param_to_fqns = _get_param_to_fqns(model)
        flat_param_to_fqn = _get_flat_param_to_fqn(model)
        is_named_optimizer = _is_named_optimizer(optim_state_dict)

        param_key_to_param = cast(
            dict[Union[int, str], nn.Parameter],
            (
                _get_param_id_to_param_from_optim_input(model, optim_input)
                if using_optim_input
                else _get_param_key_to_param(
                    optim, model, is_named_optimizer, param_to_fqns, flat_param_to_fqn
                )
            ),
        )
        fqn_to_fsdp_param_info = _get_fqn_to_fsdp_param_info(model)

    with SimpleProfiler.profile("preprocessing_with_comm"):
        (
            all_optim_state_keys,
            optim_state_key_to_param_key,
        ) = _map_param_key_to_optim_keys(
            optim_state_dict,
            group,
            param_key_to_param,
            param_to_fqns,
            fqn_to_fsdp_param_info,
            merge_keys=use_orig_params,
        )

    with SimpleProfiler.profile("state_converting"):
        convert_fn = (
            _convert_state_with_orig_params
            if use_orig_params
            else _convert_state_with_flat_params
        )
        fsdp_osd_state = convert_fn(
            all_optim_state_keys,
            optim_state_key_to_param_key,
            fqn_to_fsdp_param_info,
            optim_state_dict["state"],
            to_save,
            shard_state,
            cpu_offload,
        )

    # At this point, communication is complete and ranks can return early if nothing
    # will be saved on that rank.
    if not to_save:
        return {}

    fsdp_osd: dict[str, Any] = {"state": fsdp_osd_state}

    # Pass through any states FSDP cannot map to a parameter (e.g. user-defined
    # or externally managed state), with a warning.
    flat_param_fqns = set(flat_param_to_fqn.values())
    for key, value in optim_state_dict["state"].items():
        if key in fsdp_osd_state:
            continue
        if key in flat_param_fqns:
            continue
        if key in param_key_to_param:
            continue
        # This key is not recognized by FSDP. It may be a user-defined state
        # or some parameters state that FSDP is unable to map from
        # ``optim.param_groups``.
        warnings.warn(
            f"Found a optim state, {key}, that FSDP cannot process. FSDP "
            "will directly copy everything to the returned state_dict. In "
            "most cases, this is a user-defined state that is not "
            "associated with any particular parameter. Another possible "
            "case is this state is managed by TorchRec. Otherwise, there may "
            " be a mismatched assumption of optim_state_dict of this mode.",
            stacklevel=2,
        )
        fsdp_osd_state[key] = value

    if "param_groups" in optim_state_dict:
        fsdp_osd["param_groups"] = _unflatten_param_groups(
            optim_state_dict, param_key_to_param, param_to_fqns
        )

    cm.close()
    SimpleProfiler.dump_and_reset("FSDP _optim_state_dict() profiling: ")

    return fsdp_osd
def _get_fqn_to_fsdp_param_info(model: nn.Module) -> dict[str, FSDPParamInfo]:
    """
    Construct the mapping from a param's fqn to its corresponding ``FSDPParamInfo``
    if the param is managed by FSDP. Shared parameters, or original parameters that
    are shared across multiple nn.Modules, are required to belong to one and only
    one FSDP instance and thus correspond to one ``FlatParameter``. Within the one
    ``FlatParameter``, ``FlatParameter._fqns`` only stores the first FQN of a shared
    parameter. Thus, the keys in the mapping are guaranteed to map to unique parameters.
    """

    # Per-module visitor: record one FSDPParamInfo per FlatParameter handle and
    # register every FQN that the FlatParameter covers.
    def module_fn(module, prefix, tree_level, fqn_to_param_info):
        fsdp_state = _get_module_fsdp_state_if_fully_sharded_module(module)
        if fsdp_state is None:
            return
        _lazy_init(fsdp_state, module)
        handle = _module_handle(fsdp_state, module)
        if not handle:
            return
        flat_param = handle.flat_param
        fsdp_param_info = FSDPParamInfo(fsdp_state, handle, {}, [])
        # NOTE: `idx` indexes into the data structures *without* padding
        # elements
        for idx, local_fqn in enumerate(flat_param._fqns):
            fqn = clean_tensor_name(prefix + local_fqn)
            if fqn in fqn_to_param_info:
                # A repeated FQN must come from the same FlatParameter (shared
                # params belong to exactly one FSDP instance).
                if fqn_to_param_info[fqn].handle.flat_param is not flat_param:
                    raise AssertionError(
                        f"Expected fqn_to_param_info[fqn].handle.flat_param is flat_param for {fqn}"
                    )
            fqn_to_param_info[fqn] = fsdp_param_info
            fsdp_param_info.param_indices[fqn] = idx
            # NOTE(review): `_params` appears to be populated only on some
            # configurations (presumably use_orig_params=True) — confirm.
            if flat_param._params is not None:
                fsdp_param_info.param_requires_grad.append(
                    flat_param._params[idx].requires_grad
                )

    def return_fn(fqn_to_param_info):
        return fqn_to_param_info

    fqn_to_param_info: dict[str, FSDPParamInfo] = {}
    # FlatParameter._fqns stores the local fqn, starting from the root of the
    # FSDP. Using _apply_to_modules() with model (may not be the FSDP root
    # module) allows us to construct the global fqn.
    return _apply_to_modules(
        model,
        module_fn,
        return_fn,
        [fqn for fqn, _ in _named_parameters_with_duplicates(model)],
        fqn_to_param_info,
    )
@no_type_check
def _set_optim_use_dtensor(
    fsdp_state: _FSDPState,
    state_dict_settings: StateDictSettings,
) -> None:
    """
    Turn on DTensor-based optimizer state dicts when FSDP was initialized
    with a device_mesh.

    If ``fsdp_state`` carries a ``_device_mesh``, flip ``_use_dtensor`` on the
    optimizer state dict config; ``LOCAL_STATE_DICT`` cannot represent DTensor
    state and is rejected with a ``RuntimeError``.
    """
    # No device_mesh: nothing to do.
    if not getattr(fsdp_state, "_device_mesh", None):
        return
    if state_dict_settings.state_dict_type == StateDictType.LOCAL_STATE_DICT:
        raise RuntimeError(
            "Found state_dict_type LOCAL_STATE_DICT.",
            "DeviceMesh is not compatible with LOCAL_STATE_DICT.",
            "Please set state_dict_type to SHARDED_STATE_DICT to get DTensor state_dict.",
        )
    state_dict_settings.optim_state_dict_config._use_dtensor = True
| StateInfo |
python | yandexdataschool__Practical_RL | week06_policy_based/runners.py | {
"start": 58,
"end": 2534
} | class ____:
"""Reinforcement learning runner in an environment with given policy"""
def __init__(self, env, policy, nsteps, transforms=None, step_var=None):
self.env = env
self.policy = policy
self.nsteps = nsteps
self.transforms = transforms or []
self.step_var = step_var if step_var is not None else 0
self.state = {"latest_observation": self.env.reset()[0]}
@property
def nenvs(self):
"""Returns number of batched envs or `None` if env is not batched"""
return getattr(self.env.unwrapped, "nenvs", None)
def reset(self, **kwargs):
"""Resets env and runner states."""
self.state["latest_observation"] = self.env.reset(**kwargs)[0]
self.policy.reset()
def add_summary(self, name, val):
"""Writes logs"""
add_summary = self.env.get_wrapper_attr("add_summary")
add_summary(name, val)
def get_next(self):
"""Runs the agent in the environment."""
trajectory = defaultdict(list, {"actions": []})
observations = []
rewards = []
resets = []
self.state["env_steps"] = self.nsteps
for i in range(self.nsteps):
observations.append(self.state["latest_observation"])
act = self.policy.act(self.state["latest_observation"])
if "actions" not in act:
raise ValueError(
"result of policy.act must contain 'actions' "
f"but has keys {list(act.keys())}"
)
for key, val in act.items():
trajectory[key].append(val)
obs, rew, terminated, truncated, _ = self.env.step(
trajectory["actions"][-1]
)
self.state["latest_observation"] = obs
rewards.append(rew)
reset = np.logical_or(terminated, truncated)
resets.append(reset)
self.step_var += self.nenvs or 1
# Only reset if the env is not batched. Batched envs should
# auto-reset.
if not self.nenvs and np.all(reset):
self.state["env_steps"] = i + 1
self.state["latest_observation"] = self.env.reset()[0]
trajectory.update(observations=observations, rewards=rewards, resets=resets)
trajectory["state"] = self.state
for transform in self.transforms:
transform(trajectory)
return trajectory
| EnvRunner |
python | django-debug-toolbar__django-debug-toolbar | tests/panels/test_request.py | {
"start": 186,
"end": 8869
} | class ____(BaseTestCase):
panel_id = RequestPanel.panel_id
def test_non_ascii_session(self):
self.request.session = {"où": "où"}
response = self.panel.process_request(self.request)
self.panel.generate_stats(self.request, response)
self.assertIn("où", self.panel.content)
def test_object_with_non_ascii_repr_in_request_params(self):
request = rf.get("/non_ascii_request/")
response = self.panel.process_request(request)
self.panel.generate_stats(request, response)
self.assertIn("nôt åscíì", self.panel.content)
def test_insert_content(self):
"""
Test that the panel only inserts content after generate_stats and
not the process_request.
"""
request = rf.get("/non_ascii_request/")
response = self.panel.process_request(request)
# ensure the panel does not have content yet.
self.assertNotIn("nôt åscíì", self.panel.content)
self.panel.generate_stats(request, response)
# ensure the panel renders correctly.
content = self.panel.content
self.assertIn("nôt åscíì", content)
self.assertValidHTML(content)
def test_query_dict_for_request_in_method_get(self):
"""
Test verifies the correctness of the statistics generation method
in the case when the GET request is class QueryDict
"""
self.request.GET = QueryDict("foo=bar")
response = self.panel.process_request(self.request)
self.panel.generate_stats(self.request, response)
# ensure the panel GET request data is processed correctly.
content = self.panel.content
self.assertIn("foo", content)
self.assertIn("bar", content)
def test_dict_for_request_in_method_get(self):
"""
Test verifies the correctness of the statistics generation method
in the case when the GET request is class dict
"""
self.request.GET = {"foo": "bar"}
response = self.panel.process_request(self.request)
self.panel.generate_stats(self.request, response)
# ensure the panel GET request data is processed correctly.
content = self.panel.content
self.assertIn("foo", content)
self.assertIn("bar", content)
def test_query_dict_for_request_in_method_post(self):
"""
Test verifies the correctness of the statistics generation method
in the case when the POST request is class QueryDict
"""
self.request.POST = QueryDict("foo=bar")
response = self.panel.process_request(self.request)
self.panel.generate_stats(self.request, response)
# ensure the panel POST request data is processed correctly.
content = self.panel.content
self.assertIn("foo", content)
self.assertIn("bar", content)
def test_dict_for_request_in_method_post(self):
"""
Test verifies the correctness of the statistics generation method
in the case when the POST request is class dict
"""
self.request.POST = {"foo": "bar"}
response = self.panel.process_request(self.request)
self.panel.generate_stats(self.request, response)
# ensure the panel POST request data is processed correctly.
content = self.panel.content
self.assertIn("foo", content)
self.assertIn("bar", content)
def test_list_for_request_in_method_post(self):
"""
Verify that the toolbar doesn't crash if request.POST contains unexpected data.
See https://github.com/django-commons/django-debug-toolbar/issues/1621
"""
self.request.POST = [{"a": 1}, {"b": 2}]
response = self.panel.process_request(self.request)
self.panel.generate_stats(self.request, response)
# ensure the panel POST request data is processed correctly.
content = self.panel.content
self.assertIn("[{'a': 1}, {'b': 2}]", content)
def test_namespaced_url(self):
request = rf.get("/admin/login/")
response = self.panel.process_request(request)
self.panel.generate_stats(request, response)
panel_stats = self.panel.get_stats()
self.assertEqual(panel_stats["view_urlname"], "admin:login")
def test_session_list_sorted_or_not(self):
"""
Verify the session is sorted when all keys are strings.
See https://github.com/django-commons/django-debug-toolbar/issues/1668
"""
self.request.session = {
1: "value",
"data": ["foo", "bar", 1],
(2, 3): "tuple_key",
}
data = {
"list": [(1, "value"), ("data", ["foo", "bar", 1]), ((2, 3), "tuple_key")]
}
response = self.panel.process_request(self.request)
self.panel.generate_stats(self.request, response)
panel_stats = self.panel.get_stats()
self.assertEqual(panel_stats["session"], data)
self.request.session = {
"b": "b-value",
"a": "a-value",
}
data = {"list": [("a", "a-value"), ("b", "b-value")]}
response = self.panel.process_request(self.request)
self.panel.generate_stats(self.request, response)
panel_stats = self.panel.get_stats()
self.assertEqual(panel_stats["session"], data)
def test_sensitive_post_data_sanitized(self):
"""Test that sensitive POST data is redacted."""
self.request.POST = {"username": "testuser", "password": "secret123"}
response = self.panel.process_request(self.request)
self.panel.generate_stats(self.request, response)
# Check that password is redacted in panel content
content = self.panel.content
self.assertIn("username", content)
self.assertIn("testuser", content)
self.assertIn("password", content)
self.assertNotIn("secret123", content)
self.assertIn("********************", content)
def test_sensitive_get_data_sanitized(self):
"""Test that sensitive GET data is redacted."""
self.request.GET = {"api_key": "abc123", "q": "search term"}
response = self.panel.process_request(self.request)
self.panel.generate_stats(self.request, response)
# Check that api_key is redacted in panel content
content = self.panel.content
self.assertIn("api_key", content)
self.assertNotIn("abc123", content)
self.assertIn("********************", content)
self.assertIn("q", content)
self.assertIn("search term", content)
def test_sensitive_cookie_data_sanitized(self):
"""Test that sensitive cookie data is redacted."""
self.request.COOKIES = {"session_id": "abc123", "auth_token": "xyz789"}
response = self.panel.process_request(self.request)
self.panel.generate_stats(self.request, response)
# Check that auth_token is redacted in panel content
content = self.panel.content
self.assertIn("session_id", content)
self.assertIn("abc123", content)
self.assertIn("auth_token", content)
self.assertNotIn("xyz789", content)
self.assertIn("********************", content)
def test_sensitive_session_data_sanitized(self):
"""Test that sensitive session data is redacted."""
self.request.session = {"user_id": 123, "auth_token": "xyz789"}
response = self.panel.process_request(self.request)
self.panel.generate_stats(self.request, response)
# Check that auth_token is redacted in panel content
content = self.panel.content
self.assertIn("user_id", content)
self.assertIn("123", content)
self.assertIn("auth_token", content)
self.assertNotIn("xyz789", content)
self.assertIn("********************", content)
def test_querydict_sanitized(self):
"""Test that sensitive data in QueryDict objects is properly redacted."""
query_dict = QueryDict("username=testuser&password=secret123&token=abc456")
self.request.GET = query_dict
response = self.panel.process_request(self.request)
self.panel.generate_stats(self.request, response)
# Check that sensitive data is redacted in panel content
content = self.panel.content
self.assertIn("username", content)
self.assertIn("testuser", content)
self.assertIn("password", content)
self.assertNotIn("secret123", content)
self.assertIn("token", content)
self.assertNotIn("abc456", content)
self.assertIn("********************", content)
| RequestPanelTestCase |
python | spack__spack | lib/spack/spack/repo.py | {
"start": 78244,
"end": 78472
} | class ____(BadRepoError):
"""Raised when repo API version is too high or too low for Spack."""
def __init__(self, api, *args, **kwargs):
self.api = api
super().__init__(*args, **kwargs)
| BadRepoVersionError |
python | donnemartin__interactive-coding-challenges | arrays_strings/hash_map/test_hash_map.py | {
"start": 18,
"end": 1438
} | class ____(unittest.TestCase):
# TODO: It would be better if we had unit tests for each
# method in addition to the following end-to-end test
def test_end_to_end(self):
hash_table = HashTable(10)
print("Test: get on an empty hash table index")
self.assertRaises(KeyError, hash_table.get, 0)
print("Test: set on an empty hash table index")
hash_table.set(0, 'foo')
self.assertEqual(hash_table.get(0), 'foo')
hash_table.set(1, 'bar')
self.assertEqual(hash_table.get(1), 'bar')
print("Test: set on a non empty hash table index")
hash_table.set(10, 'foo2')
self.assertEqual(hash_table.get(0), 'foo')
self.assertEqual(hash_table.get(10), 'foo2')
print("Test: set on a key that already exists")
hash_table.set(10, 'foo3')
self.assertEqual(hash_table.get(0), 'foo')
self.assertEqual(hash_table.get(10), 'foo3')
print("Test: remove on a key that already exists")
hash_table.remove(10)
self.assertEqual(hash_table.get(0), 'foo')
self.assertRaises(KeyError, hash_table.get, 10)
print("Test: remove on a key that doesn't exist")
self.assertRaises(KeyError, hash_table.remove, -1)
print('Success: test_end_to_end')
def main():
test = TestHashMap()
test.test_end_to_end()
if __name__ == '__main__':
main()
| TestHashMap |
python | ray-project__ray | python/ray/serve/tests/test_telemetry_1.py | {
"start": 6176,
"end": 12393
} | class ____:
def __call__(self, *args):
pass
def reconfigure(self, *args):
pass
tester = Tester.bind()
@pytest.mark.parametrize(
"lightweight_option,value,new_value",
[
("num_replicas", 1, 2),
("user_config", {}, {"some_setting": 10}),
("autoscaling_config", {"max_replicas": 3}, {"max_replicas": 5}),
],
)
def test_lightweight_config_options(
manage_ray_with_telemetry, lightweight_option, value, new_value
):
"""Check that lightweight config options are detected by telemetry."""
storage = manage_ray_with_telemetry
lightweight_tagkeys = {
"num_replicas": ServeUsageTag.NUM_REPLICAS_LIGHTWEIGHT_UPDATED,
"user_config": ServeUsageTag.USER_CONFIG_LIGHTWEIGHT_UPDATED,
"autoscaling_config": ServeUsageTag.AUTOSCALING_CONFIG_LIGHTWEIGHT_UPDATED,
}
# Check that REST API telemetry is not set
check_telemetry(ServeUsageTag.REST_API_VERSION, expected=None)
config = {
"applications": [
{
"name": "test_app",
"import_path": "ray.serve.tests.test_telemetry_1.tester",
"deployments": [{"name": "Tester"}],
},
]
}
config["applications"][0]["deployments"][0][lightweight_option] = value
# Deploy first config
client = _get_global_client()
client.deploy_apps(ServeDeploySchema(**config))
wait_for_condition(check_apps_running, apps=["test_app"], timeout=15)
current_num_reports = ray.get(storage.get_reports_received.remote())
wait_for_condition(
lambda: ray.get(storage.get_reports_received.remote()) > current_num_reports,
timeout=10,
)
report = ray.get(storage.get_report.remote())
# Check
assert int(ServeUsageTag.NUM_APPS.get_value_from_report(report)) == 2
assert ServeUsageTag.API_VERSION.get_value_from_report(report) == "v2"
for tagkey in lightweight_tagkeys.values():
assert tagkey.get_value_from_report(report) is None
# Change config and deploy again
config["applications"][0]["deployments"][0][lightweight_option] = new_value
client.deploy_apps(ServeDeploySchema(**config))
wait_for_condition(check_apps_running, apps=["test_app"], timeout=15)
# Check again
wait_for_condition(
check_telemetry,
tag=lightweight_tagkeys[lightweight_option],
expected="True",
timeout=5,
)
report = ray.get(storage.get_report.remote())
assert int(ServeUsageTag.NUM_APPS.get_value_from_report(report)) == 2
assert ServeUsageTag.API_VERSION.get_value_from_report(report) == "v2"
for tagkey in lightweight_tagkeys.values():
if tagkey != lightweight_tagkeys[lightweight_option]:
assert tagkey.get_value_from_report(report) is None
@pytest.mark.parametrize("call_in_deployment", [False, True])
def test_handle_apis_detected(manage_ray_with_telemetry, call_in_deployment):
"""Check that the various handles are detected correctly by telemetry."""
check_telemetry(ServeUsageTag.DEPLOYMENT_HANDLE_API_USED, expected=None)
@serve.deployment
class Downstream:
def __call__(self):
return "hi"
@serve.deployment
class Caller:
def __init__(self, h):
self._h = h
async def __call__(self, call_downstream=True):
if call_downstream:
await self._h.remote()
return "ok"
handle = serve.run(Caller.bind(Downstream.bind()))
if call_in_deployment:
result = httpx.get("http://localhost:8000/").text
else:
result = handle.remote(call_downstream=False).result()
assert result == "ok"
wait_for_condition(
check_telemetry, tag=ServeUsageTag.DEPLOYMENT_HANDLE_API_USED, expected="1"
)
@pytest.mark.parametrize("mode", ["http", "outside_deployment", "inside_deployment"])
def test_deployment_handle_to_obj_ref_detected(manage_ray_with_telemetry, mode):
"""Check that the handle to_object_ref API is detected correctly by telemetry."""
check_telemetry(
ServeUsageTag.DEPLOYMENT_HANDLE_TO_OBJECT_REF_API_USED, expected=None
)
@serve.deployment
class Downstream:
def __call__(self):
return "hi"
@serve.deployment
class Caller:
def __init__(self, h):
self._h = h
async def get(self, call_downstream=False):
if call_downstream:
await self._h.remote()._to_object_ref()
return "ok"
async def __call__(self):
return await self.get()
handle = serve.run(Caller.bind(Downstream.bind()))
if mode == "http":
result = httpx.get("http://localhost:8000/").text
elif mode == "outside_deployment":
result = ray.get(handle.get.remote()._to_object_ref_sync())
else:
result = handle.get.remote(call_downstream=True).result()
assert result == "ok"
if mode == "http":
for _ in range(20):
check_telemetry(
ServeUsageTag.DEPLOYMENT_HANDLE_TO_OBJECT_REF_API_USED, expected=None
)
time.sleep(0.1)
else:
wait_for_condition(
check_telemetry,
tag=ServeUsageTag.DEPLOYMENT_HANDLE_TO_OBJECT_REF_API_USED,
expected="1",
)
def test_multiplexed_detect(manage_ray_with_telemetry):
"""Check that multiplexed api is detected by telemetry."""
@serve.deployment
class Model:
@serve.multiplexed(max_num_models_per_replica=1)
async def get_model(self, tag):
return tag
async def __call__(self, request):
tag = serve.get_multiplexed_model_id()
await self.get_model(tag)
return tag
serve.run(Model.bind(), name="app", route_prefix="/app")
wait_for_condition(check_apps_running, apps=["app"])
check_telemetry(ServeUsageTag.MULTIPLEXED_API_USED, expected=None)
headers = {SERVE_MULTIPLEXED_MODEL_ID: "1"}
resp = httpx.get("http://localhost:8000/app", headers=headers)
assert resp.status_code == 200
wait_for_condition(
check_telemetry, tag=ServeUsageTag.MULTIPLEXED_API_USED, expected="1"
)
| Tester |
python | dask__dask | dask/dataframe/dask_expr/_groupby.py | {
"start": 30776,
"end": 31038
} | class ____(GroupByTransform):
func = staticmethod(functools.partial(_fillna, what="bfill"))
def _simplify_up(self, parent, dependents):
if isinstance(parent, Projection):
return groupby_projection(self, parent, dependents)
| GroupByBFill |
python | spack__spack | lib/spack/spack/spec.py | {
"start": 223263,
"end": 223606
} | class ____(SpecFormatStringError):
"""Called for mismatched sigils and attributes in format strings"""
def __init__(self, sigil, requirement, used):
msg = "The sigil %s may only be used for %s." % (sigil, requirement)
msg += " It was used with the attribute %s." % used
super().__init__(msg)
| SpecFormatSigilError |
python | facebook__pyre-check | tools/incremental_test/runner.py | {
"start": 7330,
"end": 7898
} | class ____:
incremental_update_logs: List[Mapping[str, int]]
cold_start_log: Mapping[str, int]
def to_json(self) -> Dict[str, Any]:
return {
"incremental_update_logs": self.incremental_update_logs,
"cold_start_log": self.cold_start_log,
}
def total_incremental_check_time(self) -> int:
return sum(log["total"] for log in self.incremental_update_logs) // 1000
def full_check_time(self) -> int:
return sum(duration for _, duration in self.cold_start_log.items()) // 1000
@dataclass
| ProfileLogs |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/duplicate_bases.py | {
"start": 309,
"end": 352
} | class ____(
A,
A,
B
):
...
| G3 |
python | huggingface__transformers | src/transformers/models/nougat/processing_nougat.py | {
"start": 875,
"end": 6063
} | class ____(ProcessorMixin):
r"""
Constructs a Nougat processor which wraps a Nougat image processor and a Nougat tokenizer into a single processor.
[`NougatProcessor`] offers all the functionalities of [`NougatImageProcessor`] and [`NougatTokenizerFast`]. See the
[`~NougatProcessor.__call__`] and [`~NougatProcessor.decode`] for more information.
Args:
image_processor ([`NougatImageProcessor`]):
An instance of [`NougatImageProcessor`]. The image processor is a required input.
tokenizer ([`NougatTokenizerFast`]):
An instance of [`NougatTokenizerFast`]. The tokenizer is a required input.
"""
def __init__(self, image_processor, tokenizer):
super().__init__(image_processor, tokenizer)
def __call__(
self,
images=None,
text=None,
do_crop_margin: Optional[bool] = None,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: "PILImageResampling" = None, # noqa: F821
do_thumbnail: Optional[bool] = None,
do_align_long_axis: Optional[bool] = None,
do_pad: Optional[bool] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[Union[int, float]] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
data_format: Optional["ChannelDimension"] = "channels_first", # noqa: F821
input_data_format: Optional[Union[str, "ChannelDimension"]] = None, # noqa: F821
text_pair: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] = None,
text_target: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] = None,
text_pair_target: Optional[
Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]
] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Optional[Union[bool, str, TruncationStrategy]] = None,
max_length: Optional[int] = None,
stride: int = 0,
is_split_into_words: bool = False,
pad_to_multiple_of: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
):
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process.")
if images is not None:
inputs = self.image_processor(
images,
do_crop_margin=do_crop_margin,
do_resize=do_resize,
size=size,
resample=resample,
do_thumbnail=do_thumbnail,
do_align_long_axis=do_align_long_axis,
do_pad=do_pad,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
return_tensors=return_tensors,
data_format=data_format,
input_data_format=input_data_format,
)
if text is not None:
encodings = self.tokenizer(
text,
text_pair=text_pair,
text_target=text_target,
text_pair_target=text_pair_target,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_multiple_of=pad_to_multiple_of,
return_tensors=return_tensors,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
)
if text is None:
return inputs
elif images is None:
return encodings
else:
inputs["labels"] = encodings["input_ids"]
return inputs
def post_process_generation(self, *args, **kwargs):
"""
This method forwards all its arguments to NougatTokenizer's [`~PreTrainedTokenizer.post_process_generation`].
Please refer to the docstring of this method for more information.
"""
return self.tokenizer.post_process_generation(*args, **kwargs)
__all__ = ["NougatProcessor"]
| NougatProcessor |
python | spack__spack | lib/spack/spack/vendor/jsonschema/exceptions.py | {
"start": 4195,
"end": 4605
} | class ____(Exception):
"""
A type checker was asked to check a type it did not have registered.
"""
def __init__(self, type):
self.type = type
def __unicode__(self):
return "Type %r is unknown to this type checker" % self.type
if PY3:
__str__ = __unicode__
else:
def __str__(self):
return unicode(self).encode("utf-8")
| UndefinedTypeCheck |
python | jazzband__django-redis | django_redis/compressors/base.py | {
"start": 0,
"end": 253
} | class ____:
def __init__(self, options):
self._options = options
def compress(self, value: bytes) -> bytes:
raise NotImplementedError
def decompress(self, value: bytes) -> bytes:
raise NotImplementedError
| BaseCompressor |
python | huggingface__transformers | src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | {
"start": 9483,
"end": 12861
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
is_causal: bool = False,
config: Optional[PatchTSMixerConfig] = None,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.config = config
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.is_causal = is_causal
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
# TODO: we need a refactor so that the different attention modules can get their specific kwargs
# ATM, we have mixed things encoder, decoder, and encoder-decoder attn
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
# determine input shapes
bsz, tgt_len = hidden_states.shape[:-1]
src_len = key_value_states.shape[1] if is_cross_attention else tgt_len
q_input_shape = (bsz, tgt_len, -1, self.head_dim)
kv_input_shape = (bsz, src_len, -1, self.head_dim)
# get query proj
query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2)
current_states = key_value_states if is_cross_attention else hidden_states
key_states = self.k_proj(current_states).view(*kv_input_shape).transpose(1, 2)
value_states = self.v_proj(current_states).view(*kv_input_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.dropout,
scaling=self.scaling,
output_attentions=output_attentions,
**kwargs,
)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights, None
| PatchTSMixerAttention |
python | getsentry__sentry | tests/sentry/seer/similarity/test_config.py | {
"start": 418,
"end": 1368
} | class ____(TestCase):
def test_returns_stable_when_rollout_disabled(self):
"""When new model rollout is disabled, return stable version"""
with patch("sentry.seer.similarity.config.SEER_GROUPING_NEW_VERSION", None):
result = get_grouping_model_version(self.project)
assert result == SEER_GROUPING_STABLE_VERSION
def test_returns_stable_when_feature_not_enabled(self):
"""When feature flag is not enabled for project, return stable version"""
result = get_grouping_model_version(self.project)
assert result == SEER_GROUPING_STABLE_VERSION
def test_returns_new_when_feature_enabled(self):
"""When feature flag is enabled for project, return new version"""
with self.feature(SEER_GROUPING_NEW_MODEL_ROLLOUT_FEATURE):
result = get_grouping_model_version(self.project)
assert result == SEER_GROUPING_NEW_VERSION
| GetGroupingModelVersionTest |
python | astropy__astropy | astropy/cosmology/_src/tests/test_core.py | {
"start": 1876,
"end": 2370
} | class ____(Cosmology):
"""Defined here to be serializable."""
H0: Parameter = Parameter(unit="km/(s Mpc)")
Tcmb0: Parameter = Parameter(default=0 * u.K, unit=u.K)
m_nu: Parameter = Parameter(default=0 * u.eV, unit=u.eV)
@property
def is_flat(self) -> bool:
return super().is_flat()
##############################################################################
# TESTS
##############################################################################
| SubCosmology |
python | cython__cython | Cython/Plex/Actions.py | {
"start": 1777,
"end": 2209
} | class ____(Action):
"""
Begin(state_name) is a Plex action which causes the Scanner to
enter the state |state_name|. See the docstring of Plex.Lexicon
for more information.
"""
def __init__(self, state_name):
self.state_name = state_name
def perform(self, token_stream, text):
token_stream.begin(self.state_name)
def __repr__(self):
return "Begin(%s)" % self.state_name
| Begin |
python | ansible__ansible | lib/ansible/_internal/_yaml/_dumper.py | {
"start": 576,
"end": 999
} | class ____(SafeDumper, metaclass=abc.ABCMeta):
"""Base class for Ansible YAML dumpers."""
@classmethod
@abc.abstractmethod
def _register_representers(cls) -> None:
"""Method used to register representers to derived types during class initialization."""
def __init_subclass__(cls, **kwargs) -> None:
"""Initialization for derived types."""
cls._register_representers()
| _BaseDumper |
python | getsentry__sentry | src/sentry/notifications/notifications/user_report.py | {
"start": 785,
"end": 3471
} | class ____(ProjectNotification):
metrics_key = "user_report"
template_path = "sentry/emails/activity/new-user-feedback"
def __init__(self, project: Project, report: Mapping[str, Any]) -> None:
super().__init__(project)
self.group = Group.objects.get(id=report["issue"]["id"])
self.report = report
def get_participants_with_group_subscription_reason(self) -> ParticipantMap:
data_by_provider = GroupSubscription.objects.get_participants(group=self.group)
email_participants = data_by_provider.get_participants_by_provider(ExternalProviders.EMAIL)
result = ParticipantMap()
for actor, reason in email_participants:
result.add(ExternalProviders.EMAIL, actor, reason)
return result
def get_subject(self, context: Mapping[str, Any] | None = None) -> str:
message = f"{self.group.qualified_short_id} - New Feedback from {self.report['name']}"
message = force_str(message)
return message
def get_notification_title(
self, provider: ExternalProviders, context: Mapping[str, Any] | None = None
) -> str:
return self.get_subject(context)
@property
def reference(self) -> Model | None:
return self.project
def get_context(self) -> MutableMapping[str, Any]:
organization = self.organization
link_query = f"project={self.project.id}"
if hasattr(self, "notification_uuid"):
link_query += f"&notification_uuid={self.notification_uuid}"
return {
"enhanced_privacy": organization.flags.enhanced_privacy,
"group": self.group,
"issue_link": organization.absolute_url(
f"/organizations/{organization.slug}/issues/{self.group.id}/",
query=link_query,
),
# TODO(dcramer): we don't have permalinks to feedback yet
"link": organization.absolute_url(
f"/organizations/{organization.slug}/issues/{self.group.id}/feedback/",
query=link_query,
),
"project": self.project,
"project_link": organization.absolute_url(
f"/organizations/{self.organization.slug}/projects/{self.project.slug}/"
),
"report": self.report,
}
def get_recipient_context(
self, recipient: Actor, extra_context: Mapping[str, Any]
) -> MutableMapping[str, Any]:
context = super().get_recipient_context(recipient, extra_context)
return {**context, **get_reason_context(context)}
def send(self) -> None:
return send_activity_notification(self)
| UserReportNotification |
python | tensorflow__tensorflow | tensorflow/python/feature_column/feature_column_v2_test.py | {
"start": 117298,
"end": 121427
} | class ____(test.TestCase):
class _TestFeatureColumn(BaseFeatureColumnForTests,
collections.namedtuple('_TestFeatureColumn',
('parse_spec'))):
@property
def _is_v2_column(self):
return True
@property
def name(self):
return '_TestFeatureColumn'
def transform_feature(self, transformation_cache, state_manager):
pass
def _transform_feature(self, inputs):
pass
@property
def parse_example_spec(self):
return self.parse_spec
@property
def _parse_example_spec(self):
return self.parse_spec
def test_no_feature_columns(self):
actual = fc.make_parse_example_spec_v2([])
self.assertDictEqual({}, actual)
def test_invalid_type(self):
key1 = 'key1'
parse_spec1 = parsing_ops.FixedLenFeature(
shape=(2,), dtype=dtypes.float32, default_value=0.)
with self.assertRaisesRegex(
ValueError,
'All feature_columns must be FeatureColumn instances.*invalid_column'):
fc.make_parse_example_spec_v2((self._TestFeatureColumn({
key1: parse_spec1
}), 'invalid_column'))
def test_one_feature_column(self):
key1 = 'key1'
parse_spec1 = parsing_ops.FixedLenFeature(
shape=(2,), dtype=dtypes.float32, default_value=0.)
actual = fc.make_parse_example_spec_v2((self._TestFeatureColumn({
key1: parse_spec1
}),))
self.assertDictEqual({key1: parse_spec1}, actual)
def test_two_feature_columns(self):
key1 = 'key1'
parse_spec1 = parsing_ops.FixedLenFeature(
shape=(2,), dtype=dtypes.float32, default_value=0.)
key2 = 'key2'
parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
actual = fc.make_parse_example_spec_v2((self._TestFeatureColumn({
key1: parse_spec1
}), self._TestFeatureColumn({
key2: parse_spec2
})))
self.assertDictEqual({key1: parse_spec1, key2: parse_spec2}, actual)
def test_equal_keys_different_parse_spec(self):
key1 = 'key1'
parse_spec1 = parsing_ops.FixedLenFeature(
shape=(2,), dtype=dtypes.float32, default_value=0.)
parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
with self.assertRaisesRegex(
ValueError,
'feature_columns contain different parse_spec for key key1'):
fc.make_parse_example_spec_v2((self._TestFeatureColumn({
key1: parse_spec1
}), self._TestFeatureColumn({
key1: parse_spec2
})))
def test_equal_keys_equal_parse_spec(self):
key1 = 'key1'
parse_spec1 = parsing_ops.FixedLenFeature(
shape=(2,), dtype=dtypes.float32, default_value=0.)
actual = fc.make_parse_example_spec_v2((self._TestFeatureColumn({
key1: parse_spec1
}), self._TestFeatureColumn({
key1: parse_spec1
})))
self.assertDictEqual({key1: parse_spec1}, actual)
def test_multiple_features_dict(self):
"""parse_spc for one column is a dict with length > 1."""
key1 = 'key1'
parse_spec1 = parsing_ops.FixedLenFeature(
shape=(2,), dtype=dtypes.float32, default_value=0.)
key2 = 'key2'
parse_spec2 = parsing_ops.VarLenFeature(dtype=dtypes.string)
key3 = 'key3'
parse_spec3 = parsing_ops.VarLenFeature(dtype=dtypes.int32)
actual = fc.make_parse_example_spec_v2((self._TestFeatureColumn({
key1: parse_spec1
}), self._TestFeatureColumn({
key2: parse_spec2,
key3: parse_spec3
})))
self.assertDictEqual({
key1: parse_spec1,
key2: parse_spec2,
key3: parse_spec3
}, actual)
def _assert_sparse_tensor_value(test_case, expected, actual):
test_case.assertEqual(np.int64, np.array(actual.indices).dtype)
test_case.assertAllEqual(expected.indices, actual.indices)
test_case.assertEqual(
np.array(expected.values).dtype,
np.array(actual.values).dtype)
test_case.assertAllEqual(expected.values, actual.values)
test_case.assertEqual(np.int64, np.array(actual.dense_shape).dtype)
test_case.assertAllEqual(expected.dense_shape, actual.dense_shape)
| MakeParseExampleSpecTest |
python | eventlet__eventlet | tests/debug_test.py | {
"start": 2860,
"end": 4194
} | class ____(tests.LimitedTestCase):
def test_everything(self):
debug.hub_exceptions(True)
debug.hub_exceptions(False)
debug.tpool_exceptions(True)
debug.tpool_exceptions(False)
debug.hub_listener_stacks(True)
debug.hub_listener_stacks(False)
debug.hub_timer_stacks(True)
debug.hub_timer_stacks(False)
debug.format_hub_listeners()
debug.format_hub_timers()
def test_hub_exceptions(self):
debug.hub_exceptions(True)
server = eventlet.listen(('0.0.0.0', 0))
client = eventlet.connect(('127.0.0.1', server.getsockname()[1]))
client_2, addr = server.accept()
def hurl(s):
s.recv(1)
{}[1] # keyerror
with tests.capture_stderr() as fake:
gt = eventlet.spawn(hurl, client_2)
eventlet.sleep(0.001)
client.send(b' ')
eventlet.sleep(0.001)
# allow the "hurl" greenlet to trigger the KeyError
# not sure why the extra context switch is needed
eventlet.sleep(0.001)
self.assertRaises(KeyError, gt.wait)
debug.hub_exceptions(False)
# look for the KeyError exception in the traceback
assert 'KeyError: 1' in fake.getvalue(), "Traceback not in:\n" + fake.getvalue()
| TestDebug |
python | streamlit__streamlit | lib/streamlit/connections/snowflake_connection.py | {
"start": 1946,
"end": 22860
} | class ____(BaseConnection["InternalSnowflakeConnection"]):
"""A connection to Snowflake using the Snowflake Connector for Python.
Initialize this connection object using ``st.connection("snowflake")`` or
``st.connection("<name>", type="snowflake")``. Connection parameters for a
SnowflakeConnection can be specified using ``secrets.toml`` and/or
``**kwargs``. Connection parameters are passed to
|snowflake.connector.connect()|.
When an app is running in Streamlit in Snowflake,
``st.connection("snowflake")`` connects automatically using the app owner's
role without further configuration. ``**kwargs`` will be ignored in this
case. Use ``secrets.toml`` and ``**kwargs`` to configure your connection
for local development.
SnowflakeConnection includes several convenience methods. For example, you
can directly execute a SQL query with ``.query()`` or access the underlying
Snowflake Connector object with ``.raw_connection``.
.. |snowflake.connector.connect()| replace:: ``snowflake.connector.connect()``
.. _snowflake.connector.connect(): https://docs.snowflake.com/en/developer-guide/python-connector/python-connector-api#label-snowflake-connector-methods-connect
.. Important::
`snowflake-snowpark-python <https://pypi.org/project/snowflake-snowpark-python/>`_
must be installed in your environment to use this connection. You can
install it as an extra with Streamlit:
.. code-block:: shell
pip install streamlit[snowflake]
.. Important::
Account identifiers must be of the form ``<orgname>-<account_name>``
where ``<orgname>`` is the name of your Snowflake organization and
``<account_name>`` is the unique name of your account within your
organization. This is dash-separated, not dot-separated like when used
in SQL queries. For more information, see `Account identifiers
<https://docs.snowflake.com/en/user-guide/admin-account-identifier>`_.
Examples
--------
**Example 1: Configuration with Streamlit secrets**
You can configure your Snowflake connection using Streamlit's
`Secrets management <https://docs.streamlit.io/develop/concepts/connections/secrets-management>`_.
For example, if you have MFA enabled on your account, you can connect using
`key-pair authentication <https://docs.snowflake.com/en/user-guide/key-pair-auth>`_.
``.streamlit/secrets.toml``:
>>> [connections.snowflake]
>>> account = "xxx-xxx"
>>> user = "xxx"
>>> private_key_file = "/xxx/xxx/xxx.p8"
>>> role = "xxx"
>>> warehouse = "xxx"
>>> database = "xxx"
>>> schema = "xxx"
Your app code:
>>> import streamlit as st
>>> conn = st.connection("snowflake")
>>> df = conn.query("SELECT * FROM my_table")
**Example 2: Configuration with keyword arguments and external authentication**
You can configure your Snowflake connection with keyword arguments. The
keyword arguments are merged with (and take precedence over) the values in
``secrets.toml``. However, if you name your connection ``"snowflake"`` and
don't have a ``[connections.snowflake]`` dictionary in your
``secrets.toml`` file, Streamlit will ignore any keyword arguments and use
the default Snowflake connection as described in Example 5 and Example 6.
To configure your connection using only keyword arguments, declare a name
for the connection other than ``"snowflake"``.
For example, if your Snowflake account supports SSO, you can set up a quick
local connection for development using `browser-based SSO
<https://docs.snowflake.com/en/user-guide/admin-security-fed-auth-use#how-browser-based-sso-works>`_.
Because there is nothing configured in ``secrets.toml``, the name is an
empty string and the type is set to ``"snowflake"``. This prevents
Streamlit from ignoring the keyword arguments and using a default
Snowflake connection.
>>> import streamlit as st
>>> conn = st.connection(
... "",
... type="snowflake",
... account="xxx-xxx",
... user="xxx",
... authenticator="externalbrowser",
... )
>>> df = conn.query("SELECT * FROM my_table")
**Example 3: Named connection with Snowflake's connection configuration file**
Snowflake's Python Connector supports a `connection configuration file
<https://docs.snowflake.com/en/developer-guide/python-connector/python-connector-connect#connecting-using-the-connections-toml-file>`_,
which is well integrated with Streamlit's ``SnowflakeConnection``. If you
already have one or more connections configured, all you need to do is pass
the name of the connection to use.
``~/.snowflake/connections.toml``:
>>> [my_connection]
>>> account = "xxx-xxx"
>>> user = "xxx"
>>> password = "xxx"
>>> warehouse = "xxx"
>>> database = "xxx"
>>> schema = "xxx"
Your app code:
>>> import streamlit as st
>>> conn = st.connection("my_connection", type="snowflake")
>>> df = conn.query("SELECT * FROM my_table")
**Example 4: Named connection with Streamlit secrets and Snowflake's connection configuration file**
If you have a Snowflake configuration file with a connection named
``my_connection`` as in Example 3, you can pass the connection name through
``secrets.toml``.
``.streamlit/secrets.toml``:
>>> [connections.snowflake]
>>> connection_name = "my_connection"
Your app code:
>>> import streamlit as st
>>> conn = st.connection("snowflake")
>>> df = conn.query("SELECT * FROM my_table")
**Example 5: Default connection with an environment variable**
If you don't have a ``[connections.snowflake]`` dictionary in your
``secrets.toml`` file and use ``st.connection("snowflake")``, Streamlit
will use the default connection for the `Snowflake Python Connector
<https://docs.snowflake.cn/en/developer-guide/python-connector/python-connector-connect#setting-a-default-connection>`_.
If you have a Snowflake configuration file with a connection named
``my_connection`` as in Example 3, you can set an environment variable to
declare it as the default Snowflake connection.
>>> SNOWFLAKE_DEFAULT_CONNECTION_NAME = "my_connection"
Your app code:
>>> import streamlit as st
>>> conn = st.connection("snowflake")
>>> df = conn.query("SELECT * FROM my_table")
**Example 6: Default connection in Snowflake's connection configuration file**
If you have a Snowflake configuration file that defines your ``default``
connection, Streamlit will automatically use it if no other connection is
declared.
``~/.snowflake/connections.toml``:
>>> [default]
>>> account = "xxx-xxx"
>>> user = "xxx"
>>> password = "xxx"
>>> warehouse = "xxx"
>>> database = "xxx"
>>> schema = "xxx"
Your app code:
>>> import streamlit as st
>>> conn = st.connection("snowflake")
>>> df = conn.query("SELECT * FROM my_table")
"""
def _connect(self, **kwargs: Any) -> InternalSnowflakeConnection:
import snowflake.connector # type:ignore[import]
from snowflake.connector import Error as SnowflakeError # type:ignore[import]
# If we're running in SiS, just call get_active_session() and retrieve the
# lower-level connection from it.
if running_in_sis():
from snowflake.snowpark.context import ( # type:ignore[import] # isort: skip
get_active_session,
)
session = get_active_session()
if hasattr(session, "connection"):
return session.connection
# session.connection is only a valid attr in more recent versions of
# snowflake-connector-python, so we fall back to grabbing
# session._conn._conn if `.connection` is unavailable.
return session._conn._conn
# We require qmark-style parameters everywhere for consistency across different
# environments where SnowflakeConnections may be used.
snowflake.connector.paramstyle = "qmark"
# Otherwise, attempt to create a new connection from whatever credentials we
# have available.
try:
st_secrets = self._secrets.to_dict()
if len(st_secrets):
_LOGGER.info(
"Connect to Snowflake using the Streamlit secret defined under "
"[connections.snowflake]."
)
conn_kwargs = {**st_secrets, **kwargs}
return snowflake.connector.connect(**conn_kwargs)
# Use the default configuration as defined in https://docs.snowflake.cn/en/developer-guide/python-connector/python-connector-connect#setting-a-default-connection
if self._connection_name == "snowflake":
_LOGGER.info(
"Connect to Snowflake using the default configuration as defined "
"in https://docs.snowflake.cn/en/developer-guide/python-connector/python-connector-connect#setting-a-default-connection"
)
return snowflake.connector.connect()
return snowflake.connector.connect(**kwargs)
except SnowflakeError:
if not len(st_secrets) and not kwargs:
raise StreamlitAPIException(
"Missing Snowflake connection configuration. "
"Did you forget to set this in `secrets.toml`, a Snowflake configuration file, "
"or as kwargs to `st.connection`? "
"See the [SnowflakeConnection configuration documentation]"
"(https://docs.streamlit.io/st.connections.snowflakeconnection-configuration) "
"for more details and examples."
)
raise
def query(
self,
sql: str,
*, # keyword-only arguments:
ttl: float | int | timedelta | None = None,
show_spinner: bool | str = "Running `snowflake.query(...)`.",
params: Any = None,
**kwargs: Any,
) -> DataFrame:
"""Run a read-only SQL query.
This method implements query result caching and simple error
handling/retries. The caching behavior is identical to that of using
``@st.cache_data``.
.. note::
Queries that are run without a specified ``ttl`` are cached
indefinitely.
Parameters
----------
sql : str
The read-only SQL query to execute.
ttl : float, int, timedelta or None
The maximum number of seconds to keep results in the cache. If this
is ``None`` (default), cached results do not expire with time.
show_spinner : boolean or string
Whether to enable the spinner. When a cached query is executed, no
spinner is displayed because the result is immediately available.
When a new query is executed, the default is to show a spinner with
the message "Running ``snowflake.query(...)``."
If this is ``False``, no spinner displays while executing the
query. If this is a string, the string will be used as the message
for the spinner.
params : list, tuple, dict or None
List of parameters to pass to the Snowflake Connector for Python
``Cursor.execute()`` method. This connector supports binding data
to a SQL statement using qmark bindings. For more information and
examples, see the `Snowflake Connector for Python documentation
<https://docs.snowflake.com/en/developer-guide/python-connector/python-connector-example#using-qmark-or-numeric-binding>`_.
This defaults to ``None``.
Returns
-------
pandas.DataFrame
The result of running the query, formatted as a pandas DataFrame.
Example
-------
>>> import streamlit as st
>>>
>>> conn = st.connection("snowflake")
>>> df = conn.query("SELECT * FROM my_table")
>>> st.dataframe(df)
"""
from tenacity import retry, retry_if_exception, stop_after_attempt, wait_fixed
@retry(
after=lambda _: self.reset(),
stop=stop_after_attempt(3),
reraise=True,
# We don't have to implement retries ourself for most error types as the
# `snowflake-connector-python` library already implements retries for
# retryable HTTP errors.
retry=retry_if_exception(
lambda e: hasattr(e, "sqlstate")
and e.sqlstate == SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
),
wait=wait_fixed(1),
)
def _query(sql: str) -> DataFrame:
cur = self._instance.cursor()
cur.execute(sql, params=params, **kwargs)
return cur.fetch_pandas_all() # type: ignore
# We modify our helper function's `__qualname__` here to work around default
# `@st.cache_data` behavior. Otherwise, `.query()` being called with different
# `ttl` values will reset the cache with each call, and the query caches won't
# be scoped by connection.
ttl_str = str( # Avoid adding extra `.` characters to `__qualname__`
ttl
).replace(".", "_")
_query.__qualname__ = f"{_query.__qualname__}_{self._connection_name}_{ttl_str}"
_query = cache_data(
show_spinner=show_spinner,
ttl=ttl,
)(_query)
return _query(sql)
def write_pandas(
self,
df: DataFrame,
table_name: str,
database: str | None = None,
schema: str | None = None,
chunk_size: int | None = None,
**kwargs: Any,
) -> tuple[bool, int, int]:
"""Write a ``pandas.DataFrame`` to a table in a Snowflake database.
This convenience method is a thin wrapper around
``snowflake.connector.pandas_tools.write_pandas()`` using the
underlying connection. The ``conn`` parameter is passed automatically.
For more information and additional keyword arguments, see the
`Snowflake Connector for Python documentation
<https://docs.snowflake.com/en/developer-guide/python-connector/python-connector-api#write_pandas>`_.
Parameters
----------
df: pandas.DataFrame
The ``pandas.DataFrame`` object containing the data to be copied
into the table.
table_name: str
Name of the table where the data should be copied to.
database: str
Name of the database containing the table. By default, the function
writes to the database that is currently in use in the session.
.. Note::
If you specify this parameter, you must also specify the schema
parameter.
schema: str
Name of the schema containing the table. By default, the function
writes to the table in the schema that is currently in use in the
session.
chunk_size: int
Number of elements to insert at a time. By default, the function
inserts all elements in one chunk.
**kwargs: Any
Additional keyword arguments for
``snowflake.connector.pandas_tools.write_pandas()``.
Returns
-------
tuple[bool, int, int]
A tuple containing three values:
1. A boolean value that is ``True`` if the write was successful.
2. An integer giving the number of chunks of data that were copied.
3. An integer giving the number of rows that were inserted.
Example
-------
The following example uses the database and schema currently in use in
the session and copies the data into a table named "my_table."
>>> import streamlit as st
>>> import pandas as pd
>>>
>>> df = pd.DataFrame(
... {"Name": ["Mary", "John", "Robert"], "Pet": ["dog", "cat", "bird"]}
... )
>>> conn = st.connection("snowflake")
>>> conn.write_pandas(df, "my_table")
"""
from snowflake.connector.pandas_tools import write_pandas # type:ignore[import]
success, nchunks, nrows, _ = write_pandas(
conn=self._instance,
df=df,
table_name=table_name,
database=database,
schema=schema,
chunk_size=chunk_size,
**kwargs,
)
return (success, nchunks, nrows)
def cursor(self) -> SnowflakeCursor:
"""Create a new cursor object from this connection.
Snowflake Connector cursors implement the Python Database API v2.0
specification (PEP-249). For more information, see the
`Snowflake Connector for Python documentation
<https://docs.snowflake.com/en/developer-guide/python-connector/python-connector-api#object-cursor>`_.
Returns
-------
snowflake.connector.cursor.SnowflakeCursor
A cursor object for the connection.
Example
-------
The following example uses a cursor to insert multiple rows into a
table. The ``qmark`` parameter style is specified as an optional
keyword argument. Alternatively, the parameter style can be declared in
your connection configuration file. For more information, see the
`Snowflake Connector for Python documentation
<https://docs.snowflake.com/en/developer-guide/python-connector/python-connector-example#using-qmark-or-numeric-binding>`_.
>>> import streamlit as st
>>>
>>> conn = st.connection("snowflake", "paramstyle"="qmark")
>>> rows_to_insert = [("Mary", "dog"), ("John", "cat"), ("Robert", "bird")]
>>> conn.cursor().executemany(
... "INSERT INTO mytable (name, pet) VALUES (?, ?)", rows_to_insert
... )
"""
return self._instance.cursor()
@property
def raw_connection(self) -> InternalSnowflakeConnection:
"""Access the underlying connection object from the Snowflake\
Connector for Python.
For information on how to use the Snowflake Connector for Python, see
the `Snowflake Connector for Python documentation
<https://docs.snowflake.com/en/developer-guide/python-connector/python-connector-example>`_.
Returns
-------
snowflake.connector.connection.SnowflakeConnection
The connection object.
Example
-------
The following example uses a cursor to submit an asynchronous query,
saves the query ID, then periodically checks the query status through
the connection before retrieving the results.
>>> import streamlit as st
>>> import time
>>>
>>> conn = st.connection("snowflake")
>>> cur = conn.cursor()
>>> cur.execute_async("SELECT * FROM my_table")
>>> query_id = cur.sfqid
>>> while True:
... status = conn.raw_connection.get_query_status(query_id)
... if conn.raw_connection.is_still_running(status):
... time.sleep(1)
... else:
... break
>>> cur.get_results_from_sfqid(query_id)
>>> df = cur.fetchall()
"""
return self._instance
def session(self) -> Session:
"""Create a new Snowpark session from this connection.
For information on how to use Snowpark sessions, see the
`Snowpark developer guide
<https://docs.snowflake.com/en/developer-guide/snowpark/python/working-with-dataframes>`_
and `Snowpark API Reference
<https://docs.snowflake.com/en/developer-guide/snowpark/reference/python/latest/snowpark/session>`_.
Returns
-------
snowflake.snowpark.Session
A new Snowpark session for this connection.
Example
-------
The following example creates a new Snowpark session and uses it to run
a query.
>>> import streamlit as st
>>>
>>> conn = st.connection("snowflake")
>>> session = conn.session()
>>> df = session.sql("SELECT * FROM my_table").collect()
"""
from snowflake.snowpark.context import get_active_session # type:ignore[import]
from snowflake.snowpark.session import Session # type:ignore[import]
if running_in_sis():
return get_active_session()
return cast(
"Session", Session.builder.configs({"connection": self._instance}).create()
)
| SnowflakeConnection |
python | langchain-ai__langchain | libs/langchain/langchain_classic/agents/output_parsers/react_json_single_input.py | {
"start": 376,
"end": 2664
} | class ____(AgentOutputParser):
"""Parses ReAct-style LLM calls that have a single tool input in json format.
Expects output to be in one of two formats.
If the output signals that an action should be taken,
should be in the below format. This will result in an AgentAction
being returned.
```
Thought: agent thought here
Action:
```
{
"action": "search",
"action_input": "what is the temperature in SF"
}
```
```
If the output signals that a final answer should be given,
should be in the below format. This will result in an AgentFinish
being returned.
```
Thought: agent thought here
Final Answer: The temperature is 100 degrees
```
"""
pattern: Pattern = re.compile(r"^.*?`{3}(?:json)?\n?(.*?)`{3}.*?$", re.DOTALL)
"""Regex pattern to parse the output."""
@override
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
@override
def parse(self, text: str) -> AgentAction | AgentFinish:
includes_answer = FINAL_ANSWER_ACTION in text
try:
found = self.pattern.search(text)
if not found:
# Fast fail to parse Final Answer.
msg = "action not found"
raise ValueError(msg)
action = found.group(1)
response = json.loads(action.strip())
includes_action = "action" in response
if includes_answer and includes_action:
msg = (
"Parsing LLM output produced a final answer "
f"and a parse-able action: {text}"
)
raise OutputParserException(msg)
return AgentAction(
response["action"],
response.get("action_input", {}),
text,
)
except Exception as e:
if not includes_answer:
msg = f"Could not parse LLM output: {text}"
raise OutputParserException(msg) from e
output = text.split(FINAL_ANSWER_ACTION)[-1].strip()
return AgentFinish({"output": output}, text)
@property
def _type(self) -> str:
return "react-json-single-input"
| ReActJsonSingleInputOutputParser |
python | ansible__ansible | test/units/playbook/test_playbook.py | {
"start": 947,
"end": 1987
} | class ____(unittest.TestCase):
def test_empty_playbook(self):
fake_loader = DictDataLoader({})
p = Playbook(loader=fake_loader)
def test_basic_playbook(self):
fake_loader = DictDataLoader({
"test_file.yml": """
- hosts: all
""",
})
p = Playbook.load("test_file.yml", loader=fake_loader)
plays = p.get_plays()
def test_bad_playbook_files(self):
fake_loader = DictDataLoader({
# represents a playbook which is not a list of plays
"bad_list.yml": """
foo: bar
""",
# represents a playbook where a play entry is mis-formatted
"bad_entry.yml": """
-
- "This should be a mapping..."
""",
})
vm = VariableManager()
self.assertRaises(AnsibleParserError, Playbook.load, "bad_list.yml", vm, fake_loader)
self.assertRaises(AnsibleParserError, Playbook.load, "bad_entry.yml", vm, fake_loader)
| TestPlaybook |
python | doocs__leetcode | lcci/17.14.Smallest K/Solution.py | {
"start": 0,
"end": 109
} | class ____:
def smallestK(self, arr: List[int], k: int) -> List[int]:
return sorted(arr)[:k]
| Solution |
python | numpy__numpy | numpy/ma/tests/test_core.py | {
"start": 202359,
"end": 205123
} | class ____:
def _create_data(self):
iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
a.mask[0] = (1, 0)
controlmask = np.array([1] + 19 * [0], dtype=bool)
return data, a, controlmask
def test_view_to_nothing(self):
a = self._create_data()[1]
test = a.view()
assert_(isinstance(test, MaskedArray))
assert_equal(test._data, a._data)
assert_equal(test._mask, a._mask)
def test_view_to_type(self):
data, a, _ = self._create_data()
test = a.view(np.ndarray)
assert_(not isinstance(test, MaskedArray))
assert_equal(test, a._data)
assert_equal_records(test, data.view(a.dtype).squeeze())
def test_view_to_simple_dtype(self):
data, a, controlmask = self._create_data()
# View globally
test = a.view(float)
assert_(isinstance(test, MaskedArray))
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
def test_view_to_flexible_dtype(self):
a = self._create_data()[1]
test = a.view([('A', float), ('B', float)])
assert_equal(test.mask.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'])
assert_equal(test['B'], a['b'])
test = a[0].view([('A', float), ('B', float)])
assert_(isinstance(test, MaskedArray))
assert_equal(test.mask.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][0])
assert_equal(test['B'], a['b'][0])
test = a[-1].view([('A', float), ('B', float)])
assert_(isinstance(test, MaskedArray))
assert_equal(test.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][-1])
assert_equal(test['B'], a['b'][-1])
def test_view_to_subdtype(self):
data, a, controlmask = self._create_data()
# View globally
test = a.view((float, 2))
assert_(isinstance(test, MaskedArray))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
# View on 1 masked element
test = a[0].view((float, 2))
assert_(isinstance(test, MaskedArray))
assert_equal(test, data[0])
assert_equal(test.mask, (1, 0))
# View on 1 unmasked element
test = a[-1].view((float, 2))
assert_(isinstance(test, MaskedArray))
assert_equal(test, data[-1])
def test_view_to_dtype_and_type(self):
data, a, _ = self._create_data()
test = a.view((float, 2), np.recarray)
assert_equal(test, data)
assert_(isinstance(test, np.recarray))
assert_(not isinstance(test, MaskedArray))
| TestMaskedView |
python | huggingface__transformers | src/transformers/cache_utils.py | {
"start": 49652,
"end": 52131
} | class ____(Cache):
"""
A quantizer cache similar to what is described in the
[KIVI: A Tuning-Free Asymmetric 2bit Quantization for KV Cache paper](https://huggingface.co/papers/2402.02750).
It allows the model to generate longer sequence length without allocating too much memory for keys and values
by applying quantization.
The cache has two types of storage, one for original precision and one for the
quantized cache. A `residual length` is set as a maximum capacity for the original precision cache. When the
length goes beyond maximum capacity, the original precision cache is discarded and moved into the quantized cache.
The quantization is done per-channel with a set `q_group_size` for both keys and values, in contrast to what was
described in the paper.
See `Cache` for details on common methods that are implemented by all cache classes.
Args:
backend (`str`):
The quantization backend to use. One of `("quanto", "hqq").
config (`PreTrainedConfig`):
The config of the model for which this Cache will be used.
nbits (`int`, *optional*, defaults to 4):
The number of bits for quantization.
axis_key (`int`, *optional*, defaults to 0):
The axis on which to quantize the keys.
axis_value (`int`, *optional*, defaults to 0):
The axis on which to quantize the values.
q_group_size (`int`, *optional*, defaults to 64):
Quantization is done per-channel according to a set `q_group_size` for both keys and values.
residual_length (`int`, *optional*, defaults to 128):
Maximum capacity for the original precision cache
"""
def __init__(
self,
backend: str,
config: PreTrainedConfig,
nbits: int = 4,
axis_key: int = 0,
axis_value: int = 0,
q_group_size: int = 64,
residual_length: int = 128,
):
if backend == "quanto":
layer_class = QuantoQuantizedLayer
elif backend == "hqq":
layer_class = HQQQuantizedLayer
else:
raise ValueError(f"Unknown quantization backend `{backend}`")
config = config.get_text_config(decoder=True)
layers = [
layer_class(nbits, axis_key, axis_value, q_group_size, residual_length)
for _ in range(config.num_hidden_layers)
]
super().__init__(layers=layers)
| QuantizedCache |
python | django__django | django/db/models/functions/text.py | {
"start": 1256,
"end": 2032
} | class ____(Transform):
function = "CHR"
lookup_name = "chr"
output_field = CharField()
def as_mysql(self, compiler, connection, **extra_context):
return super().as_sql(
compiler,
connection,
function="CHAR",
template="%(function)s(%(expressions)s USING utf16)",
**extra_context,
)
def as_oracle(self, compiler, connection, **extra_context):
return super().as_sql(
compiler,
connection,
template="%(function)s(%(expressions)s USING NCHAR_CS)",
**extra_context,
)
def as_sqlite(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function="CHAR", **extra_context)
| Chr |
python | openai__openai-python | tests/test_transform.py | {
"start": 4854,
"end": 4906
} | class ____(BaseModel):
foo: datetime
| DatetimeModel |
python | dask__distributed | distributed/shuffle/_rechunk.py | {
"start": 28882,
"end": 31451
} | class ____(NamedTuple):
"""Slice of a chunk that is concatenated with other splits to create a new chunk
Splits define how to slice an input chunk on a single axis into small pieces
that can be concatenated together with splits from other input chunks to create
output chunks of a rechunk operation.
"""
#: Index of the new output chunk to which this split belongs.
chunk_index: int
#: Index of the split within the list of splits that are concatenated
#: to create the new chunk.
split_index: int
#: Slice of the input chunk.
slice: slice
SplitChunk: TypeAlias = list[Split]
SplitAxis: TypeAlias = list[SplitChunk]
SplitAxes: TypeAlias = list[SplitAxis]
def split_axes(old: ChunkedAxes, new: ChunkedAxes) -> SplitAxes:
"""Calculate how to split the old chunks on each axis to create the new chunks
Parameters
----------
old : ChunkedAxes
Chunks along each axis of the old array
new : ChunkedAxes
Chunks along each axis of the new array
Returns
-------
SplitAxes
Splits along each axis that determine how to slice the input chunks to create
the new chunks by concatenating the resulting shards.
"""
from dask.array.rechunk import old_to_new
_old_to_new = old_to_new(old, new)
axes = []
for axis_index, new_axis in enumerate(_old_to_new):
old_axis: SplitAxis = [[] for _ in old[axis_index]]
for new_chunk_index, new_chunk in enumerate(new_axis):
for split_index, (old_chunk_index, slice) in enumerate(new_chunk):
old_axis[old_chunk_index].append(
Split(new_chunk_index, split_index, slice)
)
for old_chunk in old_axis:
old_chunk.sort(key=lambda split: split.slice.start)
axes.append(old_axis)
return axes
def convert_chunk(shards: list[list[tuple[NDIndex, np.ndarray]]]) -> np.ndarray:
import numpy as np
from dask.array.core import concatenate3
indexed: dict[NDIndex, np.ndarray] = {}
for sublist in shards:
for index, shard in sublist:
indexed[index] = shard
subshape = [max(dim) + 1 for dim in zip(*indexed.keys())]
assert len(indexed) == np.prod(subshape)
rec_cat_arg = np.empty(subshape, dtype="O")
for index, shard in indexed.items():
rec_cat_arg[tuple(index)] = shard
arrs = rec_cat_arg.tolist()
# This may block for several seconds, as it physically reads the memory-mapped
# buffers from disk
return concatenate3(arrs)
| Split |
python | django__django | tests/generic_relations_regress/models.py | {
"start": 1866,
"end": 1985
} | class ____(models.Model):
name = models.CharField(max_length=100)
clinks = GenericRelation(CharLink)
| OddRelation1 |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 552997,
"end": 553336
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of DeleteDeployment"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id",)
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| DeleteDeploymentPayload |
python | numba__numba | numba/core/typing/listdecl.py | {
"start": 3960,
"end": 4439
} | class ____(AbstractTemplate):
def generic(self, args, kws):
[lhs, rhs] = args
if isinstance(lhs, types.List) and isinstance(rhs, types.List):
# Check element-wise comparability
res = self.context.resolve_function_type(self.key,
(lhs.dtype, rhs.dtype), {})
if res is not None:
return signature(types.boolean, lhs, rhs)
@infer_global(operator.eq)
| ListCompare |
python | django__django | tests/decorators/tests.py | {
"start": 4417,
"end": 12742
} | class ____(SimpleTestCase):
"""
Tests for method_decorator
"""
def test_preserve_signature(self):
class Test:
@simple_dec_m
def say(self, arg):
return arg
self.assertEqual("test:hello", Test().say("hello"))
def test_preserve_attributes(self):
# Sanity check myattr_dec and myattr2_dec
@myattr_dec
def func():
pass
self.assertIs(getattr(func, "myattr", False), True)
@myattr2_dec
def func():
pass
self.assertIs(getattr(func, "myattr2", False), True)
@myattr_dec
@myattr2_dec
def func():
pass
self.assertIs(getattr(func, "myattr", False), True)
self.assertIs(getattr(func, "myattr2", False), False)
# Decorate using method_decorator() on the method.
class TestPlain:
@myattr_dec_m
@myattr2_dec_m
def method(self):
"A method"
pass
# Decorate using method_decorator() on both the class and the method.
# The decorators applied to the methods are applied before the ones
# applied to the class.
@method_decorator(myattr_dec_m, "method")
class TestMethodAndClass:
@method_decorator(myattr2_dec_m)
def method(self):
"A method"
pass
# Decorate using an iterable of function decorators.
@method_decorator((myattr_dec, myattr2_dec), "method")
class TestFunctionIterable:
def method(self):
"A method"
pass
# Decorate using an iterable of method decorators.
decorators = (myattr_dec_m, myattr2_dec_m)
@method_decorator(decorators, "method")
class TestMethodIterable:
def method(self):
"A method"
pass
tests = (
TestPlain,
TestMethodAndClass,
TestFunctionIterable,
TestMethodIterable,
)
for Test in tests:
with self.subTest(Test=Test):
self.assertIs(getattr(Test().method, "myattr", False), True)
self.assertIs(getattr(Test().method, "myattr2", False), True)
self.assertIs(getattr(Test.method, "myattr", False), True)
self.assertIs(getattr(Test.method, "myattr2", False), True)
self.assertEqual(Test.method.__doc__, "A method")
self.assertEqual(Test.method.__name__, "method")
def test_new_attribute(self):
"""A decorator that sets a new attribute on the method."""
def decorate(func):
func.x = 1
return func
class MyClass:
@method_decorator(decorate)
def method(self):
return True
obj = MyClass()
self.assertEqual(obj.method.x, 1)
self.assertIs(obj.method(), True)
def test_bad_iterable(self):
decorators = {myattr_dec_m, myattr2_dec_m}
msg = "'set' object is not subscriptable"
with self.assertRaisesMessage(TypeError, msg):
@method_decorator(decorators, "method")
class TestIterable:
def method(self):
"A method"
pass
# Test for argumented decorator
def test_argumented(self):
class Test:
@method_decorator(ClsDec(False))
def method(self):
return True
self.assertIs(Test().method(), False)
def test_descriptors(self):
def original_dec(wrapped):
def _wrapped(arg):
return wrapped(arg)
return _wrapped
method_dec = method_decorator(original_dec)
class bound_wrapper:
def __init__(self, wrapped):
self.wrapped = wrapped
self.__name__ = wrapped.__name__
def __call__(self, arg):
return self.wrapped(arg)
def __get__(self, instance, cls=None):
return self
class descriptor_wrapper:
def __init__(self, wrapped):
self.wrapped = wrapped
self.__name__ = wrapped.__name__
def __get__(self, instance, cls=None):
return bound_wrapper(self.wrapped.__get__(instance, cls))
class Test:
@method_dec
@descriptor_wrapper
def method(self, arg):
return arg
self.assertEqual(Test().method(1), 1)
def test_class_decoration(self):
"""
@method_decorator can be used to decorate a class and its methods.
"""
def deco(func):
def _wrapper(*args, **kwargs):
return True
return _wrapper
@method_decorator(deco, name="method")
class Test:
def method(self):
return False
self.assertTrue(Test().method())
def test_tuple_of_decorators(self):
"""
@method_decorator can accept a tuple of decorators.
"""
def add_question_mark(func):
def _wrapper(*args, **kwargs):
return func(*args, **kwargs) + "?"
return _wrapper
def add_exclamation_mark(func):
def _wrapper(*args, **kwargs):
return func(*args, **kwargs) + "!"
return _wrapper
# The order should be consistent with the usual order in which
# decorators are applied, e.g.
# @add_exclamation_mark
# @add_question_mark
# def func():
# ...
decorators = (add_exclamation_mark, add_question_mark)
@method_decorator(decorators, name="method")
class TestFirst:
def method(self):
return "hello world"
class TestSecond:
@method_decorator(decorators)
def method(self):
return "hello world"
self.assertEqual(TestFirst().method(), "hello world?!")
self.assertEqual(TestSecond().method(), "hello world?!")
def test_invalid_non_callable_attribute_decoration(self):
"""
@method_decorator on a non-callable attribute raises an error.
"""
msg = (
"Cannot decorate 'prop' as it isn't a callable attribute of "
"<class 'Test'> (1)"
)
with self.assertRaisesMessage(TypeError, msg):
@method_decorator(lambda: None, name="prop")
class Test:
prop = 1
@classmethod
def __module__(cls):
return "tests"
def test_invalid_method_name_to_decorate(self):
"""
@method_decorator on a nonexistent method raises an error.
"""
msg = (
"The keyword argument `name` must be the name of a method of the "
"decorated class: <class 'Test'>. Got 'nonexistent_method' instead"
)
with self.assertRaisesMessage(ValueError, msg):
@method_decorator(lambda: None, name="nonexistent_method")
class Test:
@classmethod
def __module__(cls):
return "tests"
def test_wrapper_assignments(self):
"""@method_decorator preserves wrapper assignments."""
func_name = None
func_module = None
def decorator(func):
@wraps(func)
def inner(*args, **kwargs):
nonlocal func_name, func_module
func_name = getattr(func, "__name__", None)
func_module = getattr(func, "__module__", None)
return func(*args, **kwargs)
return inner
class Test:
@method_decorator(decorator)
def method(self):
return "tests"
Test().method()
self.assertEqual(func_name, "method")
self.assertIsNotNone(func_module)
def async_simple_dec(func):
@wraps(func)
async def wrapper(*args, **kwargs):
result = await func(*args, **kwargs)
return f"returned: {result}"
return wrapper
async_simple_dec_m = method_decorator(async_simple_dec)
| MethodDecoratorTests |
python | spack__spack | lib/spack/spack/cmd/common/arguments.py | {
"start": 2996,
"end": 3733
} | class ____(argparse.Action):
"""Sets the correct value for parallel build jobs.
The value is set in the command line configuration scope so that
it can be retrieved using the spack.config API.
"""
def __call__(self, parser, namespace, jobs, option_string):
# Jobs is a single integer, type conversion is already applied
# see https://docs.python.org/3/library/argparse.html#action-classes
if jobs < 1:
msg = 'invalid value for argument "{0}" ' '[expected a positive integer, got "{1}"]'
raise ValueError(msg.format(option_string, jobs))
spack.config.set("config:build_jobs", jobs, scope="command_line")
setattr(namespace, "jobs", jobs)
| SetParallelJobs |
python | doocs__leetcode | solution/2700-2799/2790.Maximum Number of Groups With Increasing Length/Solution2.py | {
"start": 0,
"end": 261
} | class ____:
def maxIncreasingGroups(self, usageLimits: List[int]) -> int:
usageLimits.sort()
k = s = 0
for x in usageLimits:
s += x
if s > k:
k += 1
s -= k
return k
| Solution |
python | tensorflow__tensorflow | tensorflow/python/framework/tensor_util_test.py | {
"start": 1747,
"end": 34548
} | class ____(test.TestCase, parameterized.TestCase):
def testFloat(self):
value = 10.0
t = tensor_util.make_tensor_proto(value)
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape {}
float_val: %.1f
""" % value, t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.float32, a.dtype)
self.assertAllClose(np.array(value, dtype=np.float32), a)
def testFloatN(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0])
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatTyped(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], dtype=dtypes.float32)
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatTypeCoerce(self):
t = tensor_util.make_tensor_proto([10, 20, 30], dtype=dtypes.float32)
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatTypeCoerceNdarray(self):
arr = np.asarray([10, 20, 30], dtype="int")
t = tensor_util.make_tensor_proto(arr, dtype=dtypes.float32)
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testFloatSizes(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], shape=[1, 3])
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.float32, a.dtype)
self.assertAllClose(np.array([[10.0, 20.0, 30.0]], dtype=np.float32), a)
def testFloatSizes2(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], shape=[3, 1])
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } dim { size: 1 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } dim { size: 1 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.float32, a.dtype)
self.assertAllClose(np.array([[10.0], [20.0], [30.0]], dtype=np.float32), a)
def testFloatSizesLessValues(self):
t = tensor_util.make_tensor_proto(10.0, shape=[1, 3])
self.assertProtoEquals("""
dtype: DT_FLOAT
tensor_shape { dim { size: 1 } dim { size: 3 } }
float_val: 10.0
""", t)
# No conversion to Ndarray for this one: not enough values.
def testFloatNpArrayFloat64(self):
t = tensor_util.make_tensor_proto(
np.array([[10.0, 20.0, 30.0]], dtype=np.float64))
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_DOUBLE
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "@$\000\000\000\000\000\000@4\000\000\000\000\000\000@>\000\000\000\000\000\000"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_DOUBLE
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\000\000\000\000\000\000$@\000\000\000\000\000\0004@\000\000\000\000\000\000>@"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.float64, a.dtype)
self.assertAllClose(
np.array([[10.0, 20.0, 30.0]], dtype=np.float64),
tensor_util.MakeNdarray(t))
def testFloatTypesWithImplicitRepeat(self):
for dtype, nptype in [(dtypes.float32, np.float32),
(dtypes.float64, np.float64)]:
t = tensor_util.make_tensor_proto([10.0], shape=[3, 4], dtype=dtype)
a = tensor_util.MakeNdarray(t)
self.assertAllClose(
np.array(
[[10.0, 10.0, 10.0, 10.0],
[10.0, 10.0, 10.0, 10.0],
[10.0, 10.0, 10.0, 10.0]],
dtype=nptype),
a)
def testFloatMutateArray(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, 30.0], dtype=dtypes.float32)
a = tensor_util.MakeNdarray(t)
a[0] = 5.0
self.assertEqual(np.float32, a.dtype)
self.assertAllClose(np.array([5.0, 20.0, 30.0], dtype=np.float32), a)
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "A \000\000A\240\000\000A\360\000\000"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_FLOAT
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000 A\000\000\240A\000\000\360A"
""", t)
def testHalf(self):
t = tensor_util.make_tensor_proto(np.array([10.0, 20.0], dtype=np.float16))
if sys.byteorder == "big":
self.assertProtoEquals(
"""
dtype: DT_HALF
tensor_shape { dim { size: 2 } }
tensor_content: "I\000M\000"
""",
t,
)
else:
self.assertProtoEquals(
"""
dtype: DT_HALF
tensor_shape { dim { size: 2 } }
tensor_content: "\000I\000M"
""",
t,
)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.float16, a.dtype)
self.assertAllClose(np.array([10.0, 20.0], dtype=np.float16), a)
def testBfloat16(self):
test_type = dtypes.bfloat16.as_numpy_dtype
t = tensor_util.make_tensor_proto(np.array([10.0, 20.0], dtype=test_type))
if sys.byteorder == "big":
self.assertProtoEquals(
"""
dtype: DT_BFLOAT16
tensor_shape {
dim {
size: 2
}
}
tensor_content: "\x41\x20\x41\x5C\x32\x34\x30"
""",
t,
)
else:
self.assertProtoEquals(
"""
dtype: DT_BFLOAT16
tensor_shape {
dim {
size: 2
}
}
tensor_content: "\x20\x41\x5C\x32\x34\x30\x41"
""",
t,
)
a = tensor_util.MakeNdarray(t)
self.assertEqual(test_type, a.dtype)
self.assertAllClose(np.array([10.0, 20.0], dtype=test_type), a)
def testFloat8e5m2(self):
test_type = dtypes.float8_e5m2.as_numpy_dtype
t = tensor_util.make_tensor_proto(np.array([10.0, 20.0], dtype=test_type))
# 10.0: "I" = 73 = 10010 01: 2^(18 - 15) * (1 + 1/4)
# 20.0: "M" = 77 = 10011 01: 2^(19 - 15) * (1 + 1/4)
self.assertProtoEquals(
"""
dtype: DT_FLOAT8_E5M2
tensor_shape {
dim {
size: 2
}
}
tensor_content: "IM"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(test_type, a.dtype)
self.assertAllClose(np.array([10.0, 20.0], dtype=test_type), a)
def testFloat8e4m3fn(self):
test_type = dtypes.float8_e4m3fn.as_numpy_dtype
t = tensor_util.make_tensor_proto(np.array([10.0, 20.0], dtype=test_type))
# 10.0: "R" = 82 = 1010 010: 2^(10 - 7) * (1 + 1/4)
# 20.0: "Z" = 90 = 1011 010: 2^(11 - 7) * (1 + 1/4)
self.assertProtoEquals(
"""
dtype: DT_FLOAT8_E4M3FN
tensor_shape {
dim {
size: 2
}
}
tensor_content: "RZ"
""", t)
def testFloat8e4m3fnuz(self):
test_type = dtypes.float8_e4m3fnuz.as_numpy_dtype
t = tensor_util.make_tensor_proto(np.array([10.0, 20.0], dtype=test_type))
# 10.0: "Z" = 90 = 1010 010: 2^(10 - 7) * (1 + 1/4) + 8
# 20.0: "b" = 98 = 1011 010: 2^(11 - 7) * (1 + 1/4) + 8
self.assertProtoEquals(
"""
dtype: DT_FLOAT8_E4M3FNUZ
tensor_shape {
dim {
size: 2
}
}
tensor_content: "Zb"
""",
t,
)
def testFloat8e4m3b11fnuz(self):
test_type = dtypes.float8_e4m3b11fnuz.as_numpy_dtype
t = tensor_util.make_tensor_proto(np.array([10.0, 20.0], dtype=test_type))
# 10.0: "r" = 114 = 1010 010: 2^(10 - 7) * (1 + 1/4) + 36
# 20.0: "z" = 126 = 1011 010: 2^(11 - 7) * (1 + 1/4) + 36
self.assertProtoEquals(
"""
dtype: DT_FLOAT8_E4M3B11FNUZ
tensor_shape {
dim {
size: 2
}
}
tensor_content: "rz"
""",
t,
)
def testFloat8e5m2fnuz(self):
test_type = dtypes.float8_e5m2fnuz.as_numpy_dtype
t = tensor_util.make_tensor_proto(np.array([10.0, 20.0], dtype=test_type))
# 10.0: "M" = 77 = 1010 010: 2^(10 - 7) * (1 + 1/4) - 3
# 20.0: "Q" = 87 = 1011 010: 2^(11 - 7) * (1 + 1/4) - 3
self.assertProtoEquals(
"""
dtype: DT_FLOAT8_E5M2FNUZ
tensor_shape {
dim {
size: 2
}
}
tensor_content: "MQ"
""",
t,
)
def testFloat4e2m1fn(self):
test_type = dtypes.float4_e2m1fn.as_numpy_dtype
t = tensor_util.make_tensor_proto(np.array([6, 0.5], dtype=test_type))
# 0x7 = 011 1 = 2^(3-1) x (1+0.5) = 6
# 0x1 = 000 1 = 2^(0) x 0.5 = 0.5
expected_bytes = r"\x07\x01"
self.assertProtoEquals(
f"""
dtype: DT_FLOAT4_E2M1FN
tensor_shape {{
dim {{
size: 2
}}
}}
tensor_content: "{expected_bytes}"
""",
t,
)
a = tensor_util.MakeNdarray(t)
self.assertEqual(test_type, a.dtype)
self.assertAllClose(np.array([6, 0.5], dtype=test_type), a)
def testInt(self):
t = tensor_util.make_tensor_proto(10)
self.assertProtoEquals(
"""
dtype: DT_INT32
tensor_shape {}
int_val: 10
""",
t,
)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.int32, a.dtype)
self.assertAllClose(np.array(10, dtype=np.int32), a)
def testInt4(self):
test_type = dtypes.int4.as_numpy_dtype
t = tensor_util.make_tensor_proto(
np.array(
[-8, -1, 0, 1, 7],
dtype=test_type,
)
)
#
self.assertProtoEquals(
"""
dtype: DT_INT4
tensor_shape {
dim {
size: 5
}
}
int_val: -8
int_val: -1
int_val: 0
int_val: 1
int_val: 7
""",
t,
)
def testUInt4(self):
test_type = dtypes.uint4.as_numpy_dtype
t = tensor_util.make_tensor_proto(
np.array(
[0, 1, 7, 8, 15],
dtype=test_type,
)
)
self.assertProtoEquals(
"""
dtype: DT_UINT4
tensor_shape {
dim {
size: 5
}
}
int_val: 0
int_val: 1
int_val: 7
int_val: 8
int_val: 15
""",
t,
)
def testInt2(self):
test_type = dtypes.int2.as_numpy_dtype
t = tensor_util.make_tensor_proto(
np.array(
[-2, -1, 0, 1],
dtype=test_type,
)
)
self.assertProtoEquals(
"""
dtype: DT_INT2
tensor_shape {
dim {
size: 4
}
}
int_val: -2
int_val: -1
int_val: 0
int_val: 1
""",
t,
)
def testUInt2(self):
test_type = dtypes.uint2.as_numpy_dtype
t = tensor_util.make_tensor_proto(
np.array(
[0, 1, 2, 3],
dtype=test_type,
)
)
self.assertProtoEquals(
"""
dtype: DT_UINT2
tensor_shape {
dim {
size: 4
}
}
int_val: 0
int_val: 1
int_val: 2
int_val: 3
""",
t,
)
def testLargeInt(self):
value = np.iinfo(np.int64).max
t = tensor_util.make_tensor_proto(value)
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: %d
""" % value, t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.int64, a.dtype)
self.assertAllClose(np.array(value, dtype=np.int64), a)
def testLargeNegativeInt(self):
# We don't use the min np.int64 value here
# because it breaks np.abs().
#
# np.iinfo(np.int64).min = -9223372036854775808
# np.iinfo(np.int64).max = 9223372036854775807
# np.abs(-9223372036854775808) = -9223372036854775808
value = np.iinfo(np.int64).min + 1
t = tensor_util.make_tensor_proto(value)
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: %d
""" % value, t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.int64, a.dtype)
self.assertAllClose(np.array(value, dtype=np.int64), a)
def testIntNDefaultType(self):
t = tensor_util.make_tensor_proto([10, 20, 30, 40], shape=[2, 2])
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_INT32
tensor_shape { dim { size: 2 } dim { size: 2 } }
tensor_content: "\000\000\000\n\000\000\000\024\000\000\000\036\000\000\000("
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_INT32
tensor_shape { dim { size: 2 } dim { size: 2 } }
tensor_content: "\n\000\000\000\024\000\000\000\036\000\000\000(\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.int32, a.dtype)
self.assertAllClose(np.array([[10, 20], [30, 40]], dtype=np.int32), a)
@parameterized.named_parameters(
("_int8", dtypes.int8, np.int8), ("_int16", dtypes.int16, np.int16),
("_int32", dtypes.int32, np.int32), ("_int64", dtypes.int64, np.int64),
("_uint8", dtypes.uint8, np.uint8), ("_uint16", dtypes.uint16, np.uint16),
("_uint32", dtypes.uint32, np.uint32),
("_uint64", dtypes.uint64, np.uint64))
def testIntTypes(self, dtype, nptype):
# Test with array.
t = tensor_util.make_tensor_proto([10, 20, 30], dtype=dtype)
self.assertEqual(dtype, t.dtype)
self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
a = tensor_util.MakeNdarray(t)
self.assertEqual(nptype, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
# Test with ndarray.
t = tensor_util.make_tensor_proto(np.array([10, 20, 30], dtype=nptype))
self.assertEqual(dtype, t.dtype)
self.assertProtoEquals("dim { size: 3 }", t.tensor_shape)
a = tensor_util.MakeNdarray(t)
self.assertEqual(nptype, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
@parameterized.named_parameters(
("_int8", dtypes.int8, np.int8), ("_int16", dtypes.int16, np.int16),
("_int32", dtypes.int32, np.int32), ("_int64", dtypes.int64, np.int64),
("_uint8", dtypes.uint8, np.uint8), ("_uint16", dtypes.uint16, np.uint16),
("_uint32", dtypes.uint32, np.uint32),
("_uint64", dtypes.uint64, np.uint64))
def testIntTypesWithImplicitRepeat(self, dtype, nptype):
self.assertAllEqual(
np.array([[10, 11, 12, 12], [12, 12, 12, 12], [12, 12, 12, 12]],
dtype=nptype),
tensor_util.MakeNdarray(
tensor_util.make_tensor_proto([10, 11, 12],
shape=[3, 4],
dtype=dtype)))
def testIntMixedWithDimension(self):
# Github issue: 11974
dtype = dtypes.int32
nptype = np.int32
t = tensor_util.make_tensor_proto(
[10, tensor_shape.Dimension(20), 30], dtype=dtype)
self.assertEqual(dtype, t.dtype)
a = tensor_util.MakeNdarray(t)
self.assertEqual(nptype, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
@parameterized.named_parameters(
("_int64", dtypes.int64, np.int64, "DT_INT64", "int64_val"),
("_uint64", dtypes.uint64, np.uint64, "DT_UINT64", "uint64_val"))
def testLong(self, dtype, nptype, proto_dtype, proto_value_name):
t = tensor_util.make_tensor_proto(10, dtype=dtype)
self.assertProtoEquals(
"""
dtype: %s
tensor_shape {}
%s: 10
""" % (proto_dtype, proto_value_name), t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(nptype, a.dtype)
self.assertAllClose(np.array(10, dtype=nptype), a)
@parameterized.named_parameters(
("_int64", dtypes.int64, np.int64, "DT_INT64"),
("_uint64", dtypes.uint64, np.uint64, "DT_UINT64"))
def testLongN(self, dtype, nptype, proto_dtype):
t = tensor_util.make_tensor_proto([10, 20, 30], shape=[1, 3], dtype=dtype)
if sys.byteorder == "big":
# pylint: disable=line-too-long
self.assertProtoEquals(
r"""
dtype: %s
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\000\000\000\000\000\000\000\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036"
""" % proto_dtype, t)
# pylint: enable=line-too-long
else:
# pylint: disable=line-too-long
self.assertProtoEquals(
r"""
dtype: %s
tensor_shape { dim { size: 1 } dim { size: 3 } }
tensor_content: "\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000"
""" % proto_dtype, t)
# pylint: enable=line-too-long
a = tensor_util.MakeNdarray(t)
self.assertEqual(nptype, a.dtype)
self.assertAllClose(np.array([[10, 20, 30]], dtype=nptype), a)
@parameterized.named_parameters(("_int64", np.int64, "DT_INT64"),
("_uint64", np.uint64, "DT_UINT64"))
def testLongNpArray(self, nptype, proto_dtype):
t = tensor_util.make_tensor_proto(np.array([10, 20, 30], dtype=nptype))
if sys.byteorder == "big":
# pylint: disable=line-too-long
self.assertProtoEquals(
r"""
dtype: %s
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000\000\000\000\000\000\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036"
""" % proto_dtype, t)
# pylint: enable=line-too-long
else:
# pylint: disable=line-too-long
self.assertProtoEquals(
r"""
dtype: %s
tensor_shape { dim { size: 3 } }
tensor_content: "\n\000\000\000\000\000\000\000\024\000\000\000\000\000\000\000\036\000\000\000\000\000\000\000"
""" % proto_dtype, t)
# pylint: enable=line-too-long
a = tensor_util.MakeNdarray(t)
self.assertEqual(nptype, a.dtype)
self.assertAllClose(np.array([10, 20, 30], dtype=nptype), a)
def testQuantizedTypes(self):
# Test with array.
data = [(21,), (22,), (23,)]
t = tensor_util.make_tensor_proto(data, dtype=dtypes.qint32)
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_QINT32
tensor_shape { dim { size: 3 } }
tensor_content: "\000\000\000\025\000\000\000\026\000\000\000\027"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_QINT32
tensor_shape { dim { size: 3 } }
tensor_content: "\025\000\000\000\026\000\000\000\027\000\000\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(dtypes.qint32.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=dtypes.quint8)
self.assertProtoEquals(r"""
dtype: DT_QUINT8
tensor_shape { dim { size: 3 } }
tensor_content: "\025\026\027"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(dtypes.quint8.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=dtypes.qint8)
self.assertProtoEquals(r"""
dtype: DT_QINT8
tensor_shape { dim { size: 3 } }
tensor_content: "\025\026\027"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(dtypes.qint8.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=dtypes.quint16)
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_QUINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\000\025\000\026\000\027"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_QUINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\025\000\026\000\027\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(dtypes.quint16.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
t = tensor_util.make_tensor_proto(data, dtype=dtypes.qint16)
if sys.byteorder == "big":
self.assertProtoEquals(r"""
dtype: DT_QINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\000\025\000\026\000\027"
""", t)
else:
self.assertProtoEquals(r"""
dtype: DT_QINT16
tensor_shape { dim { size: 3 } }
tensor_content: "\025\000\026\000\027\000"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(dtypes.qint16.as_numpy_dtype, a.dtype)
self.assertAllEqual(np.array(data, dtype=a.dtype), a)
def testString(self):
t = tensor_util.make_tensor_proto("foo")
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape {}
string_val: "foo"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.object_, a.dtype)
self.assertEqual([b"foo"], a)
def testStringWithImplicitRepeat(self):
t = tensor_util.make_tensor_proto(["f", "g"], shape=[3, 4])
a = tensor_util.MakeNdarray(t)
self.assertAllEqual(
np.array([[b"f", b"g", b"g", b"g"], [b"g", b"g", b"g", b"g"],
[b"g", b"g", b"g", b"g"]],
dtype=np.object_), a)
def testStringN(self):
t = tensor_util.make_tensor_proto([b"foo", b"bar", b"baz"], shape=[1, 3])
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 1 } dim { size: 3 } }
string_val: "foo"
string_val: "bar"
string_val: "baz"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.object_, a.dtype)
self.assertAllEqual(np.array([[b"foo", b"bar", b"baz"]]), a)
def testStringNpArray(self):
t = tensor_util.make_tensor_proto(
np.array([[b"a", b"ab"], [b"abc", b"abcd"]]))
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 2 } dim { size: 2 } }
string_val: "a"
string_val: "ab"
string_val: "abc"
string_val: "abcd"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.object_, a.dtype)
self.assertAllEqual(np.array([[b"a", b"ab"], [b"abc", b"abcd"]]), a)
def testArrayMethod(self):
class Wrapper(object):
def __array__(self, dtype=None):
del dtype
return np.array([b"foo", b"bar", b"baz"])
t = tensor_util.make_tensor_proto(Wrapper(), shape=[1, 3])
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 1 } dim { size: 3 } }
string_val: "foo"
string_val: "bar"
string_val: "baz"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.object_, a.dtype)
self.assertAllEqual(np.array([[b"foo", b"bar", b"baz"]]), a)
def testArrayInterface(self):
class Wrapper(object):
def __init__(self):
self.a = np.array([b"foo", b"bar", b"baz"])
@property
def __array_interface__(self):
return self.a.__array_interface__
t = tensor_util.make_tensor_proto(Wrapper(), shape=[1, 3])
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 1 } dim { size: 3 } }
string_val: "foo"
string_val: "bar"
string_val: "baz"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.object_, a.dtype)
self.assertAllEqual(np.array([[b"foo", b"bar", b"baz"]]), a)
def testStringTuple(self):
t = tensor_util.make_tensor_proto((b"a", b"ab", b"abc", b"abcd"))
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 4 } }
string_val: "a"
string_val: "ab"
string_val: "abc"
string_val: "abcd"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.object_, a.dtype)
self.assertAllEqual(np.array((b"a", b"ab", b"abc", b"abcd")), a)
def testStringNestedTuple(self):
t = tensor_util.make_tensor_proto(((b"a", b"ab"), (b"abc", b"abcd")))
self.assertProtoEquals("""
dtype: DT_STRING
tensor_shape { dim { size: 2 } dim { size: 2 } }
string_val: "a"
string_val: "ab"
string_val: "abc"
string_val: "abcd"
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.object_, a.dtype)
self.assertAllEqual(np.array(((b"a", b"ab"), (b"abc", b"abcd"))), a)
def testComplex64(self):
t = tensor_util.make_tensor_proto((1 + 2j), dtype=dtypes.complex64)
self.assertProtoEquals("""
dtype: DT_COMPLEX64
tensor_shape {}
scomplex_val: 1
scomplex_val: 2
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.complex64, a.dtype)
self.assertAllEqual(np.array(1 + 2j), a)
def testComplex128(self):
t = tensor_util.make_tensor_proto((1 + 2j), dtype=dtypes.complex128)
self.assertProtoEquals("""
dtype: DT_COMPLEX128
tensor_shape {}
dcomplex_val: 1
dcomplex_val: 2
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.complex128, a.dtype)
self.assertAllEqual(np.array(1 + 2j), a)
def testComplexWithImplicitRepeat(self):
for dtype, np_dtype in [(dtypes.complex64, np.complex64),
(dtypes.complex128, np.complex128)]:
t = tensor_util.make_tensor_proto((1 + 1j), shape=[3, 4], dtype=dtype)
a = tensor_util.MakeNdarray(t)
self.assertAllClose(
np.array(
[[(1 + 1j), (1 + 1j), (1 + 1j), (1 + 1j)],
[(1 + 1j), (1 + 1j), (1 + 1j), (1 + 1j)],
[(1 + 1j), (1 + 1j), (1 + 1j), (1 + 1j)]],
dtype=np_dtype),
a)
def testComplex64N(self):
t = tensor_util.make_tensor_proto(
[(1 + 2j), (3 + 4j), (5 + 6j)], shape=[1, 3], dtype=dtypes.complex64)
self.assertProtoEquals("""
dtype: DT_COMPLEX64
tensor_shape { dim { size: 1 } dim { size: 3 } }
scomplex_val: 1
scomplex_val: 2
scomplex_val: 3
scomplex_val: 4
scomplex_val: 5
scomplex_val: 6
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.complex64, a.dtype)
self.assertAllEqual(np.array([[(1 + 2j), (3 + 4j), (5 + 6j)]]), a)
def testComplex128N(self):
t = tensor_util.make_tensor_proto(
[(1 + 2j), (3 + 4j), (5 + 6j)], shape=[1, 3], dtype=dtypes.complex128)
self.assertProtoEquals("""
dtype: DT_COMPLEX128
tensor_shape { dim { size: 1 } dim { size: 3 } }
dcomplex_val: 1
dcomplex_val: 2
dcomplex_val: 3
dcomplex_val: 4
dcomplex_val: 5
dcomplex_val: 6
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.complex128, a.dtype)
self.assertAllEqual(np.array([[(1 + 2j), (3 + 4j), (5 + 6j)]]), a)
def testComplex64NpArray(self):
t = tensor_util.make_tensor_proto(
np.array([[(1 + 2j), (3 + 4j)], [(5 + 6j), (7 + 8j)]]),
dtype=dtypes.complex64)
# scomplex_val are real_0, imag_0, real_1, imag_1, ...
self.assertProtoEquals("""
dtype: DT_COMPLEX64
tensor_shape { dim { size: 2 } dim { size: 2 } }
scomplex_val: 1
scomplex_val: 2
scomplex_val: 3
scomplex_val: 4
scomplex_val: 5
scomplex_val: 6
scomplex_val: 7
scomplex_val: 8
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.complex64, a.dtype)
self.assertAllEqual(
np.array([[(1 + 2j), (3 + 4j)], [(5 + 6j), (7 + 8j)]]), a)
def testComplex128NpArray(self):
t = tensor_util.make_tensor_proto(
np.array([[(1 + 2j), (3 + 4j)], [(5 + 6j), (7 + 8j)]]),
dtype=dtypes.complex128)
# scomplex_val are real_0, imag_0, real_1, imag_1, ...
self.assertProtoEquals("""
dtype: DT_COMPLEX128
tensor_shape { dim { size: 2 } dim { size: 2 } }
dcomplex_val: 1
dcomplex_val: 2
dcomplex_val: 3
dcomplex_val: 4
dcomplex_val: 5
dcomplex_val: 6
dcomplex_val: 7
dcomplex_val: 8
""", t)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.complex128, a.dtype)
self.assertAllEqual(
np.array([[(1 + 2j), (3 + 4j)], [(5 + 6j), (7 + 8j)]]), a)
def testNestedNumpyArrayWithoutDType(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, np.array(30.0)])
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testNestedNumpyArrayWithDType(self):
t = tensor_util.make_tensor_proto([10.0, 20.0, np.array(30.0)],
dtype=dtypes.float32)
a = tensor_util.MakeNdarray(t)
self.assertEqual(np.float32, a.dtype)
self.assertAllClose(np.array([10.0, 20.0, 30.0], dtype=np.float32), a)
def testUnsupportedDTypes(self):
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto(np.array([1]), 0)
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto(3, dtype=dtypes.qint8)
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto([3], dtype=dtypes.qint8)
# Validate the helpful error message when trying to convert an
# unconvertible list as strings.
with self.assertRaisesRegex(TypeError, "Failed to convert elements"):
tensor_util.make_tensor_proto([tensor_shape.Dimension(1)])
def testTensorShapeVerification(self):
array = np.array([[1], [2]])
correct_shape = (2, 1)
incorrect_shape = (1, 2)
tensor_util.make_tensor_proto(array, shape=correct_shape, verify_shape=True)
with self.assertRaises(TypeError):
tensor_util.make_tensor_proto(
array, shape=incorrect_shape, verify_shape=True)
def testShapeTooLarge(self):
with self.assertRaises(ValueError):
tensor_util.make_tensor_proto(np.array([1, 2]), shape=[1])
def testLowRankSupported(self):
t = tensor_util.make_tensor_proto(np.array(7))
self.assertProtoEquals("""
dtype: DT_INT64
tensor_shape {}
int64_val: 7
""", t)
def testShapeEquals(self):
t = tensor_util.make_tensor_proto([10, 20, 30, 40], shape=[2, 2])
self.assertTrue(tensor_util.ShapeEquals(t, [2, 2]))
self.assertTrue(tensor_util.ShapeEquals(t, (2, 2)))
self.assertTrue(
tensor_util.ShapeEquals(t, tensor_shape.as_shape([2, 2]).as_proto()))
self.assertFalse(tensor_util.ShapeEquals(t, [5, 3]))
self.assertFalse(tensor_util.ShapeEquals(t, [1, 4]))
self.assertFalse(tensor_util.ShapeEquals(t, [4]))
self.assertFalse(tensor_util.ShapeEquals(t, [2]))
self.assertFalse(tensor_util.ShapeEquals(t, []))
self.assertFalse(tensor_util.ShapeEquals(t, [2, 2, 1]))
@test_util.run_all_in_graph_and_eager_modes
| TensorUtilTest |
python | huggingface__transformers | src/transformers/models/x_clip/modeling_x_clip.py | {
"start": 17613,
"end": 21018
} | class ____(GradientCheckpointingLayer):
"""
This corresponds to the `CrossFramelAttentionBlock` class in the original implementation.
"""
def __init__(self, config: XCLIPConfig):
super().__init__()
self.num_frames = config.num_frames
self.embed_dim = config.hidden_size
self.message_fc = nn.Linear(self.embed_dim, self.embed_dim)
self.message_ln = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.message_attn = XCLIPAttention(config)
self.drop_path = XCLIPDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
self.self_attn = XCLIPAttention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = XCLIPMLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
causal_attention_mask: torch.Tensor,
output_attentions: Optional[bool] = False,
) -> tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
`(config.encoder_attention_heads,)`.
causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Causal mask for the text model. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
batch_time, seq_length, hidden_size = hidden_states.size()
batch_size = batch_time // self.num_frames
msg_token = self.message_fc(hidden_states[:, 0, :])
msg_token = msg_token.view(batch_size, self.num_frames, hidden_size)
msg_token = msg_token + self.drop_path(self.message_attn(self.message_ln(msg_token))[0])
# add dummy sequence dimension
msg_token = msg_token.view(-1, 1, hidden_size)
hidden_states = torch.cat([hidden_states, msg_token], dim=1)
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
causal_attention_mask=causal_attention_mask,
output_attentions=output_attentions,
)
hidden_states = residual + hidden_states
hidden_states = hidden_states[:, :seq_length, :]
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
@auto_docstring
| XCLIPVisionEncoderLayer |
python | scikit-learn__scikit-learn | sklearn/ensemble/_forest.py | {
"start": 73657,
"end": 89347
} | class ____(ForestClassifier):
"""
An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
This estimator has native support for missing values (NaNs) for
random splits. During training, a random threshold will be chosen
to split the non-missing values on. Then the non-missing values will be sent
to the left and right child based on the randomly selected threshold, while
the missing values will also be randomly sent to the left or right child.
This is repeated for every feature considered at each split. The best split
among these is chosen.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : int, default=100
The number of trees in the forest.
.. versionchanged:: 0.22
The default value of ``n_estimators`` changed from 10 to 100
in 0.22.
criterion : {"gini", "entropy", "log_loss"}, default="gini"
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "log_loss" and "entropy" both for the
Shannon information gain, see :ref:`tree_mathematical_formulation`.
Note: This parameter is tree-specific.
max_depth : int, default=None
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int or float, default=2
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for fractions.
min_samples_leaf : int or float, default=1
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for fractions.
min_weight_fraction_leaf : float, default=0.0
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : {"sqrt", "log2", None}, int or float, default="sqrt"
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`max(1, int(max_features * n_features_in_))` features are considered at each
split.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
.. versionchanged:: 1.1
The default of `max_features` changed from `"auto"` to `"sqrt"`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int, default=None
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, default=0.0
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
.. versionadded:: 0.19
bootstrap : bool, default=False
Whether bootstrap samples are used when building trees. If False, the
whole dataset is used to build each tree.
oob_score : bool or callable, default=False
Whether to use out-of-bag samples to estimate the generalization score.
By default, :func:`~sklearn.metrics.accuracy_score` is used.
Provide a callable with signature `metric(y_true, y_pred)` to use a
custom metric. Only available if `bootstrap=True`.
For an illustration of out-of-bag (OOB) error estimation, see the example
:ref:`sphx_glr_auto_examples_ensemble_plot_ensemble_oob.py`.
n_jobs : int, default=None
The number of jobs to run in parallel. :meth:`fit`, :meth:`predict`,
:meth:`decision_path` and :meth:`apply` are all parallelized over the
trees. ``None`` means 1 unless in a :obj:`joblib.parallel_backend`
context. ``-1`` means using all processors. See :term:`Glossary
<n_jobs>` for more details.
random_state : int, RandomState instance or None, default=None
Controls 3 sources of randomness:
- the bootstrapping of the samples used when building trees
(if ``bootstrap=True``)
- the sampling of the features to consider when looking for the best
split at each node (if ``max_features < n_features``)
- the draw of the splits for each of the `max_features`
See :term:`Glossary <random_state>` for details.
verbose : int, default=0
Controls the verbosity when fitting and predicting.
warm_start : bool, default=False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest. See :term:`Glossary <warm_start>` and
:ref:`tree_ensemble_warm_start` for details.
class_weight : {"balanced", "balanced_subsample"}, dict or list of dicts, \
default=None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
ccp_alpha : non-negative float, default=0.0
Complexity parameter used for Minimal Cost-Complexity Pruning. The
subtree with the largest cost complexity that is smaller than
``ccp_alpha`` will be chosen. By default, no pruning is performed. See
:ref:`minimal_cost_complexity_pruning` for details. See
:ref:`sphx_glr_auto_examples_tree_plot_cost_complexity_pruning.py`
for an example of such pruning.
.. versionadded:: 0.22
max_samples : int or float, default=None
If bootstrap is True, the number of samples to draw from X
to train each base estimator.
- If None (default), then draw `X.shape[0]` samples.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples. Thus,
`max_samples` should be in the interval `(0.0, 1.0]`.
.. versionadded:: 0.22
monotonic_cst : array-like of int of shape (n_features), default=None
Indicates the monotonicity constraint to enforce on each feature.
- 1: monotonically increasing
- 0: no constraint
- -1: monotonically decreasing
If monotonic_cst is None, no constraints are applied.
Monotonicity constraints are not supported for:
- multiclass classifications (i.e. when `n_classes > 2`),
- multioutput classifications (i.e. when `n_outputs_ > 1`),
- classifications trained on data with missing values.
The constraints hold over the probability of the positive class.
Read more in the :ref:`User Guide <monotonic_cst_gbdt>`.
.. versionadded:: 1.4
Attributes
----------
estimator_ : :class:`~sklearn.tree.ExtraTreeClassifier`
The child estimator template used to create the collection of fitted
sub-estimators.
.. versionadded:: 1.2
`base_estimator_` was renamed to `estimator_`.
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : ndarray of shape (n_classes,) or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : ndarray of shape (n_features,)
The impurity-based feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance.
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
This attribute exists only when ``oob_score`` is True.
oob_decision_function_ : ndarray of shape (n_samples, n_classes) or \
(n_samples, n_classes, n_outputs)
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN. This attribute exists
only when ``oob_score`` is True.
estimators_samples_ : list of arrays
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator. Each subset is defined by an array of the indices selected.
.. versionadded:: 1.4
See Also
--------
ExtraTreesRegressor : An extra-trees regressor with random splits.
RandomForestClassifier : A random forest classifier with optimal splits.
RandomForestRegressor : Ensemble regressor using trees with optimal splits.
Notes
-----
The default values for the parameters controlling the size of the trees
(e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
unpruned trees which can potentially be very large on some data sets. To
reduce memory consumption, the complexity and size of the trees should be
controlled by setting those parameter values.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized
trees", Machine Learning, 63(1), 3-42, 2006.
Examples
--------
>>> from sklearn.ensemble import ExtraTreesClassifier
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_features=4, random_state=0)
>>> clf = ExtraTreesClassifier(n_estimators=100, random_state=0)
>>> clf.fit(X, y)
ExtraTreesClassifier(random_state=0)
>>> clf.predict([[0, 0, 0, 0]])
array([1])
"""
_parameter_constraints: dict = {
**ForestClassifier._parameter_constraints,
**DecisionTreeClassifier._parameter_constraints,
"class_weight": [
StrOptions({"balanced_subsample", "balanced"}),
dict,
list,
None,
],
}
_parameter_constraints.pop("splitter")
def __init__(
self,
n_estimators=100,
*,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.0,
max_features="sqrt",
max_leaf_nodes=None,
min_impurity_decrease=0.0,
bootstrap=False,
oob_score=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None,
ccp_alpha=0.0,
max_samples=None,
monotonic_cst=None,
):
super().__init__(
estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=(
"criterion",
"max_depth",
"min_samples_split",
"min_samples_leaf",
"min_weight_fraction_leaf",
"max_features",
"max_leaf_nodes",
"min_impurity_decrease",
"random_state",
"ccp_alpha",
"monotonic_cst",
),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight,
max_samples=max_samples,
)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_decrease = min_impurity_decrease
self.ccp_alpha = ccp_alpha
self.monotonic_cst = monotonic_cst
| ExtraTreesClassifier |
python | getsentry__sentry | src/sentry/replays/consumers/recording.py | {
"start": 1414,
"end": 8271
} | class ____(ProcessingStrategyFactory[KafkaPayload]):
def __init__(
self,
input_block_size: int | None,
max_batch_size: int,
max_batch_time: int,
num_processes: int,
output_block_size: int | None,
num_threads: int = 4, # Defaults to 4 for self-hosted.
force_synchronous: bool = False, # Force synchronous runner (only used in test suite).
max_pending_futures: int = 100,
) -> None:
# For information on configuring this consumer refer to this page:
# https://getsentry.github.io/arroyo/strategies/run_task_with_multiprocessing.html
self.input_block_size = input_block_size
self.max_batch_size = max_batch_size
self.max_batch_time = max_batch_time
self.num_processes = num_processes
self.num_threads = num_threads
self.output_block_size = output_block_size
self.force_synchronous = force_synchronous
self.max_pending_futures = max_pending_futures
def create_with_partitions(
self,
commit: Commit,
partitions: Mapping[Partition, int],
) -> ProcessingStrategy[KafkaPayload]:
has_sent_replays_cache: AutoCache[int, bool] | None = None
options_cache: AutoCache[int, tuple[bool, bool]] | None = None
if options.get("replay.consumer.enable_new_query_caching_system"):
has_sent_replays_cache = make_has_sent_replays_cache()
options_cache = make_options_cache()
context: ProcessorContext = {
"has_sent_replays_cache": has_sent_replays_cache,
"options_cache": options_cache,
}
return RunTaskInThreads(
processing_function=lambda msg: process_and_commit_message(msg, context),
concurrency=self.num_threads,
max_pending_futures=self.max_pending_futures,
next_step=CommitOffsets(commit),
)
def process_and_commit_message(message: Message[KafkaPayload], context: ProcessorContext) -> None:
isolation_scope = sentry_sdk.get_isolation_scope().fork()
with sentry_sdk.scope.use_isolation_scope(isolation_scope):
with sentry_sdk.start_transaction(
name="replays.consumer.recording.process_and_commit_message",
op="replays.consumer.recording.process_and_commit_message",
custom_sampling_context={
"sample_rate": getattr(
settings, "SENTRY_REPLAY_RECORDINGS_CONSUMER_APM_SAMPLING", 0
)
},
):
processed_message = process_message(message.payload.value)
if processed_message:
commit_message(processed_message, context)
# Processing Task
@sentry_sdk.trace
def process_message(message: bytes) -> ProcessedEvent | None:
try:
recording_event = parse_recording_event(message)
set_tag("org_id", recording_event["context"]["org_id"])
set_tag("project_id", recording_event["context"]["project_id"])
return process_recording_event(
recording_event,
use_new_recording_parser=options.get("replay.consumer.msgspec_recording_parser"),
)
except DropSilently:
return None
except Exception:
logger.exception("Failed to process replay recording message.")
return None
@sentry_sdk.trace
def parse_recording_event(message: bytes) -> Event:
recording = parse_request_message(message)
segment_id, payload = parse_headers(cast(bytes, recording["payload"]), recording["replay_id"])
compressed, decompressed = decompress_segment(payload)
replay_event_json = recording.get("replay_event")
if replay_event_json:
replay_event = json.loads(cast(bytes, replay_event_json))
else:
# Check if any events are not present in the pipeline. We need
# to know because we want to write to Snuba from here soon.
metrics.incr("sentry.replays.consumer.recording.missing-replay-event")
replay_event = None
replay_video_raw = recording.get("replay_video")
if replay_video_raw is not None:
replay_video = cast(bytes, replay_video_raw)
else:
replay_video = None
relay_snuba_publish_disabled = recording.get("relay_snuba_publish_disabled", False)
# No matter what value we receive "True" is the only value that can influence our behavior.
# Otherwise we default to "False" which means our consumer does nothing. Its only when Relay
# reports that it has disabled itself that we publish to the Snuba consumer. Any other value
# is invalid and means we should _not_ publish to Snuba.
if relay_snuba_publish_disabled is not True:
relay_snuba_publish_disabled = False
return {
"context": {
"key_id": recording.get("key_id"),
"org_id": recording["org_id"],
"project_id": recording["project_id"],
"received": recording["received"],
"replay_id": recording["replay_id"],
"retention_days": recording["retention_days"],
"segment_id": segment_id,
"should_publish_replay_event": relay_snuba_publish_disabled,
},
"payload_compressed": compressed,
"payload": decompressed,
"replay_event": replay_event,
"replay_video": replay_video,
}
@sentry_sdk.trace
def parse_request_message(message: bytes) -> ReplayRecording:
try:
return RECORDINGS_CODEC.decode(message)
except ValidationError:
logger.exception("Could not decode recording message.")
raise DropSilently()
@sentry_sdk.trace
def decompress_segment(segment: bytes) -> tuple[bytes, bytes]:
try:
return (segment, zlib.decompress(segment))
except zlib.error:
if segment and segment[0] == ord("["):
return (zlib.compress(segment), segment)
else:
logger.exception("Invalid recording body.")
raise DropSilently()
@sentry_sdk.trace
def parse_headers(recording: bytes, replay_id: str) -> tuple[int, bytes]:
try:
recording_headers_json, recording_segment = recording.split(b"\n", 1)
return int(json.loads(recording_headers_json)["segment_id"]), recording_segment
except Exception:
logger.exception("Recording headers could not be extracted %s", replay_id)
raise DropSilently()
# I/O Task
@sentry_sdk.trace
def commit_message(message: ProcessedEvent, context: ProcessorContext) -> None:
try:
commit_recording_message(message, context)
track_recording_metadata(message)
return None
except GCS_RETRYABLE_ERRORS:
raise
except DropEvent:
return None
except Exception:
logger.exception("Failed to commit replay recording message.")
return None
| ProcessReplayRecordingStrategyFactory |
python | scikit-learn__scikit-learn | sklearn/tests/metadata_routing_common.py | {
"start": 12313,
"end": 14172
} | class ____(TransformerMixin, BaseEstimator):
"""A transformer which accepts metadata on fit and transform.
Parameters
----------
registry : list, default=None
If a list, the estimator will append itself to the list in order to have
a reference to the estimator later on. Since that reference is not
required in all tests, registration can be skipped by leaving this value
as None.
"""
def __init__(self, registry=None):
self.registry = registry
def fit(self, X, y=None, sample_weight="default", metadata="default"):
if self.registry is not None:
self.registry.append(self)
record_metadata_not_default(
self, sample_weight=sample_weight, metadata=metadata
)
self.fitted_ = True
return self
def transform(self, X, sample_weight="default", metadata="default"):
record_metadata_not_default(
self, sample_weight=sample_weight, metadata=metadata
)
return X + 1
def fit_transform(self, X, y, sample_weight="default", metadata="default"):
# implementing ``fit_transform`` is necessary since
# ``TransformerMixin.fit_transform`` doesn't route any metadata to
# ``transform``, while here we want ``transform`` to receive
# ``sample_weight`` and ``metadata``.
record_metadata_not_default(
self, sample_weight=sample_weight, metadata=metadata
)
return self.fit(X, y, sample_weight=sample_weight, metadata=metadata).transform(
X, sample_weight=sample_weight, metadata=metadata
)
def inverse_transform(self, X, sample_weight=None, metadata=None):
record_metadata_not_default(
self, sample_weight=sample_weight, metadata=metadata
)
return X - 1
| ConsumingTransformer |
python | encode__django-rest-framework | tests/test_testing.py | {
"start": 14249,
"end": 14454
} | class ____(TestCase):
def test_urlpatterns(self):
# sanity test to ensure that this test module does not have a '/' route
assert self.client.get('/').status_code == 404
| TestExistingPatterns |
python | ray-project__ray | rllib/utils/filter.py | {
"start": 15251,
"end": 17209
} | class ____(MeanStdFilter):
is_concurrent = True
def __init__(self, *args, **kwargs):
super(ConcurrentMeanStdFilter, self).__init__(*args, **kwargs)
deprecation_warning(
old="ConcurrentMeanStdFilter",
error=False,
help="ConcurrentMeanStd filters are only used for testing and will "
"therefore be deprecated in the course of moving to the "
"Connetors API, where testing of filters will be done by other "
"means.",
)
self._lock = threading.RLock()
def lock_wrap(func):
def wrapper(*args, **kwargs):
with self._lock:
return func(*args, **kwargs)
return wrapper
self.__getattribute__ = lock_wrap(self.__getattribute__)
def as_serializable(self) -> "MeanStdFilter":
"""Returns non-concurrent version of current class"""
other = MeanStdFilter(self.shape)
other.sync(self)
return other
def copy(self) -> "ConcurrentMeanStdFilter":
"""Returns a copy of Filter."""
other = ConcurrentMeanStdFilter(self.shape)
other.sync(self)
return other
def __repr__(self) -> str:
return "ConcurrentMeanStdFilter({}, {}, {}, {}, {}, {})".format(
self.shape,
self.demean,
self.destd,
self.clip,
self.running_stats,
self.buffer,
)
@OldAPIStack
def get_filter(filter_config, shape):
if filter_config == "MeanStdFilter":
return MeanStdFilter(shape, clip=None)
elif filter_config == "ConcurrentMeanStdFilter":
return ConcurrentMeanStdFilter(shape, clip=None)
elif filter_config == "NoFilter":
return NoFilter()
elif callable(filter_config):
return filter_config(shape)
else:
raise Exception("Unknown observation_filter: " + str(filter_config))
| ConcurrentMeanStdFilter |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/sensors/emr.py | {
"start": 1657,
"end": 4376
} | class ____(AwsBaseSensor[EmrHook]):
"""
Contains general sensor behavior for EMR.
Subclasses should implement following methods:
- ``get_emr_response()``
- ``state_from_response()``
- ``failure_message_from_response()``
Subclasses should set ``target_states`` and ``failed_states`` fields.
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.h
"""
aws_hook_class = EmrHook
ui_color = "#66c3ff"
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.target_states: Iterable[str] = [] # will be set in subclasses
self.failed_states: Iterable[str] = [] # will be set in subclasses
def poke(self, context: Context):
response = self.get_emr_response(context=context)
if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
self.log.info("Bad HTTP response: %s", response)
return False
state = self.state_from_response(response)
self.log.info("Job flow currently %s", state)
if state in self.target_states:
return True
if state in self.failed_states:
raise AirflowException(f"EMR job failed: {self.failure_message_from_response(response)}")
return False
def get_emr_response(self, context: Context) -> dict[str, Any]:
"""
Make an API call with boto3 and get response.
:return: response
"""
raise NotImplementedError("Please implement get_emr_response() in subclass")
@staticmethod
def state_from_response(response: dict[str, Any]) -> str:
"""
Get state from boto3 response.
:param response: response from AWS API
:return: state
"""
raise NotImplementedError("Please implement state_from_response() in subclass")
@staticmethod
def failure_message_from_response(response: dict[str, Any]) -> str | None:
"""
Get state from boto3 response.
:param response: response from AWS API
:return: failure message
"""
raise NotImplementedError("Please implement failure_message_from_response() in subclass")
| EmrBaseSensor |
python | wandb__wandb | wandb/vendor/pygments/lexers/shell.py | {
"start": 6229,
"end": 6753
} | class ____(ShellSessionBaseLexer):
"""
Lexer for simplistic shell sessions.
.. versionadded:: 1.1
"""
name = 'Bash Session'
aliases = ['console', 'shell-session']
filenames = ['*.sh-session', '*.shell-session']
mimetypes = ['application/x-shell-session', 'application/x-sh-session']
_innerLexerCls = BashLexer
_ps1rgx = \
r'^((?:(?:\[.*?\])|(?:\(\S+\))?(?:| |sh\S*?|\w+\S+[@:]\S+(?:\s+\S+)' \
r'?|\[\S+[@:][^\n]+\].+))\s*[$#%])(.*\n?)'
_ps2 = '>'
| BashSessionLexer |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 126466,
"end": 126658
} | class ____:
xlWebFormattingAll = 1 # from enum XlWebFormatting
xlWebFormattingNone = 3 # from enum XlWebFormatting
xlWebFormattingRTF = 2 # from enum XlWebFormatting
| WebFormatting |
python | kamyu104__LeetCode-Solutions | Python/top-k-frequent-words.py | {
"start": 1631,
"end": 2675
} | class ____(object):
def topKFrequent(self, words, k):
"""
:type words: List[str]
:type k: int
:rtype: List[str]
"""
class MinHeapObj(object):
def __init__(self,val):
self.val = val
def __lt__(self,other):
return self.val[1] > other.val[1] if self.val[0] == other.val[0] else \
self.val < other.val
def __eq__(self,other):
return self.val == other.val
def __str__(self):
return str(self.val)
counts = collections.Counter(words)
min_heap = []
for word, count in counts.iteritems():
heapq.heappush(min_heap, MinHeapObj((count, word)))
if len(min_heap) == k+1:
heapq.heappop(min_heap)
result = []
while min_heap:
result.append(heapq.heappop(min_heap).val[1])
return result[::-1]
# Time: O(n + klogk) ~ O(n + nlogn)
# Space: O(n)
# Bucket Sort Solution
| Solution2 |
python | django__django | tests/serializers/models/data.py | {
"start": 920,
"end": 999
} | class ____(models.Model):
data = models.DateTimeField(null=True)
| DateTimeData |
python | pytest-dev__pytest-django | tests/test_unittest.py | {
"start": 4262,
"end": 13123
} | class ____:
"Test that setup/teardown methods of unittests are being called."
def test_django(self, django_pytester: DjangoPytester) -> None:
django_pytester.create_test_module(
"""
from django.test import TestCase
class TestFoo(TestCase):
@classmethod
def setUpClass(self):
print('\\nCALLED: setUpClass')
def setUp(self):
print('\\nCALLED: setUp')
def tearDown(self):
print('\\nCALLED: tearDown')
@classmethod
def tearDownClass(self):
print('\\nCALLED: tearDownClass')
def test_pass(self):
pass
"""
)
result = django_pytester.runpytest_subprocess("-v", "-s")
result.stdout.fnmatch_lines(
[
"CALLED: setUpClass",
"CALLED: setUp",
"CALLED: tearDown",
"PASSED*",
"CALLED: tearDownClass",
]
)
assert result.ret == 0
def test_setUpClass_not_being_a_classmethod(self, django_pytester: DjangoPytester) -> None:
django_pytester.create_test_module(
"""
from django.test import TestCase
class TestFoo(TestCase):
def setUpClass(self):
pass
def test_pass(self):
pass
"""
)
result = django_pytester.runpytest_subprocess("-v", "-s")
expected_lines = [
"* ERROR at setup of TestFoo.test_pass *",
"E * TypeError: *",
]
result.stdout.fnmatch_lines(expected_lines)
assert result.ret == 1
def test_setUpClass_multiple_subclasses(self, django_pytester: DjangoPytester) -> None:
django_pytester.create_test_module(
"""
from django.test import TestCase
class TestFoo(TestCase):
@classmethod
def setUpClass(cls):
super(TestFoo, cls).setUpClass()
def test_shared(self):
pass
class TestBar(TestFoo):
def test_bar1(self):
pass
class TestBar2(TestFoo):
def test_bar21(self):
pass
"""
)
result = django_pytester.runpytest_subprocess("-v")
result.stdout.fnmatch_lines(
[
"*TestFoo::test_shared PASSED*",
"*TestBar::test_bar1 PASSED*",
"*TestBar::test_shared PASSED*",
"*TestBar2::test_bar21 PASSED*",
"*TestBar2::test_shared PASSED*",
]
)
assert result.ret == 0
def test_setUpClass_mixin(self, django_pytester: DjangoPytester) -> None:
django_pytester.create_test_module(
"""
from django.test import TestCase
class TheMixin:
@classmethod
def setUpClass(cls):
super(TheMixin, cls).setUpClass()
class TestFoo(TheMixin, TestCase):
def test_foo(self):
pass
class TestBar(TheMixin, TestCase):
def test_bar(self):
pass
"""
)
result = django_pytester.runpytest_subprocess("-v")
result.stdout.fnmatch_lines(["*TestFoo::test_foo PASSED*", "*TestBar::test_bar PASSED*"])
assert result.ret == 0
def test_setUpClass_skip(self, django_pytester: DjangoPytester) -> None:
django_pytester.create_test_module(
"""
from django.test import TestCase
import pytest
class TestFoo(TestCase):
@classmethod
def setUpClass(cls):
if cls is TestFoo:
raise pytest.skip("Skip base class")
super(TestFoo, cls).setUpClass()
def test_shared(self):
pass
class TestBar(TestFoo):
def test_bar1(self):
pass
class TestBar2(TestFoo):
def test_bar21(self):
pass
"""
)
result = django_pytester.runpytest_subprocess("-v")
result.stdout.fnmatch_lines(
[
"*TestFoo::test_shared SKIPPED*",
"*TestBar::test_bar1 PASSED*",
"*TestBar::test_shared PASSED*",
"*TestBar2::test_bar21 PASSED*",
"*TestBar2::test_shared PASSED*",
]
)
assert result.ret == 0
def test_multi_inheritance_setUpClass(self, django_pytester: DjangoPytester) -> None:
django_pytester.create_test_module(
"""
from django.test import TestCase
# Using a mixin is a regression test, see #280 for more details:
# https://github.com/pytest-dev/pytest-django/issues/280
class SomeMixin:
pass
class TestA(SomeMixin, TestCase):
expected_state = ['A']
state = []
@classmethod
def setUpClass(cls):
super(TestA, cls).setUpClass()
cls.state.append('A')
@classmethod
def tearDownClass(cls):
assert cls.state.pop() == 'A'
super(TestA, cls).tearDownClass()
def test_a(self):
assert self.state == self.expected_state
class TestB(TestA):
expected_state = ['A', 'B']
@classmethod
def setUpClass(cls):
super(TestB, cls).setUpClass()
cls.state.append('B')
@classmethod
def tearDownClass(cls):
assert cls.state.pop() == 'B'
super(TestB, cls).tearDownClass()
def test_b(self):
assert self.state == self.expected_state
class TestC(TestB):
expected_state = ['A', 'B', 'C']
@classmethod
def setUpClass(cls):
super(TestC, cls).setUpClass()
cls.state.append('C')
@classmethod
def tearDownClass(cls):
assert cls.state.pop() == 'C'
super(TestC, cls).tearDownClass()
def test_c(self):
assert self.state == self.expected_state
"""
)
result = django_pytester.runpytest_subprocess("-vvvv", "-s")
assert result.parseoutcomes()["passed"] == 6
assert result.ret == 0
def test_unittest(self, django_pytester: DjangoPytester) -> None:
django_pytester.create_test_module(
"""
from unittest import TestCase
class TestFoo(TestCase):
@classmethod
def setUpClass(self):
print('\\nCALLED: setUpClass')
def setUp(self):
print('\\nCALLED: setUp')
def tearDown(self):
print('\\nCALLED: tearDown')
@classmethod
def tearDownClass(self):
print('\\nCALLED: tearDownClass')
def test_pass(self):
pass
"""
)
result = django_pytester.runpytest_subprocess("-v", "-s")
result.stdout.fnmatch_lines(
[
"CALLED: setUpClass",
"CALLED: setUp",
"CALLED: tearDown",
"PASSED*",
"CALLED: tearDownClass",
]
)
assert result.ret == 0
def test_setUpClass_leaf_but_not_in_dunder_dict(self, django_pytester: DjangoPytester) -> None:
django_pytester.create_test_module(
"""
from django.test import testcases
class CMSTestCase(testcases.TestCase):
pass
class FooBarTestCase(testcases.TestCase):
@classmethod
def setUpClass(cls):
print('FooBarTestCase.setUpClass')
super(FooBarTestCase, cls).setUpClass()
class TestContact(CMSTestCase, FooBarTestCase):
def test_noop(self):
print('test_noop')
"""
)
result = django_pytester.runpytest_subprocess("-q", "-s")
result.stdout.fnmatch_lines(["*FooBarTestCase.setUpClass*", "*test_noop*", "1 passed*"])
assert result.ret == 0
| TestUnittestMethods |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.