language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pennersr__django-allauth | allauth/headless/contrib/ninja/security.py | {
"start": 372,
"end": 1246
} | class ____(AuthBase):
"""
This security class uses the X-Session-Token that django-allauth
is using for authentication purposes.
"""
openapi_type: str = "apiKey"
def __call__(self, request: HttpRequest):
token = self.get_session_token(request)
if token:
user_session = authenticate_by_x_session_token(token)
if user_session:
return user_session[0]
return None
def get_session_token(self, request: HttpRequest) -> typing.Optional[str]:
"""
Returns the session token for the given request, by looking up the
``X-Session-Token`` header. Override this if you want to extract the token
from e.g. the ``Authorization`` header.
"""
return request.headers.get("X-Session-Token")
x_session_token_auth = XSessionTokenAuth()
| XSessionTokenAuth |
python | huggingface__transformers | tests/models/opt/test_modeling_opt.py | {
"start": 16191,
"end": 23541
} | class ____(unittest.TestCase):
@property
def prompts(self):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def test_generation_pre_attn_layer_norm(self):
model_id = "facebook/opt-125m"
EXPECTED_OUTPUTS = [
"Today is a beautiful day and I want to",
"In the city of New York, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
predicted_outputs = []
tokenizer = GPT2Tokenizer.from_pretrained(model_id)
model = OPTForCausalLM.from_pretrained(model_id)
for prompt in self.prompts:
input_ids = tokenizer(prompt, return_tensors="pt").input_ids
generated_ids = model.generate(input_ids, max_length=10)
generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
predicted_outputs += generated_string
self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
def test_batch_generation(self):
model_id = "facebook/opt-350m"
tokenizer = GPT2Tokenizer.from_pretrained(model_id)
model = OPTForCausalLM.from_pretrained(model_id)
model.to(torch_device)
tokenizer.padding_side = "left"
# use different length sentences to test batching
sentences = [
"Hello, my dog is a little",
"Today, I",
]
inputs = tokenizer(sentences, return_tensors="pt", padding=True)
input_ids = inputs["input_ids"].to(torch_device)
outputs = model.generate(
input_ids=input_ids,
attention_mask=inputs["attention_mask"].to(torch_device),
)
inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
output_non_padded = model.generate(input_ids=inputs_non_padded)
num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().item()
inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
expected_output_sentence = [
"Hello, my dog is a little bit of a dork.\nI'm a little bit",
"Today, I was in the middle of a conversation with a friend about the",
]
self.assertListEqual(expected_output_sentence, batch_out_sentence)
self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence])
def test_generation_post_attn_layer_norm(self):
model_id = "facebook/opt-350m"
EXPECTED_OUTPUTS = [
"Today is a beautiful day and I want to",
"In the city of San Francisco, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
predicted_outputs = []
tokenizer = GPT2Tokenizer.from_pretrained(model_id)
model = OPTForCausalLM.from_pretrained(model_id)
for prompt in self.prompts:
input_ids = tokenizer(prompt, return_tensors="pt").input_ids
generated_ids = model.generate(input_ids, max_length=10)
generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
predicted_outputs += generated_string
self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
@require_torch_accelerator
@require_torch_fp16
def test_batched_nan_fp16(self):
# a bug manifested starting at models facebook/opt-1.3 and larger when running batched generations,
# therefore not using a tiny model, but the smallest model the problem was seen with which is opt-1.3b.
# please refer to this github thread: https://github.com/huggingface/transformers/pull/17437 for more details
model_name = "facebook/opt-1.3b"
tokenizer = GPT2Tokenizer.from_pretrained(model_name, use_fast=False, padding_side="left")
model = OPTForCausalLM.from_pretrained(model_name, dtype=torch.float16, use_cache=True).to(torch_device)
model = model.eval()
batch = tokenizer(["Who are you?", "Joe Biden is the president of"], padding=True, return_tensors="pt")
input_ids = batch["input_ids"].to(torch_device)
attention_mask = batch["attention_mask"].to(torch_device)
with torch.no_grad():
outputs = model(input_ids, attention_mask=attention_mask)
self.assertFalse(
torch.isnan(outputs.logits[0]).any().item()
) # the first logits could contain NaNs if it fails
# TODO joao, manuel: remove this in v4.62.0
@slow
def test_contrastive_search_opt(self):
article = (
"A chat between a curious human and the Statue of Liberty.\n\nHuman: What is your name?\nStatue: I am the "
"Statue of Liberty.\nHuman: Where do you live?\nStatue: New York City.\nHuman: How long have you lived "
"there?"
)
opt_tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-1.3b")
opt_model = OPTForCausalLM.from_pretrained("facebook/opt-1.3b").to(torch_device)
input_ids = opt_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
outputs = opt_model.generate(
input_ids,
penalty_alpha=0.6,
top_k=5,
max_length=256,
trust_remote_code=True,
custom_generate="transformers-community/contrastive-search",
)
generated_text = opt_tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(
generated_text,
[
"A chat between a curious human and the Statue of Liberty.\n\nHuman: What is your name?\nStatue: I "
"am the Statue of Liberty.\nHuman: Where do you live?\nStatue: New York City.\nHuman: How long have "
"you lived there?\nStatue: A hundred years.\nHuman: And you’re from what country?\nStatue: The United "
"States of America.\nHuman: Why did you come to America?\nStatue: I came to escape the tyranny of my "
"country.\nHuman: What tyranny?\nStatue: They didn’t let me speak my mind.\nHuman: What was your "
"country?\nStatue: It was a country of immigrants.\nHuman: Who were the immigrants?\nStatue: They "
"were from all over the world.\nHuman: What language did they speak?\nStatue: French, Spanish, "
"Italian, German, English—you name it.\nHuman: And where did they come from?\nStatue: They came from "
"every country in the world.\nHuman: And you were born in what country?\nStatue: I was born in "
"France.\nHuman: And your parents were French?\nStatue"
],
)
| OPTGenerationTest |
python | huggingface__transformers | src/transformers/models/esm/configuration_esm.py | {
"start": 6391,
"end": 14016
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`ESMModel`]. It is used to instantiate a ESM model
according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the ESM
[facebook/esm-1b](https://huggingface.co/facebook/esm-1b) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*):
Vocabulary size of the ESM model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`ESMModel`].
mask_token_id (`int`, *optional*):
The index of the mask token in the vocabulary. This must be included in the config because of the
"mask-dropout" scaling trick, which will scale the inputs depending on the number of masked tokens.
pad_token_id (`int`, *optional*):
The index of the padding token in the vocabulary. This must be included in the config because certain parts
of the ESM code use this instead of the attention mask.
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 1026):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose either `"absolute"` or "rotary"`.
is_decoder (`bool`, *optional*, defaults to `False`):
Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
emb_layer_norm_before (`bool`, *optional*):
Whether to apply layer normalization after embeddings but before the main stem of the network.
token_dropout (`bool`, defaults to `False`):
When this is enabled, masked tokens are treated as if they had been dropped out by input dropout.
Examples:
```python
>>> from transformers import EsmModel, EsmConfig
>>> # Initializing a ESM facebook/esm-1b style configuration
>>> configuration = EsmConfig(vocab_size=33)
>>> # Initializing a model from the configuration
>>> model = EsmModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "esm"
sub_configs = {"esmfold_config": EsmFoldConfig}
def __init__(
self,
vocab_size=None,
mask_token_id=None,
pad_token_id=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=1026,
initializer_range=0.02,
layer_norm_eps=1e-12,
position_embedding_type="absolute",
use_cache=True,
emb_layer_norm_before=None,
token_dropout=False,
is_folding_model=False,
esmfold_config=None,
vocab_list=None,
**kwargs,
):
super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.emb_layer_norm_before = emb_layer_norm_before
self.token_dropout = token_dropout
self.is_folding_model = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values.")
esmfold_config = EsmFoldConfig()
elif isinstance(esmfold_config, dict):
esmfold_config = EsmFoldConfig(**esmfold_config)
self.esmfold_config = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
self.vocab_list = get_default_vocab_list()
else:
self.vocab_list = vocab_list
else:
self.esmfold_config = None
self.vocab_list = None
if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")
# TODO: update ESM to inherit from PreTrainedConfig
def to_dict(self):
"""
Serializes this instance to a Python dictionary. Override the default [`~PreTrainedConfig.to_dict`].
Returns:
`dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
"""
output = super().to_dict()
if isinstance(self.esmfold_config, EsmFoldConfig):
output["esmfold_config"] = self.esmfold_config.to_dict()
return output
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
__all__ = ["EsmConfig"]
| EsmConfig |
python | dagster-io__dagster | .buildkite/buildkite-shared/buildkite_shared/step_builders/block_step_builder.py | {
"start": 328,
"end": 823
} | class ____(TypedDict):
text: str
key: str
hint: Optional[str]
default: Optional[str]
required: Optional[bool]
BlockStepConfiguration = TypedDict(
"BlockStepConfiguration",
{
"block": str,
"key": Optional[str],
"prompt": Optional[str],
"fields": list[Union[InputSelectField, InputTextField]],
"depends_on": Optional[list[str]],
"if": Optional[str],
"skip": Optional[str],
},
total=False,
)
| InputTextField |
python | huggingface__transformers | src/transformers/models/sam3/configuration_sam3.py | {
"start": 806,
"end": 4686
} | class ____(PreTrainedConfig):
r"""
Configuration class for SAM3 Vision Encoder (ViT backbone).
Instantiating a configuration defaults will yield a similar configuration to that of SAM 3
[facebook/sam3](https://huggingface.co/facebook/sam3) architecture.
Args:
hidden_size (`int`, *optional*, defaults to 1024):
Dimensionality of the encoder layers.
intermediate_size (`int`, *optional*, defaults to 4736):
Dimensionality of the feedforward (MLP) layers.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer.
num_channels (`int`, *optional*, defaults to 3):
Number of input image channels.
image_size (`int`, *optional*, defaults to 1008):
Expected input image size.
patch_size (`int`, *optional*, defaults to 14):
Size of image patches.
hidden_act (`str`, *optional*, defaults to `"gelu"`):
The non-linear activation function.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for attention probabilities.
rope_theta (`float`, *optional*, defaults to 10000.0):
Base frequency for RoPE.
window_size (`int`, *optional*, defaults to 24):
Window size for windowed attention.
global_attn_indexes (`list[int]`, *optional*, defaults to `[7, 15, 23, 31]`):
Indexes of layers with global attention.
layer_scale_init_value (`float`, *optional*):
Initial value for layer scale. None means no layer scale.
pretrain_image_size (`int`, *optional*, defaults to 336):
Pretrained model image size for position embedding initialization.
hidden_dropout (`float`, *optional*, defaults to 0.0):
Dropout probability for hidden states.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing weight matrices.
"""
base_config_key = "backbone_config"
model_type = "sam3_vit_model"
def __init__(
self,
hidden_size=1024,
intermediate_size=4736,
num_hidden_layers=32,
num_attention_heads=16,
num_channels=3,
image_size=1008,
patch_size=14,
hidden_act="gelu",
layer_norm_eps=1e-6,
attention_dropout=0.0,
rope_theta=10000.0,
window_size=24,
global_attn_indexes=None,
layer_scale_init_value=None,
pretrain_image_size=336,
hidden_dropout=0.0,
initializer_range=0.02,
**kwargs,
):
super().__init__(**kwargs)
if global_attn_indexes is None:
global_attn_indexes = [7, 15, 23, 31]
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.image_size = image_size
self.patch_size = patch_size
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
self.attention_dropout = attention_dropout
self.rope_theta = rope_theta
self.window_size = window_size
self.global_attn_indexes = global_attn_indexes
self.layer_scale_init_value = layer_scale_init_value
self.pretrain_image_size = pretrain_image_size
self.hidden_dropout = hidden_dropout
self.initializer_range = initializer_range
| Sam3ViTConfig |
python | langchain-ai__langchain | libs/partners/anthropic/langchain_anthropic/middleware/file_search.py | {
"start": 2188,
"end": 11227
} | class ____(AgentMiddleware):
"""Provides Glob and Grep search over state-based files.
This middleware adds two tools that search through virtual files in state:
- Glob: Fast file pattern matching by file path
- Grep: Fast content search using regular expressions
Example:
```python
from langchain.agents import create_agent
from langchain.agents.middleware import (
StateTextEditorToolMiddleware,
StateFileSearchMiddleware,
)
agent = create_agent(
model=model,
tools=[],
middleware=[
StateTextEditorToolMiddleware(),
StateFileSearchMiddleware(),
],
)
```
"""
state_schema = AnthropicToolsState
def __init__(
self,
*,
state_key: str = "text_editor_files",
) -> None:
"""Initialize the search middleware.
Args:
state_key: State key to search
Use `'memory_files'` to search memory tool files.
"""
self.state_key = state_key
# Create tool instances
@tool
def glob_search( # noqa: D417
runtime: ToolRuntime[None, AnthropicToolsState],
pattern: str,
path: str = "/",
) -> str:
"""Fast file pattern matching tool that works with any codebase size.
Supports glob patterns like `**/*.js` or `src/**/*.ts`.
Returns matching file paths sorted by modification time.
Use this tool when you need to find files by name patterns.
Args:
pattern: The glob pattern to match files against.
path: The directory to search in.
If not specified, searches from root.
Returns:
Newline-separated list of matching file paths, sorted by modification
time (most recently modified first).
Returns `'No files found'` if no matches.
"""
return self._handle_glob_search(pattern, path, runtime.state)
@tool
def grep_search( # noqa: D417
runtime: ToolRuntime[None, AnthropicToolsState],
pattern: str,
path: str = "/",
include: str | None = None,
output_mode: Literal[
"files_with_matches", "content", "count"
] = "files_with_matches",
) -> str:
"""Fast content search tool that works with any codebase size.
Searches file contents using regular expressions.
Supports full regex syntax and filters files by pattern with the include
parameter.
Args:
pattern: The regular expression pattern to search for in file contents.
path: The directory to search in. If not specified, searches from root.
include: File pattern to filter (e.g., `'*.js'`, `'*.{ts,tsx}'`).
output_mode: Output format.
Options:
- `'files_with_matches'`: Only file paths containing matches
- `'content'`: Matching lines with file:line:content format
- `'count'`: Count of matches per file
Returns:
Search results formatted according to `output_mode`.
Returns `'No matches found'` if no results.
"""
return self._handle_grep_search(
pattern, path, include, output_mode, runtime.state
)
self.glob_search = glob_search
self.grep_search = grep_search
self.tools = [glob_search, grep_search]
def _handle_glob_search(
self,
pattern: str,
path: str,
state: AnthropicToolsState,
) -> str:
"""Handle glob search operation.
Args:
pattern: The glob pattern to match files against.
path: The directory to search in.
state: The current agent state.
Returns:
Newline-separated list of matching file paths, sorted by modification
time (most recently modified first).
Returns `'No files found'` if no matches.
"""
# Normalize base path
base_path = path if path.startswith("/") else "/" + path
# Get files from state
files = cast("dict[str, Any]", state.get(self.state_key, {}))
# Match files
matches = []
for file_path, file_data in files.items():
if file_path.startswith(base_path):
# Get relative path from base
if base_path == "/":
relative = file_path[1:] # Remove leading /
elif file_path == base_path:
relative = Path(file_path).name
elif file_path.startswith(base_path + "/"):
relative = file_path[len(base_path) + 1 :]
else:
continue
# Match against pattern
# Handle ** pattern which requires special care
# PurePosixPath.match doesn't match single-level paths
# against **/pattern
is_match = PurePosixPath(relative).match(pattern)
if not is_match and pattern.startswith("**/"):
# Also try matching without the **/ prefix for files in base dir
is_match = PurePosixPath(relative).match(pattern[3:])
if is_match:
matches.append((file_path, file_data["modified_at"]))
if not matches:
return "No files found"
# Sort by modification time
matches.sort(key=lambda x: x[1], reverse=True)
file_paths = [path for path, _ in matches]
return "\n".join(file_paths)
def _handle_grep_search(
self,
pattern: str,
path: str,
include: str | None,
output_mode: str,
state: AnthropicToolsState,
) -> str:
"""Handle grep search operation.
Args:
pattern: The regular expression pattern to search for in file contents.
path: The directory to search in.
include: File pattern to filter (e.g., `'*.js'`, `'*.{ts,tsx}'`).
output_mode: Output format.
state: The current agent state.
Returns:
Search results formatted according to `output_mode`.
Returns `'No matches found'` if no results.
"""
# Normalize base path
base_path = path if path.startswith("/") else "/" + path
# Compile regex pattern (for validation)
try:
regex = re.compile(pattern)
except re.error as e:
return f"Invalid regex pattern: {e}"
if include and not _is_valid_include_pattern(include):
return "Invalid include pattern"
# Search files
files = cast("dict[str, Any]", state.get(self.state_key, {}))
results: dict[str, list[tuple[int, str]]] = {}
for file_path, file_data in files.items():
if not file_path.startswith(base_path):
continue
# Check include filter
if include:
basename = Path(file_path).name
if not _match_include_pattern(basename, include):
continue
# Search file content
for line_num, line in enumerate(file_data["content"], 1):
if regex.search(line):
if file_path not in results:
results[file_path] = []
results[file_path].append((line_num, line))
if not results:
return "No matches found"
# Format output based on mode
return self._format_grep_results(results, output_mode)
def _format_grep_results(
self,
results: dict[str, list[tuple[int, str]]],
output_mode: str,
) -> str:
"""Format grep results based on output mode."""
if output_mode == "files_with_matches":
# Just return file paths
return "\n".join(sorted(results.keys()))
if output_mode == "content":
# Return file:line:content format
lines = []
for file_path in sorted(results.keys()):
for line_num, line in results[file_path]:
lines.append(f"{file_path}:{line_num}:{line}")
return "\n".join(lines)
if output_mode == "count":
# Return file:count format
lines = []
for file_path in sorted(results.keys()):
count = len(results[file_path])
lines.append(f"{file_path}:{count}")
return "\n".join(lines)
# Default to files_with_matches
return "\n".join(sorted(results.keys()))
__all__ = [
"StateFileSearchMiddleware",
]
| StateFileSearchMiddleware |
python | pennersr__django-allauth | allauth/account/migrations/0009_emailaddress_unique_primary_email.py | {
"start": 93,
"end": 545
} | class ____(migrations.Migration):
dependencies = [
("account", "0008_emailaddress_unique_primary_email_fixup"),
]
operations = [
migrations.AddConstraint(
model_name="emailaddress",
constraint=models.UniqueConstraint(
condition=models.Q(("primary", True)),
fields=("user", "primary"),
name="unique_primary_email",
),
),
]
| Migration |
python | graphql-python__graphene | graphene/types/tests/test_objecttype.py | {
"start": 588,
"end": 8250
} | class ____(UnmountedType):
def get_type(self):
return MyType
def test_generate_objecttype():
class MyObjectType(ObjectType):
"""Documentation"""
assert MyObjectType._meta.name == "MyObjectType"
assert MyObjectType._meta.description == "Documentation"
assert MyObjectType._meta.interfaces == tuple()
assert MyObjectType._meta.fields == {}
assert (
repr(MyObjectType)
== "<MyObjectType meta=<ObjectTypeOptions name='MyObjectType'>>"
)
def test_generate_objecttype_with_meta():
class MyObjectType(ObjectType):
class Meta:
name = "MyOtherObjectType"
description = "Documentation"
interfaces = (MyType,)
assert MyObjectType._meta.name == "MyOtherObjectType"
assert MyObjectType._meta.description == "Documentation"
assert MyObjectType._meta.interfaces == (MyType,)
def test_generate_lazy_objecttype():
class MyObjectType(ObjectType):
example = Field(lambda: InnerObjectType, required=True)
class InnerObjectType(ObjectType):
field = Field(MyType)
assert MyObjectType._meta.name == "MyObjectType"
example_field = MyObjectType._meta.fields["example"]
assert isinstance(example_field.type, NonNull)
assert example_field.type.of_type == InnerObjectType
def test_generate_objecttype_with_fields():
class MyObjectType(ObjectType):
field = Field(MyType)
assert "field" in MyObjectType._meta.fields
def test_generate_objecttype_with_private_attributes():
class MyObjectType(ObjectType):
def __init__(self, _private_state=None, **kwargs):
self._private_state = _private_state
super().__init__(**kwargs)
_private_state = None
assert "_private_state" not in MyObjectType._meta.fields
assert hasattr(MyObjectType, "_private_state")
m = MyObjectType(_private_state="custom")
assert m._private_state == "custom"
with raises(TypeError):
MyObjectType(_other_private_state="Wrong")
def test_ordered_fields_in_objecttype():
class MyObjectType(ObjectType):
b = Field(MyType)
a = Field(MyType)
field = MyScalar()
asa = Field(MyType)
assert list(MyObjectType._meta.fields) == ["b", "a", "field", "asa"]
def test_generate_objecttype_inherit_abstracttype():
class MyAbstractType:
field1 = MyScalar()
class MyObjectType(ObjectType, MyAbstractType):
field2 = MyScalar()
assert MyObjectType._meta.description is None
assert MyObjectType._meta.interfaces == ()
assert MyObjectType._meta.name == "MyObjectType"
assert list(MyObjectType._meta.fields) == ["field1", "field2"]
assert list(map(type, MyObjectType._meta.fields.values())) == [Field, Field]
def test_generate_objecttype_inherit_abstracttype_reversed():
class MyAbstractType:
field1 = MyScalar()
class MyObjectType(MyAbstractType, ObjectType):
field2 = MyScalar()
assert MyObjectType._meta.description is None
assert MyObjectType._meta.interfaces == ()
assert MyObjectType._meta.name == "MyObjectType"
assert list(MyObjectType._meta.fields) == ["field1", "field2"]
assert list(map(type, MyObjectType._meta.fields.values())) == [Field, Field]
def test_generate_objecttype_unmountedtype():
class MyObjectType(ObjectType):
field = MyScalar()
assert "field" in MyObjectType._meta.fields
assert isinstance(MyObjectType._meta.fields["field"], Field)
def test_parent_container_get_fields():
assert list(Container._meta.fields) == ["field1", "field2"]
def test_parent_container_interface_get_fields():
assert list(ContainerWithInterface._meta.fields) == ["ifield", "field1", "field2"]
def test_objecttype_as_container_only_args():
container = Container("1", "2")
assert container.field1 == "1"
assert container.field2 == "2"
def test_objecttype_repr():
container = Container("1", "2")
assert repr(container) == "Container(field1='1', field2='2')"
def test_objecttype_eq():
container1 = Container("1", "2")
container2 = Container("1", "2")
container3 = Container("2", "3")
assert container1 == container1
assert container1 == container2
assert container2 != container3
def test_objecttype_as_container_args_kwargs():
container = Container("1", field2="2")
assert container.field1 == "1"
assert container.field2 == "2"
def test_objecttype_as_container_few_kwargs():
container = Container(field2="2")
assert container.field2 == "2"
def test_objecttype_as_container_all_kwargs():
container = Container(field1="1", field2="2")
assert container.field1 == "1"
assert container.field2 == "2"
def test_objecttype_as_container_extra_args():
msg = r"__init__\(\) takes from 1 to 3 positional arguments but 4 were given"
with raises(TypeError, match=msg):
Container("1", "2", "3") # type: ignore
def test_objecttype_as_container_invalid_kwargs():
msg = r"__init__\(\) got an unexpected keyword argument 'unexisting_field'"
with raises(TypeError, match=msg):
Container(unexisting_field="3") # type: ignore
def test_objecttype_container_benchmark(benchmark):
@benchmark
def create_objecttype():
Container(field1="field1", field2="field2")
def test_generate_objecttype_description():
class MyObjectType(ObjectType):
"""
Documentation
Documentation line 2
"""
assert MyObjectType._meta.description == "Documentation\n\nDocumentation line 2"
def test_objecttype_with_possible_types():
class MyObjectType(ObjectType):
class Meta:
possible_types = (dict,)
assert MyObjectType._meta.possible_types == (dict,)
def test_objecttype_with_possible_types_and_is_type_of_should_raise():
with raises(AssertionError) as excinfo:
class MyObjectType(ObjectType):
class Meta:
possible_types = (dict,)
@classmethod
def is_type_of(cls, root, context, info):
return False
assert str(excinfo.value) == (
"MyObjectType.Meta.possible_types will cause type collision with "
"MyObjectType.is_type_of. Please use one or other."
)
def test_objecttype_no_fields_output():
class User(ObjectType):
name = String()
class Query(ObjectType):
user = Field(User)
def resolve_user(self, info):
return User()
schema = Schema(query=Query)
result = schema.execute(
""" query basequery {
user {
name
}
}
"""
)
assert not result.errors
assert result.data == {"user": {"name": None}}
def test_abstract_objecttype_can_str():
class MyObjectType(ObjectType):
class Meta:
abstract = True
field = MyScalar()
assert str(MyObjectType) == "MyObjectType"
def test_objecttype_meta_with_annotations():
class Query(ObjectType):
class Meta:
name: str = "oops"
hello = String()
def resolve_hello(self, info):
return "Hello"
schema = Schema(query=Query)
assert schema is not None
def test_objecttype_meta_arguments():
class MyInterface(Interface):
foo = String()
class MyType(ObjectType, interfaces=[MyInterface]):
bar = String()
assert MyType._meta.interfaces == [MyInterface]
assert list(MyType._meta.fields.keys()) == ["foo", "bar"]
def test_objecttype_type_name():
class MyObjectType(ObjectType, name="FooType"):
pass
assert MyObjectType._meta.name == "FooType"
| MyScalar |
python | django-haystack__django-haystack | test_haystack/multipleindex/tests.py | {
"start": 401,
"end": 6579
} | class ____(WhooshTestCase):
def setUp(self):
super().setUp()
self.ui = connections["solr"].get_unified_index()
self.fi = self.ui.get_index(Foo)
self.bi = self.ui.get_index(Bar)
self.solr_backend = connections["solr"].get_backend()
self.whoosh_backend = connections["whoosh"].get_backend()
self.filtered_whoosh_backend = connections["filtered_whoosh"].get_backend()
Foo.objects.bulk_create(
[
Foo(title="Haystack test", body="foo 1"),
Foo(title="Another Haystack test", body="foo 2"),
]
)
Bar.objects.bulk_create(
[
Bar(author="Haystack test", content="bar 1"),
Bar(author="Another Haystack test", content="bar 2"),
Bar(author="Yet another Haystack test", content="bar 3"),
]
)
self.fi.reindex(using="solr")
self.fi.reindex(using="whoosh")
self.bi.reindex(using="solr")
def tearDown(self):
self.fi.clear(using="solr")
self.bi.clear(using="solr")
super().tearDown()
def test_index_update_object_using(self):
results = self.solr_backend.search("foo")
self.assertEqual(results["hits"], 2)
results = self.whoosh_backend.search("foo")
self.assertEqual(results["hits"], 2)
foo_3 = Foo.objects.create(title="Whee another Haystack test", body="foo 3")
self.fi.update_object(foo_3, using="solr")
results = self.solr_backend.search("foo")
self.assertEqual(results["hits"], 3)
results = self.whoosh_backend.search("foo")
self.assertEqual(results["hits"], 2)
self.fi.update_object(foo_3, using="whoosh")
results = self.solr_backend.search("foo")
self.assertEqual(results["hits"], 3)
results = self.whoosh_backend.search("foo")
self.assertEqual(results["hits"], 3)
def test_index_remove_object_using(self):
results = self.solr_backend.search("foo")
self.assertEqual(results["hits"], 2)
results = self.whoosh_backend.search("foo")
self.assertEqual(results["hits"], 2)
foo_1 = Foo.objects.get(pk=1)
self.fi.remove_object(foo_1, using="solr")
results = self.solr_backend.search("foo")
self.assertEqual(results["hits"], 1)
results = self.whoosh_backend.search("foo")
self.assertEqual(results["hits"], 2)
self.fi.remove_object(foo_1, using="whoosh")
results = self.solr_backend.search("foo")
self.assertEqual(results["hits"], 1)
results = self.whoosh_backend.search("foo")
self.assertEqual(results["hits"], 1)
def test_index_clear_using(self):
results = self.solr_backend.search("foo")
self.assertEqual(results["hits"], 2)
results = self.whoosh_backend.search("foo")
self.assertEqual(results["hits"], 2)
self.fi.clear(using="solr")
results = self.solr_backend.search("foo")
self.assertEqual(results["hits"], 0)
results = self.whoosh_backend.search("foo")
self.assertEqual(results["hits"], 2)
self.fi.clear(using="whoosh")
results = self.solr_backend.search("foo")
self.assertEqual(results["hits"], 0)
results = self.whoosh_backend.search("foo")
self.assertEqual(results["hits"], 0)
def test_index_update_using(self):
self.fi.clear(using="solr")
self.fi.clear(using="whoosh")
self.bi.clear(using="solr")
self.bi.clear(using="whoosh")
results = self.solr_backend.search("foo")
self.assertEqual(results["hits"], 0)
results = self.whoosh_backend.search("foo")
self.assertEqual(results["hits"], 0)
self.fi.update(using="solr")
results = self.solr_backend.search("foo")
self.assertEqual(results["hits"], 2)
results = self.whoosh_backend.search("foo")
self.assertEqual(results["hits"], 0)
self.fi.update(using="whoosh")
results = self.solr_backend.search("foo")
self.assertEqual(results["hits"], 2)
results = self.whoosh_backend.search("foo")
self.assertEqual(results["hits"], 2)
def test_searchqueryset_using(self):
# Using the default.
sqs = SearchQuerySet("solr")
self.assertEqual(sqs.count(), 5)
self.assertEqual(sqs.models(Foo).count(), 2)
self.assertEqual(sqs.models(Bar).count(), 3)
self.assertEqual(sqs.using("solr").count(), 5)
self.assertEqual(sqs.using("solr").models(Foo).count(), 2)
self.assertEqual(sqs.using("solr").models(Bar).count(), 3)
self.assertEqual(sqs.using("whoosh").count(), 2)
self.assertEqual(sqs.using("whoosh").models(Foo).count(), 2)
self.assertEqual(sqs.using("whoosh").models(Bar).count(), 0)
def test_searchquery_using(self):
sq = connections["solr"].get_query()
# Using the default.
self.assertEqual(sq.get_count(), 5)
# "Swap" to the default.
sq = sq.using("solr")
self.assertEqual(sq.get_count(), 5)
# Swap the ``SearchQuery`` used.
sq = sq.using("whoosh")
self.assertEqual(sq.get_count(), 2)
def test_excluded_indexes(self):
wui = connections["filtered_whoosh"].get_unified_index()
self.assertTrue(any(isinstance(i, FooIndex) for i in wui.collect_indexes()))
self.assertFalse(any(isinstance(i, BarIndex) for i in wui.collect_indexes()))
# Shouldn't error.
wui.get_index(Foo)
# Should error, since it's not present.
self.assertRaises(NotHandled, wui.get_index, Bar)
def test_filtered_index_update(self):
for i in ("whoosh", "filtered_whoosh"):
self.fi.clear(using=i)
self.fi.update(using=i)
results = self.whoosh_backend.search("foo")
self.assertEqual(results["hits"], 2)
results = self.filtered_whoosh_backend.search("foo")
self.assertEqual(
results["hits"], 1, "Filtered backend should only contain one record"
)
| MultipleIndexTestCase |
python | cloudpipe__cloudpickle | tests/cloudpickle_testpkg/_cloudpickle_testpkg/__init__.py | {
"start": 302,
"end": 1097
} | class ____:
def __reduce__(self):
# This reducer is only valid for the top level "some_singleton" object.
return "some_singleton"
def relative_imports_factory():
"""Factory creating dynamically-defined functions using relative imports
Relative import of functions living both inside modules and packages are
tested.
"""
def f():
# module_function belongs to _cloudpickle_testpkg.mod, which is a
# module
from .mod import module_function
return module_function()
def g():
# package_function belongs to _cloudpickle_testpkg, which is a package
from . import package_function
return package_function()
return f, g
some_singleton = _SingletonClass()
T = typing.TypeVar("T")
| _SingletonClass |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/slots1.py | {
"start": 122,
"end": 183
} | class ____:
def __init__(self):
self.x = 1
| NoSlots1 |
python | kubernetes-client__python | kubernetes/client/models/v1beta2_device_class_list.py | {
"start": 383,
"end": 7049
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1beta2DeviceClass]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1beta2DeviceClassList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1beta2DeviceClassList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1beta2DeviceClassList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1beta2DeviceClassList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1beta2DeviceClassList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1beta2DeviceClassList. # noqa: E501
Items is the list of resource classes. # noqa: E501
:return: The items of this V1beta2DeviceClassList. # noqa: E501
:rtype: list[V1beta2DeviceClass]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1beta2DeviceClassList.
Items is the list of resource classes. # noqa: E501
:param items: The items of this V1beta2DeviceClassList. # noqa: E501
:type: list[V1beta2DeviceClass]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1beta2DeviceClassList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1beta2DeviceClassList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1beta2DeviceClassList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1beta2DeviceClassList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1beta2DeviceClassList. # noqa: E501
:return: The metadata of this V1beta2DeviceClassList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1beta2DeviceClassList.
:param metadata: The metadata of this V1beta2DeviceClassList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta2DeviceClassList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta2DeviceClassList):
return True
return self.to_dict() != other.to_dict()
| V1beta2DeviceClassList |
python | allegroai__clearml | clearml/backend_api/services/v2_20/events.py | {
"start": 38695,
"end": 42050
} | class ____(NonStrictDataModel):
"""
:param scroll_id: Scroll ID to pass to the next calls to get_plot_sample or
next_plot_sample
:type scroll_id: str
:param event: Plot event
:type event: dict
:param min_iteration: minimal valid iteration for the variant
:type min_iteration: int
:param max_iteration: maximal valid iteration for the variant
:type max_iteration: int
"""
_schema = {
"properties": {
"event": {"description": "Plot event", "type": ["object", "null"]},
"max_iteration": {
"description": "maximal valid iteration for the variant",
"type": ["integer", "null"],
},
"min_iteration": {
"description": "minimal valid iteration for the variant",
"type": ["integer", "null"],
},
"scroll_id": {
"description": "Scroll ID to pass to the next calls to get_plot_sample or next_plot_sample",
"type": ["string", "null"],
},
},
"type": "object",
}
def __init__(
self,
scroll_id: Optional[str] = None,
event: Optional[dict] = None,
min_iteration: Optional[int] = None,
max_iteration: Optional[int] = None,
**kwargs: Any
) -> None:
super(PlotSampleResponse, self).__init__(**kwargs)
self.scroll_id = scroll_id
self.event = event
self.min_iteration = min_iteration
self.max_iteration = max_iteration
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
@schema_property("event")
def event(self) -> Optional[dict]:
return self._property_event
@event.setter
def event(self, value: Optional[dict]) -> None:
if value is None:
self._property_event = None
return
self.assert_isinstance(value, "event", (dict,))
self._property_event = value
@schema_property("min_iteration")
def min_iteration(self) -> Optional[int]:
return self._property_min_iteration
@min_iteration.setter
def min_iteration(self, value: Optional[int]) -> None:
if value is None:
self._property_min_iteration = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "min_iteration", six.integer_types)
self._property_min_iteration = value
@schema_property("max_iteration")
def max_iteration(self) -> Optional[int]:
return self._property_max_iteration
@max_iteration.setter
def max_iteration(self, value: Optional[int]) -> None:
if value is None:
self._property_max_iteration = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "max_iteration", six.integer_types)
self._property_max_iteration = value
| PlotSampleResponse |
python | getsentry__sentry | src/flagpole/evaluation_context.py | {
"start": 2486,
"end": 5073
} | class ____(Generic[T_CONTEXT_DATA]):
"""
Used to build an EvaluationContext instance for use in Flagpole.
This class aggregates a list of context transformers, each of which are
responsible for generating a slice of context data.
This class is meant to be used with Flagpole's `Feature` class:
>>> from flagpole import ContextBuilder, Feature
>>> builder = ContextBuilder().add_context_transformer(lambda _dict: dict(foo="bar"))
>>> feature = Feature.from_feature_dictionary(name="foo", feature_dictionary=dict(), context=builder)
>>> feature.match(EvaluationContext(dict()))
"""
context_transformers: list[Callable[[T_CONTEXT_DATA], EvaluationContextDict]]
exception_handler: Callable[[Exception], Any] | None
__identity_fields: set[str]
def __init__(self):
self.context_transformers = []
self.exception_handler = None
self.__identity_fields = set()
def add_context_transformer(
self,
context_transformer: Callable[[T_CONTEXT_DATA], EvaluationContextDict],
identity_fields: list[str] | None = None,
) -> ContextBuilder[T_CONTEXT_DATA]:
self.context_transformers.append(context_transformer)
if identity_fields is not None:
self.__identity_fields.update(identity_fields)
return self
def add_exception_handler(
self, exception_handler: Callable[[Exception], None]
) -> ContextBuilder[T_CONTEXT_DATA]:
"""
Add a custom exception handler to the context builder if you need custom handling
if any of the transformer functions raise an exception. This is useful for swallowing
or reporting any exceptions that occur while building a context.
:param exception_handler:
"""
if self.exception_handler is not None:
raise Exception("Exception handler is already defined")
self.exception_handler = exception_handler
return self
def build(self, data: T_CONTEXT_DATA | None = None) -> EvaluationContext:
context_data: EvaluationContextDict = dict()
if data is None:
return EvaluationContext(context_data)
for transformer in self.context_transformers:
try:
context_data = {**context_data, **transformer(data)}
except Exception as e:
if self.exception_handler is not None:
self.exception_handler(e)
else:
raise
return EvaluationContext(context_data, self.__identity_fields)
| ContextBuilder |
python | spack__spack | lib/spack/spack/vendor/archspec/vendor/cpuid/cpuid.py | {
"start": 2439,
"end": 2869
} | class ____(ctypes.Structure):
_register_names = ("eax", "ebx", "ecx", "edx")
_fields_ = [(r, c_uint32) for r in _register_names]
def __getitem__(self, item):
if item not in self._register_names:
raise KeyError(item)
return getattr(self, item)
def __repr__(self):
return "eax=0x{:x}, ebx=0x{:x}, ecx=0x{:x}, edx=0x{:x}".format(self.eax, self.ebx, self.ecx, self.edx)
| CPUID_struct |
python | django__django | tests/many_to_many/models.py | {
"start": 2144,
"end": 2339
} | class ____(models.Model):
article = models.ForeignKey(NullableTargetArticle, models.CASCADE)
publication = models.ForeignKey(Publication, models.CASCADE, null=True)
| NullablePublicationThrough |
python | numba__numba | numba/cuda/tests/cudapy/test_optimization.py | {
"start": 421,
"end": 2647
} | class ____(CUDATestCase):
def test_eager_opt(self):
# Optimization should occur by default
sig = (float64[::1],)
kernel = cuda.jit(sig)(kernel_func)
ptx = kernel.inspect_asm()
for fragment in removed_by_opt:
with self.subTest(fragment=fragment):
self.assertNotIn(fragment, ptx[sig])
def test_eager_noopt(self):
# Optimization disabled
sig = (float64[::1],)
kernel = cuda.jit(sig, opt=False)(kernel_func)
ptx = kernel.inspect_asm()
for fragment in removed_by_opt:
with self.subTest(fragment=fragment):
self.assertIn(fragment, ptx[sig])
def test_lazy_opt(self):
# Optimization should occur by default
kernel = cuda.jit(kernel_func)
x = np.zeros(1, dtype=np.float64)
kernel[1, 1](x)
# Grab the PTX for the one definition that has just been jitted
ptx = next(iter(kernel.inspect_asm().items()))[1]
for fragment in removed_by_opt:
with self.subTest(fragment=fragment):
self.assertNotIn(fragment, ptx)
def test_lazy_noopt(self):
# Optimization disabled
kernel = cuda.jit(opt=False)(kernel_func)
x = np.zeros(1, dtype=np.float64)
kernel[1, 1](x)
# Grab the PTX for the one definition that has just been jitted
ptx = next(iter(kernel.inspect_asm().items()))[1]
for fragment in removed_by_opt:
with self.subTest(fragment=fragment):
self.assertIn(fragment, ptx)
def test_device_opt(self):
# Optimization should occur by default
sig = (float64, float64, float64)
device = cuda.jit(sig, device=True)(device_func)
ptx = device.inspect_asm(sig)
self.assertIn('fma.rn.f64', ptx)
def test_device_noopt(self):
# Optimization disabled
sig = (float64, float64, float64)
device = cuda.jit(sig, device=True, opt=False)(device_func)
ptx = device.inspect_asm(sig)
# Fused-multiply adds should be disabled when not optimizing
self.assertNotIn('fma.rn.f64', ptx)
if __name__ == '__main__':
unittest.main()
| TestOptimization |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/connectors/migrate_to_manifest_only/declarative_component_schema.py | {
"start": 9494,
"end": 9669
} | class ____(BaseModel):
class Config:
extra = Extra.allow
type: Optional[Literal["LegacyToPerPartitionStateMigration"]] = None
| LegacyToPerPartitionStateMigration |
python | Pylons__pyramid | tests/test_registry.py | {
"start": 68,
"end": 2859
} | class ____(unittest.TestCase):
def _getTargetClass(self):
from pyramid.registry import Registry
return Registry
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test___bool__(self):
registry = self._makeOne()
self.assertEqual(registry.__bool__(), True)
def test__lock(self):
registry = self._makeOne()
self.assertTrue(registry._lock)
def test_clear_view_cache_lookup(self):
registry = self._makeOne()
registry._view_lookup_cache[1] = 2
registry._clear_view_lookup_cache()
self.assertEqual(registry._view_lookup_cache, {})
def test_package_name(self):
package_name = 'testing'
registry = self._makeOne(package_name)
self.assertEqual(registry.package_name, package_name)
def test_default_package_name(self):
registry = self._makeOne()
self.assertEqual(registry.package_name, 'tests')
def test_registerHandler_and_notify(self):
registry = self._makeOne()
self.assertEqual(registry.has_listeners, False)
L = []
def f(event):
L.append(event)
registry.registerHandler(f, [IDummyEvent])
self.assertEqual(registry.has_listeners, True)
event = DummyEvent()
registry.notify(event)
self.assertEqual(L, [event])
def test_registerSubscriptionAdapter(self):
registry = self._makeOne()
self.assertEqual(registry.has_listeners, False)
from zope.interface import Interface
registry.registerSubscriptionAdapter(
DummyEvent, [IDummyEvent], Interface
)
self.assertEqual(registry.has_listeners, True)
def test__get_settings(self):
registry = self._makeOne()
registry._settings = 'foo'
self.assertEqual(registry.settings, 'foo')
def test__set_settings(self):
registry = self._makeOne()
registry.settings = 'foo'
self.assertEqual(registry._settings, 'foo')
def test_init_forwards_args(self):
from zope.interface import Interface
from zope.interface.registry import Components
dummy = object()
c = Components()
c.registerUtility(dummy, Interface)
registry = self._makeOne('foo', (c,))
self.assertEqual(registry.__name__, 'foo')
self.assertEqual(registry.getUtility(Interface), dummy)
def test_init_forwards_kw(self):
from zope.interface import Interface
from zope.interface.registry import Components
dummy = object()
c = Components()
c.registerUtility(dummy, Interface)
registry = self._makeOne(bases=(c,))
self.assertEqual(registry.getUtility(Interface), dummy)
| TestRegistry |
python | walkccc__LeetCode | solutions/466. Count The Repetitions/466.py | {
"start": 47,
"end": 93
} | class ____:
count: int
nextIndex: int
| Record |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 479447,
"end": 480221
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for BranchProtectionRule."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("BranchProtectionRuleEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("BranchProtectionRule"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| BranchProtectionRuleConnection |
python | getsentry__sentry | tests/sentry/preprod/size_analysis/test_compare.py | {
"start": 404,
"end": 22152
} | class ____(TestCase):
def setUp(self):
super().setUp()
self.organization = self.create_organization(owner=self.user)
self.project = self.create_project(organization=self.organization)
def _create_treemap_element(self, name, size, path=None, children=None):
"""Helper to create TreemapElement."""
return TreemapElement(
name=name,
size=size,
path=path,
is_dir=children is not None,
children=children or [],
)
def _create_size_analysis_results(
self, download_size=500, install_size=1000, treemap_root=None
):
"""Helper to create SizeAnalysisResults."""
treemap = None
if treemap_root:
treemap = TreemapResults(
root=treemap_root,
file_count=1, # Required field
category_breakdown={},
platform="test",
)
return SizeAnalysisResults(
analysis_duration=1.0,
download_size=download_size,
install_size=install_size,
treemap=treemap,
)
def test_compare_size_analysis_no_treemaps(self):
"""Test compare_size_analysis with no treemap data."""
head_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="test",
max_install_size=2000,
max_download_size=1000,
)
base_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="test",
max_install_size=1500,
max_download_size=800,
)
head_results = self._create_size_analysis_results(download_size=1000, install_size=2000)
base_results = self._create_size_analysis_results(download_size=800, install_size=1500)
result = compare_size_analysis(head_metrics, head_results, base_metrics, base_results)
assert isinstance(result, ComparisonResults)
assert result.diff_items == []
assert isinstance(result.size_metric_diff_item, SizeMetricDiffItem)
assert result.size_metric_diff_item.head_install_size == 2000
assert result.size_metric_diff_item.head_download_size == 1000
assert result.size_metric_diff_item.base_install_size == 1500
assert result.size_metric_diff_item.base_download_size == 800
def test_compare_size_analysis_file_added(self):
"""Test compare_size_analysis with a file added."""
head_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="test",
max_install_size=1500,
max_download_size=800,
)
base_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="test",
max_install_size=1500,
max_download_size=800,
)
# Head has one file, base has none
head_treemap = self._create_treemap_element("file.txt", 100)
head_results = self._create_size_analysis_results(treemap_root=head_treemap)
base_results = self._create_size_analysis_results()
result = compare_size_analysis(head_metrics, head_results, base_metrics, base_results)
assert len(result.diff_items) == 1
diff_item = result.diff_items[0]
assert diff_item.path == "file.txt"
assert diff_item.size_diff == 100
assert diff_item.head_size == 100
assert diff_item.base_size is None
assert diff_item.type == DiffType.ADDED
def test_compare_size_analysis_file_removed(self):
"""Test compare_size_analysis with a file removed."""
head_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="test",
max_install_size=1500,
max_download_size=800,
)
base_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="test",
max_install_size=1500,
max_download_size=800,
)
# Base has one file, head has none
base_treemap = self._create_treemap_element("file.txt", 100)
head_results = self._create_size_analysis_results()
base_results = self._create_size_analysis_results(treemap_root=base_treemap)
result = compare_size_analysis(head_metrics, head_results, base_metrics, base_results)
assert len(result.diff_items) == 1
diff_item = result.diff_items[0]
assert diff_item.path == "file.txt"
assert diff_item.size_diff == -100
assert diff_item.head_size is None
assert diff_item.base_size == 100
assert diff_item.type == DiffType.REMOVED
def test_compare_size_analysis_file_increased(self):
"""Test compare_size_analysis with a file size increased."""
head_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="test",
max_install_size=1500,
max_download_size=800,
)
base_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="test",
max_install_size=1500,
max_download_size=800,
)
# Same file, different sizes
head_treemap = self._create_treemap_element("file.txt", 150)
base_treemap = self._create_treemap_element("file.txt", 100)
head_results = self._create_size_analysis_results(treemap_root=head_treemap)
base_results = self._create_size_analysis_results(treemap_root=base_treemap)
result = compare_size_analysis(head_metrics, head_results, base_metrics, base_results)
assert len(result.diff_items) == 1
diff_item = result.diff_items[0]
assert diff_item.path == "file.txt"
assert diff_item.size_diff == 50
assert diff_item.head_size == 150
assert diff_item.base_size == 100
assert diff_item.type == DiffType.INCREASED
def test_compare_size_analysis_file_decreased(self):
"""Test compare_size_analysis with a file size decreased."""
head_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="test",
max_install_size=1500,
max_download_size=800,
)
base_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="test",
max_install_size=1500,
max_download_size=800,
)
# Same file, different sizes
head_treemap = self._create_treemap_element("file.txt", 50)
base_treemap = self._create_treemap_element("file.txt", 100)
head_results = self._create_size_analysis_results(treemap_root=head_treemap)
base_results = self._create_size_analysis_results(treemap_root=base_treemap)
result = compare_size_analysis(head_metrics, head_results, base_metrics, base_results)
assert len(result.diff_items) == 1
diff_item = result.diff_items[0]
assert diff_item.path == "file.txt"
assert diff_item.size_diff == -50
assert diff_item.head_size == 50
assert diff_item.base_size == 100
assert diff_item.type == DiffType.DECREASED
def test_compare_size_analysis_file_unchanged(self):
"""Test compare_size_analysis with a file size unchanged (should be skipped)."""
head_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="test",
max_install_size=1500,
max_download_size=800,
)
base_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="test",
max_install_size=1500,
max_download_size=800,
)
# Same file, same size
head_treemap = self._create_treemap_element("file.txt", 100)
base_treemap = self._create_treemap_element("file.txt", 100)
head_results = self._create_size_analysis_results(treemap_root=head_treemap)
base_results = self._create_size_analysis_results(treemap_root=base_treemap)
result = compare_size_analysis(head_metrics, head_results, base_metrics, base_results)
# Should skip files with no size difference
assert len(result.diff_items) == 0
def test_compare_size_analysis_multiple_files(self):
"""Test compare_size_analysis with multiple files."""
head_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="test",
max_install_size=1500,
max_download_size=800,
)
base_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="test",
max_install_size=1500,
max_download_size=800,
)
# Head has file1 (increased), file2 (new), file3 (removed from base)
# Base has file1 (original), file3 (removed from head)
head_treemap = self._create_treemap_element(
"dir",
0,
children=[
self._create_treemap_element("file1.txt", 150), # increased from 100
self._create_treemap_element("file2.txt", 200), # new file
],
)
base_treemap = self._create_treemap_element(
"dir",
0,
children=[
self._create_treemap_element("file1.txt", 100), # original size
self._create_treemap_element("file3.txt", 300), # removed file
],
)
head_results = self._create_size_analysis_results(treemap_root=head_treemap)
base_results = self._create_size_analysis_results(treemap_root=base_treemap)
result = compare_size_analysis(head_metrics, head_results, base_metrics, base_results)
assert len(result.diff_items) == 3
# Sort by path for consistent testing
diff_items = sorted(result.diff_items, key=lambda x: x.path)
# file1.txt - increased
assert diff_items[0].path == "dir/file1.txt"
assert diff_items[0].size_diff == 50
assert diff_items[0].type == DiffType.INCREASED
# file2.txt - added
assert diff_items[1].path == "dir/file2.txt"
assert diff_items[1].size_diff == 200
assert diff_items[1].type == DiffType.ADDED
# file3.txt - removed
assert diff_items[2].path == "dir/file3.txt"
assert diff_items[2].size_diff == -300
assert diff_items[2].type == DiffType.REMOVED
def test_compare_size_analysis_zero_size_diffs_skipped(self):
"""Test that zero size diffs are skipped for all diff types."""
head_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="test",
max_install_size=1500,
max_download_size=800,
)
base_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="test",
max_install_size=1500,
max_download_size=800,
)
# Test added file with zero size
head_treemap = self._create_treemap_element("file1.txt", 0)
head_results = self._create_size_analysis_results(treemap_root=head_treemap)
base_results = self._create_size_analysis_results()
result = compare_size_analysis(head_metrics, head_results, base_metrics, base_results)
assert len(result.diff_items) == 0
# Test removed file with zero size
base_treemap = self._create_treemap_element("file2.txt", 0)
head_results = self._create_size_analysis_results()
base_results = self._create_size_analysis_results(treemap_root=base_treemap)
result = compare_size_analysis(head_metrics, head_results, base_metrics, base_results)
assert len(result.diff_items) == 0
def test_compare_size_analysis_different_artifact_types(self):
"""Test compare_size_analysis with different artifact types."""
head_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.WATCH_ARTIFACT,
identifier="watch",
max_install_size=1500,
max_download_size=800,
)
base_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="main",
max_install_size=1500,
max_download_size=800,
)
head_results = self._create_size_analysis_results()
base_results = self._create_size_analysis_results()
result = compare_size_analysis(head_metrics, head_results, base_metrics, base_results)
assert (
result.size_metric_diff_item.metrics_artifact_type
== PreprodArtifactSizeMetrics.MetricsArtifactType.WATCH_ARTIFACT
)
assert result.size_metric_diff_item.identifier == "watch"
def test_compare_size_analysis_complex_nested_structure(self):
"""Test compare_size_analysis with complex nested directory structure."""
head_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.WATCH_ARTIFACT,
identifier="watch",
max_install_size=1500,
max_download_size=800,
)
base_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="main",
max_install_size=1500,
max_download_size=800,
)
# Complex nested structure
head_treemap = self._create_treemap_element(
"app",
0,
children=[
self._create_treemap_element(
"src",
0,
children=[
self._create_treemap_element("main.js", 500),
self._create_treemap_element("utils.js", 200),
],
),
self._create_treemap_element(
"assets",
0,
children=[
self._create_treemap_element("logo.png", 100),
],
),
],
)
base_treemap = self._create_treemap_element(
"app",
0,
children=[
self._create_treemap_element(
"src",
0,
children=[
self._create_treemap_element("main.js", 400), # increased
# utils.js removed
],
),
# assets directory removed
],
)
head_results = self._create_size_analysis_results(treemap_root=head_treemap)
base_results = self._create_size_analysis_results(treemap_root=base_treemap)
result = compare_size_analysis(head_metrics, head_results, base_metrics, base_results)
assert len(result.diff_items) == 3
# Sort by path for consistent testing
diff_items = sorted(result.diff_items, key=lambda x: x.path)
# app/assets/logo.png - added
assert diff_items[0].path == "app/assets/logo.png"
assert diff_items[0].size_diff == 100
assert diff_items[0].type == DiffType.ADDED
# app/src/main.js - increased
assert diff_items[1].path == "app/src/main.js"
assert diff_items[1].size_diff == 100
assert diff_items[1].type == DiffType.INCREASED
# app/src/utils.js - added
assert diff_items[2].path == "app/src/utils.js"
assert diff_items[2].size_diff == 200
assert diff_items[2].type == DiffType.ADDED
def test_compare_size_analysis_duplicate_paths(self):
"""Test compare_size_analysis with duplicate paths (e.g., Assets.car with multiple entries)."""
head_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="test",
max_install_size=1500,
max_download_size=800,
)
base_metrics = PreprodArtifactSizeMetrics(
preprod_artifact_id=1,
metrics_artifact_type=PreprodArtifactSizeMetrics.MetricsArtifactType.MAIN_ARTIFACT,
identifier="test",
max_install_size=1500,
max_download_size=800,
)
# Simulate Assets.car with duplicate image files
# Head has more duplicates than base
head_treemap = self._create_treemap_element(
"Assets.car",
4788224,
path="Assets.car",
children=[
self._create_treemap_element("AppIcon", 4096, path="Assets.car/AppIcon"),
self._create_treemap_element(
"Primary-Light@2x.png", 507904, path="Assets.car/Primary-Light@2x.png"
),
self._create_treemap_element(
"Primary-Light@2x.png", 802816, path="Assets.car/Primary-Light@2x.png"
),
self._create_treemap_element("AppIcon", 4096, path="Assets.car/AppIcon"),
self._create_treemap_element(
"Primary-Light@2x.png", 507904, path="Assets.car/Primary-Light@2x.png"
),
self._create_treemap_element(
"Primary-Light@2x.png", 802816, path="Assets.car/Primary-Light@2x.png"
),
self._create_treemap_element(
"Primary-Dark@2x.png", 339968, path="Assets.car/Primary-Dark@2x.png"
),
self._create_treemap_element(
"Primary-Dark@2x.png", 462848, path="Assets.car/Primary-Dark@2x.png"
),
],
)
base_treemap = self._create_treemap_element(
"Assets.car",
2404352,
path="Assets.car",
children=[
self._create_treemap_element("AppIcon", 4096, path="Assets.car/AppIcon"),
self._create_treemap_element(
"Primary-Light@2x.png", 507904, path="Assets.car/Primary-Light@2x.png"
),
self._create_treemap_element(
"Primary-Light@2x.png", 802816, path="Assets.car/Primary-Light@2x.png"
),
self._create_treemap_element(
"Primary-Dark@2x.png", 339968, path="Assets.car/Primary-Dark@2x.png"
),
self._create_treemap_element(
"Primary-Dark@2x.png", 462848, path="Assets.car/Primary-Dark@2x.png"
),
],
)
head_results = self._create_size_analysis_results(treemap_root=head_treemap)
base_results = self._create_size_analysis_results(treemap_root=base_treemap)
result = compare_size_analysis(head_metrics, head_results, base_metrics, base_results)
# Should detect added files: 1 AppIcon, 2 Primary-Light@2x.png
assert len(result.diff_items) == 3
# Sort by path and size for consistent testing
diff_items = sorted(result.diff_items, key=lambda x: (x.path, x.size_diff))
# Assets.car/AppIcon - added (1 extra copy)
assert diff_items[0].path == "Assets.car/AppIcon"
assert diff_items[0].size_diff == 4096
assert diff_items[0].type == DiffType.ADDED
# Assets.car/Primary-Light@2x.png - added (2 extra copies)
assert diff_items[1].path == "Assets.car/Primary-Light@2x.png"
assert diff_items[1].size_diff == 507904
assert diff_items[1].type == DiffType.ADDED
assert diff_items[2].path == "Assets.car/Primary-Light@2x.png"
assert diff_items[2].size_diff == 802816
assert diff_items[2].type == DiffType.ADDED
| CompareSizeAnalysisTest |
python | django__django | django/db/migrations/operations/models.py | {
"start": 33859,
"end": 35515
} | class ____(IndexOperation):
"""Remove an index from a model."""
category = OperationCategory.REMOVAL
def __init__(self, model_name, name):
self.model_name = model_name
self.name = name
def state_forwards(self, app_label, state):
state.remove_index(app_label, self.model_name_lower, self.name)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
model = from_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
from_model_state = from_state.models[app_label, self.model_name_lower]
index = from_model_state.get_index_by_name(self.name)
schema_editor.remove_index(model, index)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
to_model_state = to_state.models[app_label, self.model_name_lower]
index = to_model_state.get_index_by_name(self.name)
schema_editor.add_index(model, index)
def deconstruct(self):
kwargs = {
"model_name": self.model_name,
"name": self.name,
}
return (
self.__class__.__qualname__,
[],
kwargs,
)
def describe(self):
return "Remove index %s from %s" % (self.name, self.model_name)
@property
def migration_name_fragment(self):
return "remove_%s_%s" % (self.model_name_lower, self.name.lower())
| RemoveIndex |
python | sqlalchemy__sqlalchemy | test/orm/test_relationship_criteria.py | {
"start": 61143,
"end": 84336
} | class ____(_Fixtures, testing.AssertsCompiledSQL):
    """Tests for relationship loader options combined with ``and_()``
    criteria (e.g. ``User.addresses.and_(...)``) across all loader
    strategies (joined, selectin, subquery, lazy) and explicit joins.

    NOTE(review): many assertions pin exact compiled SQL, including
    generated alias names (``addresses_1`` etc.) and compiled-cache
    behavior across repeated executions with different parameters, so
    the construction order of statements here is significant.
    """

    __dialect__ = "default"

    def _user_minus_edwood(self, User, Address):
        # Expected fixture object graph with the "ed@wood.com" address
        # filtered out of user "ed"'s collection.
        return [
            User(
                addresses=[
                    Address(email_address="jack@bean.com", id=1, user_id=7)
                ],
                id=7,
                name="jack",
            ),
            User(
                addresses=[
                    Address(
                        email_address="ed@bettyboop.com",
                        id=3,
                        user_id=8,
                    ),
                    Address(email_address="ed@lala.com", id=4, user_id=8),
                ],
                id=8,
                name="ed",
            ),
            User(
                addresses=[
                    Address(email_address="fred@fred.com", id=5, user_id=9)
                ],
                id=9,
                name="fred",
            ),
            User(addresses=[], id=10, name="chuck"),
        ]

    def _user_minus_edlala(self, User, Address):
        # Expected fixture object graph with the "ed@lala.com" address
        # filtered out of user "ed"'s collection.
        return [
            User(
                addresses=[
                    Address(email_address="jack@bean.com", id=1, user_id=7)
                ],
                id=7,
                name="jack",
            ),
            User(
                addresses=[
                    Address(email_address="ed@wood.com", id=2, user_id=8),
                    Address(
                        email_address="ed@bettyboop.com",
                        id=3,
                        user_id=8,
                    ),
                ],
                id=8,
                name="ed",
            ),
            User(
                addresses=[
                    Address(email_address="fred@fred.com", id=5, user_id=9)
                ],
                id=9,
                name="fred",
            ),
            User(addresses=[], id=10, name="chuck"),
        ]

    def test_joinedload_local_criteria(self, user_address_fixture):
        # joinedload with and_() criteria: the filter must land in the
        # LEFT OUTER JOIN's ON clause, not the WHERE clause.
        User, Address = user_address_fixture

        s = Session(testing.db, future=True)

        def go(value):
            stmt = (
                select(User)
                .options(
                    joinedload(
                        User.addresses.and_(Address.email_address != value)
                    ),
                )
                .order_by(User.id)
            )
            result = s.execute(stmt)
            return result

        # Two different parameter values exercise the compiled cache.
        for value in "ed@wood.com", "ed@lala.com":
            s.close()
            with self.sql_execution_asserter() as asserter:
                result = go(value)
                eq_(
                    result.scalars().unique().all(),
                    (
                        self._user_minus_edwood(*user_address_fixture)
                        if value == "ed@wood.com"
                        else self._user_minus_edlala(*user_address_fixture)
                    ),
                )
            asserter.assert_(
                CompiledSQL(
                    "SELECT users.id, users.name, addresses_1.id AS id_1, "
                    "addresses_1.user_id, addresses_1.email_address FROM "
                    "users LEFT OUTER JOIN addresses AS addresses_1 "
                    "ON users.id = addresses_1.user_id "
                    "AND addresses_1.email_address != :email_address_1 "
                    "ORDER BY users.id, addresses_1.id",
                    [{"email_address_1": value}],
                ),
            )

    @testing.combinations(
        lambda r: r.scalar(),
        lambda r: r.scalar_one(),
        lambda r: r.scalar_one_or_none(),
        argnames="get",
    )
    def test_joinedload_scalar(self, user_address_fixture, get):
        # scalar()/scalar_one()/scalar_one_or_none() must all work on a
        # uniquified joinedload result.
        User, Address = user_address_fixture

        s = Session(testing.db, future=True)

        stmt = (
            select(User)
            .options(joinedload(User.addresses))
            .where(User.name == "jack")
        )
        r = s.execute(stmt).unique()

        jack = get(r)
        eq_(jack.name, "jack")

    def test_selectinload_local_criteria(self, user_address_fixture):
        # selectinload with and_() criteria: the filter is appended to
        # the secondary IN-based SELECT's WHERE clause.
        User, Address = user_address_fixture

        s = Session(testing.db, future=True)

        def go(value):
            stmt = (
                select(User)
                .options(
                    selectinload(
                        User.addresses.and_(Address.email_address != value)
                    ),
                )
                .order_by(User.id)
            )
            result = s.execute(stmt)
            return result

        # Repeating values verifies cache keys distinguish the parameter
        # but reuse the compiled statement.
        for value in (
            "ed@wood.com",
            "ed@lala.com",
            "ed@wood.com",
            "ed@lala.com",
        ):
            s.close()
            with self.sql_execution_asserter() as asserter:
                result = go(value)
                eq_(
                    result.scalars().unique().all(),
                    (
                        self._user_minus_edwood(*user_address_fixture)
                        if value == "ed@wood.com"
                        else self._user_minus_edlala(*user_address_fixture)
                    ),
                )
            asserter.assert_(
                CompiledSQL(
                    "SELECT users.id, users.name FROM users ORDER BY users.id"
                ),
                CompiledSQL(
                    "SELECT addresses.user_id, addresses.id, "
                    "addresses.email_address "
                    "FROM addresses "
                    "WHERE addresses.user_id IN "
                    "(__[POSTCOMPILE_primary_keys]) "
                    "AND addresses.email_address != :email_address_1 "
                    "ORDER BY addresses.id",
                    [
                        {
                            "primary_keys": [7, 8, 9, 10],
                            "email_address_1": value,
                        }
                    ],
                ),
            )

    def test_selectinload_local_criteria_subquery(self, user_address_fixture):
        """test #7489"""
        User, Address = user_address_fixture

        s = Session(testing.db, future=True)

        def go(value):
            a1 = aliased(Address)
            subq = select(a1.id).where(a1.email_address != value).subquery()
            stmt = (
                select(User)
                .options(
                    selectinload(User.addresses.and_(Address.id == subq.c.id)),
                )
                .order_by(User.id)
            )
            result = s.execute(stmt)
            return result

        for value in (
            "ed@wood.com",
            "ed@lala.com",
            "ed@wood.com",
            "ed@lala.com",
        ):
            s.close()
            with self.sql_execution_asserter() as asserter:
                result = go(value)
                eq_(
                    result.scalars().unique().all(),
                    (
                        self._user_minus_edwood(*user_address_fixture)
                        if value == "ed@wood.com"
                        else self._user_minus_edlala(*user_address_fixture)
                    ),
                )
            asserter.assert_(
                CompiledSQL(
                    "SELECT users.id, users.name FROM users ORDER BY users.id"
                ),
                CompiledSQL(
                    "SELECT addresses.user_id, addresses.id, "
                    "addresses.email_address "
                    # note the comma-separated FROM clause
                    "FROM addresses, (SELECT addresses_1.id AS id FROM "
                    "addresses AS addresses_1 "
                    "WHERE addresses_1.email_address != :email_address_1) "
                    "AS anon_1 WHERE addresses.user_id "
                    "IN (__[POSTCOMPILE_primary_keys]) "
                    "AND addresses.id = anon_1.id ORDER BY addresses.id",
                    [
                        {
                            "primary_keys": [7, 8, 9, 10],
                            "email_address_1": value,
                        }
                    ],
                ),
            )

    @testing.combinations(
        (selectinload,),
        (subqueryload,),
        (lazyload,),
        (joinedload,),
        argnames="opt",
    )
    @testing.variation("use_in", [True, False])
    def test_opts_local_criteria_cachekey(
        self, opt, user_address_fixture, use_in
    ):
        """test #11173"""
        # Only result correctness is asserted here; the point is that the
        # cache key changes appropriately with the criteria parameter.
        User, Address = user_address_fixture

        s = Session(testing.db, future=True)

        def go(value):
            if use_in:
                expr = ~Address.email_address.in_([value, "some_email"])
            else:
                expr = Address.email_address != value
            stmt = (
                select(User)
                .options(
                    opt(User.addresses.and_(expr)),
                )
                .order_by(User.id)
            )
            result = s.execute(stmt)
            return result

        for value in (
            "ed@wood.com",
            "ed@lala.com",
            "ed@wood.com",
            "ed@lala.com",
        ):
            s.close()
            result = go(value)
            eq_(
                result.scalars().unique().all(),
                (
                    self._user_minus_edwood(*user_address_fixture)
                    if value == "ed@wood.com"
                    else self._user_minus_edlala(*user_address_fixture)
                ),
            )

    @testing.combinations(
        (joinedload, False),
        (lazyload, True),
        (subqueryload, False),
        (selectinload, True),
        argnames="opt,results_supported",
    )
    def test_loader_criteria_subquery_w_same_entity(
        self, user_address_fixture, opt, results_supported
    ):
        """test #7491.

        note this test also uses the not-quite-supported form of subquery
        criteria introduced by #7489. where we also have to clone
        the subquery linked only from a column criteria. this required
        additional changes to the _annotate() method that is also
        tested here, which is why two of the loader strategies still fail;
        we're just testing that there's no recursion overflow with this
        very particular form.

        """
        User, Address = user_address_fixture

        s = Session(testing.db, future=True)

        def go(value):
            subq = (
                select(Address.id)
                .where(Address.email_address != value)
                .subquery()
            )
            stmt = (
                select(User)
                .options(
                    # subquery here would need to be added to the FROM
                    # clause.  this isn't quite supported and won't work
                    # right now with joinedoad() or subqueryload().
                    opt(User.addresses.and_(Address.id == subq.c.id)),
                )
                .order_by(User.id)
            )
            result = s.execute(stmt)
            return result

        for value in (
            "ed@wood.com",
            "ed@lala.com",
            "ed@wood.com",
            "ed@lala.com",
        ):
            s.close()

            if not results_supported:
                # for joinedload and subqueryload, the query generated here
                # is invalid right now; this is because it's already not
                # quite a supported pattern to refer to a subquery-bound
                # column in loader criteria.  However, the main thing we want
                # to prevent here is the recursion overflow, so make sure
                # we get a DBAPI error at least indicating compilation
                # succeeded.
                with expect_raises(sa_exc.DBAPIError):
                    go(value).scalars().unique().all()
            else:
                result = go(value).scalars().unique().all()
                eq_(
                    result,
                    (
                        self._user_minus_edwood(*user_address_fixture)
                        if value == "ed@wood.com"
                        else self._user_minus_edlala(*user_address_fixture)
                    ),
                )

    @testing.combinations((True,), (False,), argnames="use_compiled_cache")
    def test_selectinload_nested_criteria(
        self, user_order_item_fixture, use_compiled_cache
    ):
        # and_() criteria on two levels of a nested loader path
        # (selectinload -> joinedload); run with and without the
        # compiled cache to ensure identical SQL either way.
        User, Order, Item = user_order_item_fixture

        if not use_compiled_cache:
            s = Session(
                testing.db.execution_options(compiled_cache=None), future=True
            )
        else:
            s = Session(testing.db, future=True)

        def go(order_description, item_description):
            stmt = (
                select(User)
                .where(User.id == 7)
                .options(
                    selectinload(
                        User.orders.and_(
                            Order.description == order_description
                        )
                    ).joinedload(
                        Order.items.and_(Item.description == item_description)
                    ),
                )
            )
            return s.execute(stmt)

        for order_description, item_description, oid, iid in (
            ("order 3", "item 3", 3, 3),
            ("order 3", "item 4", 3, 4),
            ("order 3", "item 4", 3, 4),
            ("order 5", "item 5", 5, 5),
            ("order 3", "item 3", 3, 3),
            ("order 5", "item 5", 5, 5),
        ):
            s.close()
            with self.sql_execution_asserter() as asserter:
                result = go(order_description, item_description)

                eq_(
                    result.scalars().unique().all(),
                    [User(id=7, orders=[Order(id=oid, items=[Item(id=iid)])])],
                )
            asserter.assert_(
                CompiledSQL(
                    "SELECT users.id, users.name FROM users "
                    "WHERE users.id = :id_1",
                    [{"id_1": 7}],
                ),
                CompiledSQL(
                    "SELECT orders.user_id, "
                    "orders.id, "
                    "orders.address_id, "
                    "orders.description, "
                    "orders.isopen, "
                    "items_1.id, "
                    "items_1.description "
                    "FROM orders LEFT OUTER JOIN "
                    "(order_items AS order_items_1 "
                    "JOIN items AS items_1 "
                    "ON items_1.id = order_items_1.item_id "
                    "AND items_1.description = :description_1) "
                    "ON orders.id = order_items_1.order_id "
                    "WHERE orders.user_id IN (__[POSTCOMPILE_primary_keys]) "
                    "AND orders.description = :description_2 "
                    "ORDER BY orders.id, items_1.id",
                    [
                        {
                            "description_1": item_description,
                            "primary_keys": [7],
                            "description_2": order_description,
                        }
                    ],
                ),
            )

    def test_lazyload_local_criteria(self, user_address_fixture):
        # lazyload with and_() criteria: one lazy SELECT per parent, each
        # carrying the extra criteria in its WHERE clause.
        User, Address = user_address_fixture

        s = Session(testing.db, future=True)

        def go(value):
            s.close()
            stmt = (
                select(User)
                .options(
                    lazyload(
                        User.addresses.and_(Address.email_address != value)
                    ),
                )
                .order_by(User.id)
            )
            result = s.execute(stmt)
            return result

        for value in "ed@wood.com", "ed@lala.com":
            with self.sql_execution_asserter() as asserter:
                result = go(value)

                eq_(
                    result.scalars().unique().all(),
                    (
                        self._user_minus_edwood(*user_address_fixture)
                        if value == "ed@wood.com"
                        else self._user_minus_edlala(*user_address_fixture)
                    ),
                )
            asserter.assert_(
                CompiledSQL(
                    "SELECT users.id, users.name FROM users ORDER BY users.id"
                ),
                CompiledSQL(
                    "SELECT addresses.id, "
                    "addresses.user_id, "
                    "addresses.email_address "
                    "FROM addresses WHERE :param_1 = addresses.user_id "
                    "AND addresses.email_address != :email_address_1 "
                    "ORDER BY addresses.id",
                    [{"param_1": 7, "email_address_1": value}],
                ),
                CompiledSQL(
                    "SELECT addresses.id, "
                    "addresses.user_id, "
                    "addresses.email_address "
                    "FROM addresses WHERE :param_1 = addresses.user_id "
                    "AND addresses.email_address != :email_address_1 "
                    "ORDER BY addresses.id",
                    [{"param_1": 8, "email_address_1": value}],
                ),
                CompiledSQL(
                    "SELECT addresses.id, "
                    "addresses.user_id, "
                    "addresses.email_address "
                    "FROM addresses WHERE :param_1 = addresses.user_id "
                    "AND addresses.email_address != :email_address_1 "
                    "ORDER BY addresses.id",
                    [{"param_1": 9, "email_address_1": value}],
                ),
                CompiledSQL(
                    "SELECT addresses.id, "
                    "addresses.user_id, "
                    "addresses.email_address "
                    "FROM addresses WHERE :param_1 = addresses.user_id "
                    "AND addresses.email_address != :email_address_1 "
                    "ORDER BY addresses.id",
                    [{"param_1": 10, "email_address_1": value}],
                ),
            )

    def test_subqueryload_local_criteria(self, user_address_fixture):
        # subqueryload with and_() criteria: the filter becomes part of
        # the JOIN in the secondary subquery-based SELECT.
        User, Address = user_address_fixture

        s = Session(testing.db, future=True)

        def go(value):
            s.close()
            stmt = (
                select(User)
                .options(
                    subqueryload(
                        User.addresses.and_(Address.email_address != value)
                    ),
                )
                .order_by(User.id)
            )
            result = s.execute(stmt)
            return result

        for value in "ed@wood.com", "ed@lala.com":
            with self.sql_execution_asserter() as asserter:
                result = go(value)

                eq_(
                    result.scalars().unique().all(),
                    (
                        self._user_minus_edwood(*user_address_fixture)
                        if value == "ed@wood.com"
                        else self._user_minus_edlala(*user_address_fixture)
                    ),
                )
            asserter.assert_(
                CompiledSQL(
                    "SELECT users.id, users.name FROM users ORDER BY users.id"
                ),
                CompiledSQL(
                    "SELECT addresses.id AS addresses_id, addresses.user_id "
                    "AS addresses_user_id, addresses.email_address "
                    "AS addresses_email_address, anon_1.users_id "
                    "AS anon_1_users_id FROM (SELECT users.id AS users_id "
                    "FROM users) AS anon_1 "
                    "JOIN addresses ON anon_1.users_id = "
                    "addresses.user_id AND "
                    "addresses.email_address != :email_address_1 "
                    "ORDER BY addresses.id",
                    [{"email_address_1": value}],
                ),
            )

    def test_query_join_local_criteria(self, user_address_fixture):
        # Legacy Query.join() with and_() criteria lands in the ON clause.
        User, Address = user_address_fixture

        s = Session(testing.db)

        q = s.query(User).join(
            User.addresses.and_(Address.email_address != "email")
        )

        self.assert_compile(
            q,
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM users JOIN addresses ON users.id = addresses.user_id "
            "AND addresses.email_address != :email_address_1",
        )

    def test_select_join_local_criteria(self, user_address_fixture):
        # 2.0-style select().join() with and_() criteria.
        User, Address = user_address_fixture

        stmt = select(User).join(
            User.addresses.and_(Address.email_address != "email")
        )

        self.assert_compile(
            stmt,
            "SELECT users.id, users.name FROM users JOIN addresses "
            "ON users.id = addresses.user_id "
            "AND addresses.email_address != :email_address_1",
        )

    def test_select_joinm2m_local_criteria(self, order_item_fixture):
        # many-to-many join: the criteria attaches to the JOIN against the
        # target table, after the secondary-table join.
        Order, Item = order_item_fixture

        stmt = select(Order).join(
            Order.items.and_(Item.description != "description")
        )

        self.assert_compile(
            stmt,
            "SELECT orders.id, orders.user_id, orders.address_id, "
            "orders.description, orders.isopen "
            "FROM orders JOIN order_items AS order_items_1 "
            "ON orders.id = order_items_1.order_id "
            "JOIN items ON items.id = order_items_1.item_id "
            "AND items.description != :description_1",
        )

    def test_select_joinm2m_aliased_local_criteria(self, order_item_fixture):
        # same as above but with of_type() against an aliased target.
        Order, Item = order_item_fixture

        i1 = aliased(Item)
        stmt = select(Order).join(
            Order.items.of_type(i1).and_(i1.description != "description")
        )

        self.assert_compile(
            stmt,
            "SELECT orders.id, orders.user_id, orders.address_id, "
            "orders.description, orders.isopen "
            "FROM orders JOIN order_items AS order_items_1 "
            "ON orders.id = order_items_1.order_id "
            "JOIN items AS items_1 ON items_1.id = order_items_1.item_id "
            "AND items_1.description != :description_1",
        )

    def test_use_secondary_table_in_criteria(self, order_item_fixture):
        """test #11010 , regression caused by #9779"""
        # referencing the association (secondary) table's columns directly
        # inside and_() must resolve against the generated alias.
        Order, Item = order_item_fixture

        order_items = self.tables.order_items

        stmt = select(Order).join(
            Order.items.and_(
                order_items.c.item_id > 1, Item.description != "description"
            )
        )

        self.assert_compile(
            stmt,
            "SELECT orders.id, orders.user_id, orders.address_id, "
            "orders.description, orders.isopen FROM orders JOIN order_items "
            "AS order_items_1 ON orders.id = order_items_1.order_id "
            "JOIN items ON items.id = order_items_1.item_id "
            "AND order_items_1.item_id > :item_id_1 "
            "AND items.description != :description_1",
        )
| RelationshipCriteriaTest |
python | getsentry__sentry | tests/snuba/api/endpoints/test_group_details.py | {
"start": 881,
"end": 15186
} | class ____(APITestCase, SnubaTestCase):
    def test_multiple_environments(self) -> None:
        group = self.create_group()
        self.login_as(user=self.user)

        environment = Environment.get_or_create(group.project, "production")
        environment2 = Environment.get_or_create(group.project, "staging")

        url = f"/api/0/issues/{group.id}/"

        # Both requested environment ids must be forwarded to every tsdb
        # get_range call made while serializing the group.
        with mock.patch(
            "sentry.issues.endpoints.group_details.tsdb.backend.get_range",
            side_effect=tsdb.backend.get_range,
        ) as get_range:
            response = self.client.get(
                f"{url}?environment=production&environment=staging", format="json"
            )
            assert response.status_code == 200
            assert get_range.call_count == 2
            for args, kwargs in get_range.call_args_list:
                assert kwargs["environment_ids"] == [environment.id, environment2.id]

        # An unknown environment name yields 404, not an empty result.
        response = self.client.get(f"{url}?environment=invalid", format="json")
        assert response.status_code == 404
    def test_with_first_last_release(self) -> None:
        # NOTE(review): event insertion order/timestamps drive which release
        # is reported as first vs. last; "1.0" is oldest, "1.0a" is newest,
        # and "1.1" sits in between to prove it is not picked up.
        self.login_as(user=self.user)
        first_release = {
            "firstEvent": before_now(minutes=3),
            "lastEvent": before_now(minutes=2, seconds=30),
        }
        last_release = {
            "firstEvent": before_now(minutes=1, seconds=30),
            "lastEvent": before_now(minutes=1),
        }

        for timestamp in first_release.values():
            self.store_event(
                data={"release": "1.0", "timestamp": timestamp.isoformat()},
                project_id=self.project.id,
            )
        self.store_event(
            data={"release": "1.1", "timestamp": before_now(minutes=2).isoformat()},
            project_id=self.project.id,
        )
        event = [
            self.store_event(
                data={"release": "1.0a", "timestamp": timestamp.isoformat()},
                project_id=self.project.id,
            )
            for timestamp in last_release.values()
        ][-1]

        group = event.group

        url = f"/api/0/issues/{group.id}/"
        response = self.client.get(url, format="json")
        assert response.status_code == 200, response.content
        assert response.data["id"] == str(group.id)
        release = response.data["firstRelease"]
        assert release["version"] == "1.0"
        # ctime() comparison tolerates sub-second serialization differences.
        for event, timestamp in first_release.items():
            assert release[event].ctime() == timestamp.ctime()
        release = response.data["lastRelease"]
        assert release["version"] == "1.0a"
        for event, timestamp in last_release.items():
            assert release[event].ctime() == timestamp.ctime()
    def test_first_last_only_one_tagstore(self) -> None:
        # First and last release lookups should be batched into a single
        # tagstore get_release_tags call, not one per release.
        self.login_as(user=self.user)

        event = self.store_event(
            data={"release": "1.0", "timestamp": before_now(days=3).isoformat()},
            project_id=self.project.id,
        )
        self.store_event(
            data={"release": "1.1", "timestamp": before_now(minutes=3).isoformat()},
            project_id=self.project.id,
        )

        group = event.group

        url = f"/api/0/issues/{group.id}/"

        with mock.patch("sentry.tagstore.backend.get_release_tags") as get_release_tags:
            response = self.client.get(url, format="json")
            assert response.status_code == 200
            assert get_release_tags.call_count == 1
    def test_first_release_only(self) -> None:
        # When the most recent Release row has been deleted, the endpoint
        # should still report the surviving first release and a null last
        # release, with firstEvent == lastEvent for the single event.
        self.login_as(user=self.user)
        first_event = before_now(days=3)

        self.store_event(
            data={"release": "1.0", "timestamp": first_event.isoformat()},
            project_id=self.project.id,
        )
        event = self.store_event(
            data={"release": "1.1", "timestamp": before_now(days=1).isoformat()},
            project_id=self.project.id,
        )
        # Forcibly remove one of the releases
        Release.objects.get(version="1.1").delete()

        group = event.group

        url = f"/api/0/issues/{group.id}/"
        response = self.client.get(url, format="json")
        assert response.status_code == 200, response.content
        assert response.data["firstRelease"]["version"] == "1.0"
        # only one event
        assert (
            response.data["firstRelease"]["firstEvent"]
            == response.data["firstRelease"]["lastEvent"]
        )
        assert response.data["firstRelease"]["firstEvent"].ctime() == first_event.ctime()
        assert response.data["lastRelease"] is None
    def test_group_expand_inbox(self) -> None:
        # ?expand=inbox returns inbox metadata while the group is in the
        # inbox, and null after it is removed.
        self.login_as(user=self.user)

        event = self.store_event(
            data={"timestamp": before_now(minutes=3).isoformat()},
            project_id=self.project.id,
        )
        group = event.group
        add_group_to_inbox(group, GroupInboxReason.NEW)

        url = f"/api/0/issues/{group.id}/?expand=inbox"
        response = self.client.get(url, format="json")
        assert response.status_code == 200, response.content
        assert response.data["inbox"] is not None
        assert response.data["inbox"]["reason"] == GroupInboxReason.NEW.value
        assert response.data["inbox"]["reason_details"] is None

        remove_group_from_inbox(event.group)
        response = self.client.get(url, format="json")
        assert response.status_code == 200, response.content
        assert response.data["inbox"] is None
    def test_group_expand_owners(self) -> None:
        # ?expand=owners returns null with no GroupOwner rows, and a list
        # of actor references once a suspect-commit owner exists.
        self.login_as(user=self.user)
        event = self.store_event(
            data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
            project_id=self.project.id,
        )
        group = event.group

        url = f"/api/0/issues/{group.id}/?expand=owners"
        self.login_as(user=self.user)
        # Test with no owner
        response = self.client.get(url, format="json")
        assert response.status_code == 200
        assert response.data["owners"] is None

        # Test with owners
        GroupOwner.objects.create(
            group=event.group,
            project=event.project,
            organization=event.project.organization,
            type=GroupOwnerType.SUSPECT_COMMIT.value,
            user_id=self.user.id,
        )
        response = self.client.get(url, format="json")
        assert response.status_code == 200, response.content
        assert response.data["owners"] is not None
        assert len(response.data["owners"]) == 1
        assert response.data["owners"][0]["owner"] == f"user:{self.user.id}"
        assert response.data["owners"][0]["type"] == GROUP_OWNER_TYPE[GroupOwnerType.SUSPECT_COMMIT]
    def test_group_expand_forecasts(self) -> None:
        # ?expand=forecast surfaces the escalation forecast previously
        # generated and saved for the group.
        self.login_as(user=self.user)
        event = self.store_event(
            data={"timestamp": before_now(seconds=500).isoformat(), "fingerprint": ["group-1"]},
            project_id=self.project.id,
        )
        group = event.group
        generate_and_save_forecasts([group])

        url = f"/api/0/issues/{group.id}/?expand=forecast"
        response = self.client.get(url, format="json")
        assert response.status_code == 200, response.content
        assert response.data["forecast"] is not None
        assert response.data["forecast"]["data"] is not None
        assert response.data["forecast"]["date_added"] is not None
def test_group_get_priority(self) -> None:
self.login_as(user=self.user)
group = self.create_group(
project=self.project,
status=GroupStatus.IGNORED,
priority=PriorityLevel.LOW,
)
url = f"/api/0/issues/{group.id}/"
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["priority"] == "low"
assert response.data["priorityLockedAt"] is None
    def test_group_post_priority(self) -> None:
        # PUT with a new priority updates the group, records a SET_PRIORITY
        # activity attributed to the user, and locks the priority.
        self.login_as(user=self.user)
        group = self.create_group(
            project=self.project,
            status=GroupStatus.IGNORED,
            priority=PriorityLevel.LOW,
        )

        url = f"/api/0/issues/{group.id}/"
        get_response_before = self.client.get(url, format="json")
        assert get_response_before.status_code == 200, get_response_before.content
        assert get_response_before.data["priority"] == "low"

        response = self.client.put(url, {"priority": "high"}, format="json")
        assert response.status_code == 200, response.content
        assert response.data["priority"] == "high"

        # Activities are newest-first: SET_PRIORITY then the original
        # FIRST_SEEN from group creation.
        act_for_group = Activity.objects.get_activities_for_group(group=group, num=100)
        assert len(act_for_group) == 2
        assert act_for_group[0].type == ActivityType.SET_PRIORITY.value
        assert act_for_group[-1].type == ActivityType.FIRST_SEEN.value
        assert act_for_group[0].user_id == self.user.id
        assert act_for_group[0].data["priority"] == "high"

        get_response_after = self.client.get(url, format="json")
        assert get_response_after.status_code == 200, get_response_after.content
        assert get_response_after.data["priority"] == "high"
        assert get_response_after.data["priorityLockedAt"] is not None
    def test_assigned_to_unknown(self) -> None:
        # Assigning to a known user email succeeds; an unknown email is
        # rejected with a 400 and an actor-format validation error.
        self.login_as(user=self.user)
        event = self.store_event(
            data={"timestamp": before_now(minutes=3).isoformat()},
            project_id=self.project.id,
        )
        group = event.group
        url = f"/api/0/issues/{group.id}/"
        response = self.client.put(
            url, {"assignedTo": "admin@localhost", "status": "unresolved"}, format="json"
        )
        assert response.status_code == 200
        response = self.client.put(
            url, {"assignedTo": "user@doesnotexist.com", "status": "unresolved"}, format="json"
        )
        assert response.status_code == 400
        assert response.data == {
            "assignedTo": [
                ErrorDetail(
                    string="Could not parse actor. Format should be `type:id` where type is `team` or `user`.",
                    code="invalid",
                )
            ]
        }
def test_collapse_stats_does_not_work(self) -> None:
"""
'collapse' param should hide the stats data and not return anything in the response, but the impl
doesn't seem to respect this param.
include this test here in-case the endpoint behavior changes in the future.
"""
self.login_as(user=self.user)
event = self.store_event(
data={"timestamp": before_now(minutes=3).isoformat()},
project_id=self.project.id,
)
group = event.group
url = f"/api/0/issues/{group.id}/"
response = self.client.get(url, {"collapse": ["stats"]}, format="json")
assert response.status_code == 200
assert int(response.data["id"]) == event.group.id
assert response.data["stats"] # key shouldn't be present
assert response.data["count"] is not None # key shouldn't be present
assert response.data["userCount"] is not None # key shouldn't be present
assert response.data["firstSeen"] is not None # key shouldn't be present
assert response.data["lastSeen"] is not None # key shouldn't be present
def test_issue_type_category(self) -> None:
"""Test that the issue's type and category is returned in the results"""
self.login_as(user=self.user)
event = self.store_event(
data={"timestamp": before_now(minutes=3).isoformat()},
project_id=self.project.id,
)
url = f"/api/0/issues/{event.group.id}/"
response = self.client.get(url, format="json")
assert response.status_code == 200
assert int(response.data["id"]) == event.group.id
assert response.data["issueType"] == "error"
assert response.data["issueCategory"] == "error"
def test_delete_error_issue(self) -> Any:
"""Test that a user cannot delete a error issue"""
self.login_as(user=self.user)
group = self.create_group(status=GroupStatus.RESOLVED, project=self.project)
url = f"/api/0/issues/{group.id}/"
with patch(
"sentry.api.helpers.group_index.delete.delete_groups_for_project.apply_async"
) as mock_apply_async:
response = self.client.delete(url, format="json")
mock_apply_async.assert_called_once()
assert response.status_code == 202
# Since the task has not executed yet the group is pending deletion
assert Group.objects.get(id=group.id).status == GroupStatus.PENDING_DELETION
# Undo some of what the previous endpoint call did
group.update(status=GroupStatus.RESOLVED)
with self.tasks():
response = self.client.delete(url, format="json")
assert response.status_code == 202
assert not Group.objects.filter(id=group.id).exists()
def test_delete_issue_platform_issue(self) -> Any:
"""Test that a user cannot delete an issue if issue platform deletion is not allowed"""
self.login_as(user=self.user)
group = self.create_group(
status=GroupStatus.RESOLVED,
project=self.project,
type=PerformanceSlowDBQueryGroupType.type_id,
)
url = f"/api/0/issues/{group.id}/"
with patch(
"sentry.api.helpers.group_index.delete.delete_groups_for_project.apply_async"
) as mock_apply_async:
response = self.client.delete(url, format="json")
assert response.status_code == 202
# Since the task has not executed yet the group is pending deletion
assert Group.objects.get(id=group.id).status == GroupStatus.PENDING_DELETION
mock_apply_async.assert_called_once()
# Undo some of what the previous endpoint call did
group.update(status=GroupStatus.RESOLVED)
with self.tasks():
response = self.client.delete(url, format="json")
assert response.status_code == 202
# Now check that the group doesn't exist
assert not Group.objects.filter(id=group.id).exists()
| GroupDetailsTest |
python | pydantic__pydantic | tests/mypy/outputs/mypy-plugin_ini/plugin_fail.py | {
"start": 5141,
"end": 5427
} | class ____(BaseModel):
x: str = Field(..., alias=x_alias)
z: int
model_config = ConfigDict(validate_by_name=True)
DynamicAliasModel2(y='y', z=1)
# MYPY: error: Missing named argument "x" for "DynamicAliasModel2" [call-arg]
DynamicAliasModel2(x='y', z=1)
| DynamicAliasModel2 |
python | sqlalchemy__sqlalchemy | test/orm/test_deprecations.py | {
"start": 36220,
"end": 40490
} | class ____(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_values(self):
Address, User = (
self.classes.Address,
self.classes.User,
)
sess = fixture_session()
with testing.expect_deprecated(r"Query.values?\(\) is deprecated"):
assert list(sess.query(User).values()) == list()
q = sess.query(User)
with testing.expect_deprecated(r"Query.values?\(\) is deprecated"):
q2 = q.order_by(User.id).values(
User.name, User.name + " " + cast(User.id, String(50))
)
eq_(
list(q2),
[
("jack", "jack 7"),
("ed", "ed 8"),
("fred", "fred 9"),
("chuck", "chuck 10"),
],
)
with testing.expect_deprecated(r"Query.values?\(\) is deprecated"):
q2 = (
q.join(User.addresses)
.filter(User.name.like("%e%"))
.order_by(User.id, Address.id)
.values(User.name, Address.email_address)
)
eq_(
list(q2),
[
("ed", "ed@wood.com"),
("ed", "ed@bettyboop.com"),
("ed", "ed@lala.com"),
("fred", "fred@fred.com"),
],
)
with testing.expect_deprecated(r"Query.values?\(\) is deprecated"):
q2 = (
q.join(User.addresses)
.filter(User.name.like("%e%"))
.order_by(desc(Address.email_address))
.slice(1, 3)
.values(User.name, Address.email_address)
)
eq_(list(q2), [("ed", "ed@wood.com"), ("ed", "ed@lala.com")])
adalias = aliased(Address)
with testing.expect_deprecated(r"Query.values?\(\) is deprecated"):
q2 = (
q.join(adalias, User.addresses)
.filter(User.name.like("%e%"))
.order_by(adalias.email_address)
.values(User.name, adalias.email_address)
)
eq_(
list(q2),
[
("ed", "ed@bettyboop.com"),
("ed", "ed@lala.com"),
("ed", "ed@wood.com"),
("fred", "fred@fred.com"),
],
)
with testing.expect_deprecated(r"Query.values?\(\) is deprecated"):
q2 = q.values(func.count(User.name))
assert next(q2) == (4,)
def test_values_specific_order_by(self):
User = self.classes.User
sess = fixture_session()
with testing.expect_deprecated(r"Query.values?\(\) is deprecated"):
assert list(sess.query(User).values()) == list()
@testing.fails_on("mssql", "FIXME: unknown")
@testing.fails_on(
"oracle", "Oracle doesn't support boolean expressions as columns"
)
@testing.fails_on(
"postgresql+pg8000",
"pg8000 parses the SQL itself before passing on "
"to PG, doesn't parse this",
)
@testing.fails_on(
"postgresql+asyncpg",
"Asyncpg uses preprated statements that are not compatible with how "
"sqlalchemy passes the query. Fails with "
'ERROR: column "users.name" must appear in the GROUP BY clause'
" or be used in an aggregate function",
)
def test_values_with_boolean_selects(self):
"""Tests a values clause that works with select boolean
evaluations"""
User = self.classes.User
sess = fixture_session()
q = sess.query(User)
with testing.expect_deprecated(r"Query.values?\(\) is deprecated"):
q2 = (
q.group_by(User.name.like("%j%"))
.order_by(desc(User.name.like("%j%")))
.values(
User.name.like("%j%"), func.count(User.name.like("%j%"))
)
)
eq_(list(q2), [(True, 1), (False, 3)])
with testing.expect_deprecated(r"Query.values?\(\) is deprecated"):
q2 = q.order_by(desc(User.name.like("%j%"))).values(
User.name.like("%j%")
)
eq_(list(q2), [(True,), (False,), (False,), (False,)])
| MixedEntitiesTest |
python | patrick-kidger__equinox | equinox/_module/_prebuilt.py | {
"start": 339,
"end": 1182
} | class ____(Module):
"""Just like a normal Python bound method... except that this one is a PyTree!
This stores `__self__` as a subnode.
"""
__func__: types.FunctionType = field(static=True)
__self__: Module
def __post_init__(self):
for field_name in WRAPPER_FIELD_NAMES:
try:
value = getattr(self.__func__, field_name)
except AttributeError:
pass
else:
setattr(self, field_name, value)
def __call__(self, *args, **kwargs):
__tracebackhide__ = True
return self.__func__(self.__self__, *args, **kwargs)
@property
def __wrapped__(self):
return self.__func__.__get__(self.__self__, type(self.__self__)) # pyright: ignore[reportAttributeAccessIssue]
_Return = TypeVar("_Return")
| BoundMethod |
python | django-guardian__django-guardian | guardian/admin.py | {
"start": 20452,
"end": 21035
} | class ____(forms.Form):
group = forms.CharField(max_length=80, error_messages={"does_not_exist": _("This group does not exist")})
def clean_group(self):
"""Returns `Group` instance based on the given group name."""
name = self.cleaned_data["group"]
GroupModel = get_group_obj_perms_model().group.field.related_model
try:
group = GroupModel.objects.get(name=name)
return group
except GroupModel.DoesNotExist:
raise forms.ValidationError(self.fields["group"].error_messages["does_not_exist"])
| GroupManage |
python | automl__auto-sklearn | test/test_evaluation/test_train_evaluator.py | {
"start": 108447,
"end": 131814
} | class ____(unittest.TestCase):
def setUp(self):
self.queue = multiprocessing.Queue()
self.configuration = get_configuration_space(
DummyDatamanager()
).get_default_configuration()
self.data = get_multiclass_classification_datamanager()
self.tmp_dir = os.path.join(
os.path.dirname(__file__), ".test_holdout_functions"
)
self.n = len(self.data.data["Y_train"])
self.y = self.data.data["Y_train"].flatten()
tmp_dir_name = self.id()
self.ev_path = os.path.join(this_directory, ".tmp_evaluations", tmp_dir_name)
if os.path.exists(self.ev_path):
shutil.rmtree(self.ev_path)
os.makedirs(self.ev_path, exist_ok=False)
self.backend = unittest.mock.Mock()
self.backend.temporary_directory = tempfile.gettempdir()
self.backend.get_model_dir.return_value = self.ev_path
self.backend.get_cv_model_dir.return_value = self.ev_path
dummy_model_files = [os.path.join(self.ev_path, str(n)) for n in range(100)]
dummy_pred_files = [os.path.join(self.ev_path, str(n)) for n in range(100, 200)]
dummy_cv_model_files = [
os.path.join(self.ev_path, str(n)) for n in range(200, 300)
]
self.backend.get_model_path.side_effect = dummy_model_files
self.backend.get_cv_model_path.side_effect = dummy_cv_model_files
self.backend.get_prediction_output_path.side_effect = dummy_pred_files
self.backend.load_datamanager.return_value = self.data
self.dataset_name = json.dumps({"task_id": "test"})
self.port = logging.handlers.DEFAULT_TCP_LOGGING_PORT
def tearDown(self):
if os.path.exists(self.ev_path):
os.rmdir(self.ev_path)
def test_eval_holdout(self):
eval_holdout(
queue=self.queue,
port=self.port,
config=self.configuration,
backend=self.backend,
resampling_strategy="holdout",
resampling_strategy_args=None,
seed=1,
num_run=1,
scoring_functions=None,
output_y_hat_optimization=True,
include=None,
exclude=None,
disable_file_output=False,
instance=self.dataset_name,
metrics=[accuracy],
additional_components=dict(),
)
info = read_queue(self.queue)
self.assertEqual(len(info), 1)
self.assertAlmostEqual(info[0]["loss"], 0.030303030303030276, places=3)
self.assertEqual(info[0]["status"], StatusType.SUCCESS)
self.assertNotIn("bac_metric", info[0]["additional_run_info"])
def test_eval_holdout_multi_objective(self):
metrics = {
accuracy: 0.030303030303030276,
balanced_accuracy: 0.033333333333333326,
}
eval_holdout(
queue=self.queue,
port=self.port,
config=self.configuration,
backend=self.backend,
resampling_strategy="holdout",
resampling_strategy_args=None,
seed=test.conftest.DEFAULT_SEED,
num_run=1,
scoring_functions=None,
output_y_hat_optimization=True,
include=None,
exclude=None,
disable_file_output=False,
instance=self.dataset_name,
metrics=list(metrics.keys()),
additional_components=dict(),
)
info = read_queue(self.queue)
self.assertEqual(len(info), 1)
for metric, loss in metrics.items():
self.assertAlmostEqual(info[0]["loss"][metric.name], loss)
self.assertEqual(info[0]["status"], StatusType.SUCCESS)
self.assertNotIn("bac_metric", info[0]["additional_run_info"])
def test_eval_holdout_all_loss_functions(self):
eval_holdout(
queue=self.queue,
port=self.port,
config=self.configuration,
backend=self.backend,
resampling_strategy="holdout",
resampling_strategy_args=None,
seed=1,
num_run=1,
scoring_functions=SCORER_LIST,
output_y_hat_optimization=True,
include=None,
exclude=None,
disable_file_output=False,
instance=self.dataset_name,
metrics=[accuracy],
additional_components=dict(),
)
return_value = read_queue(self.queue)
self.assertEqual(len(return_value), 1)
fixture = {
"accuracy": 0.030303030303030276,
"balanced_accuracy": 0.033333333333333326,
"f1_macro": 0.032036613272311221,
"f1_micro": 0.030303030303030276,
"f1_weighted": 0.030441716940572849,
"log_loss": 0.06376745642134637,
"precision_macro": 0.02777777777777779,
"precision_micro": 0.030303030303030276,
"precision_weighted": 0.027777777777777901,
"recall_macro": 0.033333333333333326,
"recall_micro": 0.030303030303030276,
"recall_weighted": 0.030303030303030276,
"num_run": 1,
"test_loss": 0.04,
"train_loss": 0.0,
}
additional_run_info = return_value[0]["additional_run_info"]
for key, value in fixture.items():
self.assertAlmostEqual(additional_run_info[key], fixture[key], msg=key)
self.assertIn("duration", additional_run_info)
self.assertEqual(
len(additional_run_info),
len(fixture) + 1,
msg=sorted(additional_run_info.items()),
)
self.assertAlmostEqual(return_value[0]["loss"], 0.030303030303030276, places=3)
self.assertEqual(return_value[0]["status"], StatusType.SUCCESS)
def test_eval_holdout_iterative_fit_no_timeout(self):
eval_iterative_holdout(
queue=self.queue,
port=self.port,
config=self.configuration,
backend=self.backend,
resampling_strategy="holdout",
resampling_strategy_args=None,
seed=1,
num_run=1,
scoring_functions=None,
output_y_hat_optimization=True,
include=None,
exclude=None,
disable_file_output=False,
instance=self.dataset_name,
metrics=[accuracy],
additional_components=dict(),
)
return_value = read_queue(self.queue)
self.assertEqual(len(return_value), 9)
self.assertAlmostEqual(return_value[-1]["loss"], 0.030303030303030276)
self.assertEqual(return_value[0]["status"], StatusType.DONOTADVANCE)
self.assertEqual(return_value[-1]["status"], StatusType.SUCCESS)
def test_eval_holdout_iterative_fit_no_timeout_multi_objective(self):
metrics = {
accuracy: 0.030303030303030276,
balanced_accuracy: 0.033333333333333326,
}
eval_iterative_holdout(
queue=self.queue,
port=self.port,
config=self.configuration,
backend=self.backend,
resampling_strategy="holdout",
resampling_strategy_args=None,
seed=1,
num_run=1,
scoring_functions=None,
output_y_hat_optimization=True,
include=None,
exclude=None,
disable_file_output=False,
instance=self.dataset_name,
metrics=list(metrics.keys()),
additional_components=dict(),
)
return_value = read_queue(self.queue)
self.assertEqual(len(return_value), 9)
for metric, loss in metrics.items():
self.assertAlmostEqual(return_value[-1]["loss"][metric.name], loss)
self.assertEqual(return_value[0]["status"], StatusType.DONOTADVANCE)
self.assertEqual(return_value[-1]["status"], StatusType.SUCCESS)
def test_eval_holdout_budget_iterations(self):
eval_holdout(
queue=self.queue,
port=self.port,
config=self.configuration,
backend=self.backend,
resampling_strategy="holdout",
resampling_strategy_args=None,
seed=1,
num_run=1,
scoring_functions=None,
output_y_hat_optimization=True,
include=None,
exclude=None,
disable_file_output=False,
instance=self.dataset_name,
metrics=[accuracy],
budget=1,
budget_type="iterations",
additional_components=dict(),
)
info = read_queue(self.queue)
self.assertEqual(len(info), 1)
self.assertAlmostEqual(info[0]["loss"], 0.06060606060606055, places=3)
self.assertEqual(info[0]["status"], StatusType.SUCCESS)
self.assertNotIn("bac_metric", info[0]["additional_run_info"])
def test_eval_holdout_budget_iterations_multi_objective(self):
metrics = {
accuracy: 0.06060606060606055,
balanced_accuracy: 0.06666666666666676,
}
eval_holdout(
queue=self.queue,
port=self.port,
config=self.configuration,
backend=self.backend,
resampling_strategy="holdout",
resampling_strategy_args=None,
seed=1,
num_run=1,
scoring_functions=None,
output_y_hat_optimization=True,
include=None,
exclude=None,
disable_file_output=False,
instance=self.dataset_name,
metrics=list(metrics.keys()),
budget=1, # Not iterative, but only for 1% of the budget
budget_type="iterations",
additional_components=dict(),
)
info = read_queue(self.queue)
self.assertEqual(len(info), 1)
for metric, loss in metrics.items():
self.assertAlmostEqual(info[0]["loss"][metric.name], loss)
self.assertEqual(info[0]["status"], StatusType.SUCCESS)
self.assertNotIn("bac_metric", info[0]["additional_run_info"])
def test_eval_holdout_budget_iterations_converged_multi_objective(self):
configuration = get_configuration_space(
exclude={"classifier": ["random_forest", "liblinear_svc"]},
datamanager=DummyDatamanager(),
).get_default_configuration()
eval_holdout(
queue=self.queue,
port=self.port,
config=configuration,
backend=self.backend,
resampling_strategy="holdout",
resampling_strategy_args=None,
seed=1,
num_run=1,
scoring_functions=None,
output_y_hat_optimization=True,
include=None,
exclude={"classifier": ["random_forest", "liblinear_svc"]},
disable_file_output=False,
instance=self.dataset_name,
metrics=[accuracy],
budget=80,
budget_type="iterations",
additional_components=dict(),
)
info = read_queue(self.queue)
self.assertEqual(len(info), 1)
self.assertAlmostEqual(info[0]["loss"], 0.18181818181818177, places=3)
self.assertEqual(info[0]["status"], StatusType.DONOTADVANCE)
self.assertNotIn("bac_metric", info[0]["additional_run_info"])
def test_eval_holdout_budget_iterations_converged(self):
metrics = {
accuracy: 0.18181818181818177,
balanced_accuracy: 0.18787878787878787,
}
configuration = get_configuration_space(
exclude={"classifier": ["random_forest", "liblinear_svc"]},
datamanager=DummyDatamanager(),
).get_default_configuration()
eval_holdout(
queue=self.queue,
port=self.port,
config=configuration,
backend=self.backend,
resampling_strategy="holdout",
resampling_strategy_args=None,
seed=1,
num_run=1,
scoring_functions=None,
output_y_hat_optimization=True,
include=None,
exclude={"classifier": ["random_forest", "liblinear_svc"]},
disable_file_output=False,
instance=self.dataset_name,
metrics=list(metrics.keys()),
budget=80,
budget_type="iterations",
additional_components=dict(),
)
info = read_queue(self.queue)
self.assertEqual(len(info), 1)
for metric, loss in metrics.items():
self.assertAlmostEqual(info[0]["loss"][metric.name], loss)
self.assertEqual(info[0]["status"], StatusType.DONOTADVANCE)
self.assertNotIn("bac_metric", info[0]["additional_run_info"])
def test_eval_holdout_budget_subsample(self):
eval_holdout(
queue=self.queue,
port=self.port,
config=self.configuration,
backend=self.backend,
resampling_strategy="holdout",
resampling_strategy_args=None,
seed=1,
num_run=1,
scoring_functions=None,
output_y_hat_optimization=True,
include=None,
exclude=None,
disable_file_output=False,
instance=self.dataset_name,
metrics=[accuracy],
budget=30,
budget_type="subsample",
additional_components=dict(),
)
info = read_queue(self.queue)
self.assertEqual(len(info), 1)
self.assertAlmostEqual(info[0]["loss"], 0.0)
self.assertEqual(info[0]["status"], StatusType.SUCCESS)
self.assertNotIn("bac_metric", info[0]["additional_run_info"])
def test_eval_holdout_budget_subsample_multi_objective(self):
metrics = {
accuracy: 0.0,
f1_macro: 0.0,
}
eval_holdout(
queue=self.queue,
port=self.port,
config=self.configuration,
backend=self.backend,
resampling_strategy="holdout",
resampling_strategy_args=None,
seed=1,
num_run=1,
scoring_functions=None,
output_y_hat_optimization=True,
include=None,
exclude=None,
disable_file_output=False,
instance=self.dataset_name,
metrics=list(metrics.keys()),
budget=30,
budget_type="subsample",
additional_components=dict(),
)
info = read_queue(self.queue)
self.assertEqual(len(info), 1)
for metric, loss in metrics.items():
self.assertAlmostEqual(info[0]["loss"][metric.name], loss)
self.assertEqual(info[0]["status"], StatusType.SUCCESS)
self.assertNotIn("bac_metric", info[0]["additional_run_info"])
def test_eval_holdout_budget_mixed_iterations(self):
print(self.configuration)
eval_holdout(
queue=self.queue,
port=self.port,
config=self.configuration,
backend=self.backend,
resampling_strategy="holdout",
resampling_strategy_args=None,
seed=1,
num_run=1,
scoring_functions=None,
output_y_hat_optimization=True,
include=None,
exclude=None,
disable_file_output=False,
instance=self.dataset_name,
metrics=[accuracy],
budget=1,
budget_type="mixed",
additional_components=dict(),
)
info = read_queue(self.queue)
self.assertEqual(len(info), 1)
self.assertAlmostEqual(info[0]["loss"], 0.06060606060606055)
def test_eval_holdout_budget_mixed_subsample(self):
configuration = get_configuration_space(
exclude={"classifier": ["random_forest"]},
datamanager=DummyDatamanager(),
).get_default_configuration()
self.assertEqual(configuration["classifier:__choice__"], "liblinear_svc")
eval_holdout(
queue=self.queue,
port=self.port,
config=configuration,
backend=self.backend,
resampling_strategy="holdout",
resampling_strategy_args=None,
seed=1,
num_run=1,
scoring_functions=None,
output_y_hat_optimization=True,
include=None,
exclude={"classifier": ["random_forest"]},
disable_file_output=False,
instance=self.dataset_name,
metrics=[accuracy],
budget=40,
budget_type="mixed",
additional_components=dict(),
)
info = read_queue(self.queue)
self.assertEqual(len(info), 1)
self.assertAlmostEqual(info[0]["loss"], 0.06060606060606055)
self.assertEqual(info[0]["status"], StatusType.SUCCESS)
self.assertNotIn("bac_metric", info[0]["additional_run_info"])
def test_eval_cv(self):
eval_cv(
queue=self.queue,
port=self.port,
config=self.configuration,
backend=self.backend,
seed=1,
num_run=1,
resampling_strategy="cv",
resampling_strategy_args={"folds": 3},
scoring_functions=None,
output_y_hat_optimization=True,
include=None,
exclude=None,
disable_file_output=False,
instance=self.dataset_name,
metrics=[accuracy],
additional_components=dict(),
)
return_value = read_queue(self.queue)
self.assertEqual(len(return_value), 1)
self.assertAlmostEqual(return_value[0]["loss"], 0.04999999999999997)
self.assertEqual(return_value[0]["status"], StatusType.SUCCESS)
self.assertNotIn("bac_metric", return_value[0]["additional_run_info"])
def test_eval_cv_all_loss_functions(self):
eval_cv(
queue=self.queue,
port=self.port,
config=self.configuration,
backend=self.backend,
seed=1,
num_run=1,
resampling_strategy="cv",
resampling_strategy_args={"folds": 3},
scoring_functions=SCORER_LIST,
output_y_hat_optimization=True,
include=None,
exclude=None,
disable_file_output=False,
instance=self.dataset_name,
metrics=[accuracy],
additional_components=dict(),
)
return_value = read_queue(self.queue)
self.assertEqual(len(return_value), 1)
fixture = {
"accuracy": 0.04999999999999997,
"balanced_accuracy": 0.05130303030303027,
"f1_macro": 0.052793650793650775,
"f1_micro": 0.04999999999999997,
"f1_weighted": 0.050090909090909096,
"log_loss": 0.12108563414774837,
"precision_macro": 0.04963636363636359,
"precision_micro": 0.04999999999999997,
"precision_weighted": 0.045757575757575664,
"recall_macro": 0.05130303030303027,
"recall_micro": 0.04999999999999997,
"recall_weighted": 0.04999999999999997,
"num_run": 1,
"test_loss": 0.04,
"train_loss": 0.0,
}
additional_run_info = return_value[0]["additional_run_info"]
for key, value in fixture.items():
self.assertAlmostEqual(additional_run_info[key], fixture[key], msg=key)
self.assertIn("duration", additional_run_info)
self.assertEqual(
len(additional_run_info),
len(fixture) + 1,
msg=sorted(additional_run_info.items()),
)
self.assertAlmostEqual(return_value[0]["loss"], 0.04999999999999997)
self.assertEqual(return_value[0]["status"], StatusType.SUCCESS)
# def test_eval_cv_on_subset(self):
# backend_api = backend.create(self.tmp_dir, self.tmp_dir)
# eval_cv(queue=self.queue, config=self.configuration, data=self.data,
# backend=backend_api, seed=1, num_run=1, folds=5, subsample=45,
# with_predictions=True, scoring_functions=None,
# output_y_hat_optimization=True, include=None, exclude=None,
# disable_file_output=False)
# info = self.queue.get()
# self.assertAlmostEqual(info[1], 0.063004032258064502)
# self.assertEqual(info[2], 1)
def test_eval_partial_cv(self):
results = [
0.050000000000000044,
0.0,
0.09999999999999998,
0.09999999999999998,
0.050000000000000044,
]
for fold in range(5):
instance = json.dumps({"task_id": "data", "fold": fold})
eval_partial_cv(
port=self.port,
queue=self.queue,
config=self.configuration,
backend=self.backend,
seed=1,
num_run=1,
instance=instance,
resampling_strategy="partial-cv",
resampling_strategy_args={"folds": 5},
scoring_functions=None,
output_y_hat_optimization=True,
include=None,
exclude=None,
disable_file_output=False,
metrics=[accuracy],
additional_components=dict(),
)
return_value = read_queue(self.queue)
self.assertEqual(len(return_value), 1)
self.assertAlmostEqual(return_value[0]["loss"], results[fold])
self.assertEqual(return_value[0]["status"], StatusType.SUCCESS)
def test_eval_partial_cv_multi_objective(self):
metrics = {
accuracy: [
0.050000000000000044,
0.0,
0.09999999999999998,
0.09999999999999998,
0.050000000000000044,
],
balanced_accuracy: [
0.04761904761904756,
0.0,
0.10317460317460314,
0.11111111111111116,
0.05555555555555547,
],
}
for fold in range(5):
instance = json.dumps({"task_id": "data", "fold": fold})
eval_partial_cv(
port=self.port,
queue=self.queue,
config=self.configuration,
backend=self.backend,
seed=1,
num_run=1,
instance=instance,
resampling_strategy="partial-cv",
resampling_strategy_args={"folds": 5},
scoring_functions=None,
output_y_hat_optimization=True,
include=None,
exclude=None,
disable_file_output=False,
metrics=list(metrics.keys()),
additional_components=dict(),
)
return_value = read_queue(self.queue)
self.assertEqual(len(return_value), 1)
for metric, loss in metrics.items():
self.assertAlmostEqual(return_value[0]["loss"][metric.name], loss[fold])
self.assertEqual(return_value[0]["status"], StatusType.SUCCESS)
| FunctionsTest |
python | facebookresearch__faiss | benchs/bench_fw/descriptors.py | {
"start": 1551,
"end": 5543
} | class ____:
# namespace possible values:
# 1. a hive namespace
# 2. 'std_t', 'std_d', 'std_q' for the standard datasets
# via faiss.contrib.datasets.dataset_from_name()
# t - training, d - database, q - queries
# eg. "std_t"
# 3. 'syn' for synthetic data
# 4. None for local files
namespace: Optional[str] = None
# tablename possible values, corresponding to the
# namespace value above:
# 1. a hive table name
# 2. name of the standard dataset as recognized
# by faiss.contrib.datasets.dataset_from_name()
# eg. "bigann1M"
# 3. d_seed, eg. 128_1234 for 128 dimensional vectors
# with seed 1234
# 4. a local file name (relative to benchmark_io.path)
tablename: Optional[str] = None
# partition names and values for hive
# eg. ["ds=2021-09-01"]
partitions: Optional[List[str]] = None
# number of vectors to load from the dataset
num_vectors: Optional[int] = None
embedding_column: Optional[str] = None
# only when the embedding column is a map
embedding_column_key: Optional[Any] = None
embedding_id_column: Optional[str] = None
# only used when previous_assignment_table is set
# this represents the centroid id that the embedding was mapped to
# in a previous clustering job
centroid_id_column: Optional[str] = None
# filters on the dataset where each filter is a
# string rep of a filter expression
filters: Optional[List[str]] = None
# unused in open-source
splits_distribution: Optional[List[List[bytes]]] = None
# unused in open-source
splits: Optional[List[bytes]] = None
# unused in open-source
serialized_df: Optional[str] = None
sampling_rate: Optional[float] = None
# sampling column for xdb
sampling_column: Optional[str] = None
# blob store
bucket: Optional[str] = None
path: Optional[str] = None
# desc_name
desc_name: Optional[str] = None
filename_suffix: Optional[str] = None
normalize_L2: bool = False
def __hash__(self):
return hash(self.get_filename())
def get_filename(
self,
prefix: Optional[str] = None,
) -> str:
if self.desc_name is not None:
return self.desc_name
filename = ""
if prefix is not None:
filename += prefix + "_"
if self.namespace is not None:
filename += self.namespace + "_"
assert self.tablename is not None
filename += self.tablename
if self.partitions is not None:
filename += "_" + "_".join(
self.partitions
).replace("=", "_").replace("/", "_")
if self.num_vectors is not None:
filename += f"_{self.num_vectors}"
if self.filename_suffix is not None:
filename += f"_{self.filename_suffix}"
filename += "."
self.desc_name = filename
return self.desc_name
def get_kmeans_filename(self, k):
return f"{self.get_filename()}kmeans_{k}."
def k_means(self, io, k, dry_run):
logger.info(f"k_means {k} {self}")
kmeans_vectors = DatasetDescriptor(
tablename=f"{self.get_filename()}kmeans_{k}"
)
kmeans_filename = kmeans_vectors.get_filename() + "npy"
meta_filename = kmeans_vectors.get_filename() + "json"
if not io.file_exist(kmeans_filename) or not io.file_exist(
meta_filename
):
if dry_run:
return None, None, kmeans_filename
x = io.get_dataset(self)
kmeans = faiss.Kmeans(d=x.shape[1], k=k, gpu=True)
_, t, _ = timer("k_means", lambda: kmeans.train(x))
io.write_nparray(kmeans.centroids, kmeans_filename)
io.write_json({"k_means_time": t}, meta_filename)
else:
t = io.read_json(meta_filename)["k_means_time"]
return kmeans_vectors, t, None
@dataclass
| DatasetDescriptor |
python | sphinx-doc__sphinx | sphinx/domains/c/_ast.py | {
"start": 14313,
"end": 14621
} | class ____(ASTPostfixOp):
def _stringify(self, transform: StringifyTransform) -> str:
return '--'
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
signode += addnodes.desc_sig_operator('--', '--')
| ASTPostfixDec |
python | kubernetes-client__python | kubernetes/client/models/v1beta1_device_toleration.py | {
"start": 383,
"end": 8843
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'effect': 'str',
'key': 'str',
'operator': 'str',
'toleration_seconds': 'int',
'value': 'str'
}
attribute_map = {
'effect': 'effect',
'key': 'key',
'operator': 'operator',
'toleration_seconds': 'tolerationSeconds',
'value': 'value'
}
def __init__(self, effect=None, key=None, operator=None, toleration_seconds=None, value=None, local_vars_configuration=None): # noqa: E501
"""V1beta1DeviceToleration - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._effect = None
self._key = None
self._operator = None
self._toleration_seconds = None
self._value = None
self.discriminator = None
if effect is not None:
self.effect = effect
if key is not None:
self.key = key
if operator is not None:
self.operator = operator
if toleration_seconds is not None:
self.toleration_seconds = toleration_seconds
if value is not None:
self.value = value
@property
def effect(self):
"""Gets the effect of this V1beta1DeviceToleration. # noqa: E501
Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule and NoExecute. # noqa: E501
:return: The effect of this V1beta1DeviceToleration. # noqa: E501
:rtype: str
"""
return self._effect
@effect.setter
def effect(self, effect):
"""Sets the effect of this V1beta1DeviceToleration.
Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule and NoExecute. # noqa: E501
:param effect: The effect of this V1beta1DeviceToleration. # noqa: E501
:type: str
"""
self._effect = effect
@property
def key(self):
"""Gets the key of this V1beta1DeviceToleration. # noqa: E501
Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. Must be a label name. # noqa: E501
:return: The key of this V1beta1DeviceToleration. # noqa: E501
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""Sets the key of this V1beta1DeviceToleration.
Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. Must be a label name. # noqa: E501
:param key: The key of this V1beta1DeviceToleration. # noqa: E501
:type: str
"""
self._key = key
@property
def operator(self):
"""Gets the operator of this V1beta1DeviceToleration. # noqa: E501
Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a ResourceClaim can tolerate all taints of a particular category. # noqa: E501
:return: The operator of this V1beta1DeviceToleration. # noqa: E501
:rtype: str
"""
return self._operator
@operator.setter
def operator(self, operator):
"""Sets the operator of this V1beta1DeviceToleration.
Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a ResourceClaim can tolerate all taints of a particular category. # noqa: E501
:param operator: The operator of this V1beta1DeviceToleration. # noqa: E501
:type: str
"""
self._operator = operator
@property
def toleration_seconds(self):
"""Gets the toleration_seconds of this V1beta1DeviceToleration. # noqa: E501
TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. If larger than zero, the time when the pod needs to be evicted is calculated as <time when taint was adedd> + <toleration seconds>. # noqa: E501
:return: The toleration_seconds of this V1beta1DeviceToleration. # noqa: E501
:rtype: int
"""
return self._toleration_seconds
@toleration_seconds.setter
def toleration_seconds(self, toleration_seconds):
"""Sets the toleration_seconds of this V1beta1DeviceToleration.
TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. If larger than zero, the time when the pod needs to be evicted is calculated as <time when taint was adedd> + <toleration seconds>. # noqa: E501
:param toleration_seconds: The toleration_seconds of this V1beta1DeviceToleration. # noqa: E501
:type: int
"""
self._toleration_seconds = toleration_seconds
@property
def value(self):
"""Gets the value of this V1beta1DeviceToleration. # noqa: E501
Value is the taint value the toleration matches to. If the operator is Exists, the value must be empty, otherwise just a regular string. Must be a label value. # noqa: E501
:return: The value of this V1beta1DeviceToleration. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this V1beta1DeviceToleration.
Value is the taint value the toleration matches to. If the operator is Exists, the value must be empty, otherwise just a regular string. Must be a label value. # noqa: E501
:param value: The value of this V1beta1DeviceToleration. # noqa: E501
:type: str
"""
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1DeviceToleration):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1DeviceToleration):
return True
return self.to_dict() != other.to_dict()
| V1beta1DeviceToleration |
python | pytorch__pytorch | test/cpp_extensions/python_agnostic_extension/setup.py | {
"start": 480,
"end": 2107
} | class ____(distutils.command.clean.clean):
def run(self):
# Run default behavior first
distutils.command.clean.clean.run(self)
# Remove extension
for path in (ROOT_DIR / "python_agnostic").glob("**/*.so"):
path.unlink()
# Remove build and dist and egg-info directories
dirs = [
ROOT_DIR / "build",
ROOT_DIR / "dist",
ROOT_DIR / "python_agnostic.egg-info",
]
for path in dirs:
if path.exists():
shutil.rmtree(str(path), ignore_errors=True)
def get_extension():
extra_compile_args = {
"cxx": ["-fdiagnostics-color=always"],
}
if torch.cuda.is_available():
sources = list(CSRC_DIR.glob("**/*.cu"))
extension = CUDAExtension
elif torch.xpu.is_available():
sources = list(CSRC_DIR.glob("**/*.sycl"))
extension = SyclExtension
else:
raise AssertionError("Expected CUDA or XPU device backend, found none")
return [
extension(
"python_agnostic._C",
sources=sorted(str(s) for s in sources),
py_limited_api=True,
extra_compile_args=extra_compile_args,
extra_link_args=[],
)
]
setup(
name="python_agnostic",
version="0.0",
author="PyTorch Core Team",
description="Example of python agnostic extension",
ext_modules=get_extension(),
cmdclass={
"build_ext": BuildExtension.with_options(no_python_abi_suffix=True),
"clean": clean,
},
options={"bdist_wheel": {"py_limited_api": "cp39"}},
)
| clean |
python | django__django | django/core/management/base.py | {
"start": 5558,
"end": 21940
} | class ____:
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``django-admin`` or ``manage.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``ArgumentParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output and, if the command is intended to produce a block of
SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
4. If ``handle()`` or ``execute()`` raised any exception (e.g.
``CommandError``), ``run_from_argv()`` will instead print an error
message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``help``
A short description of the command, which will be printed in
help messages.
``output_transaction``
A boolean indicating whether the command outputs SQL
statements; if ``True``, the output will automatically be
wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
``False``.
``requires_migrations_checks``
A boolean; if ``True``, the command prints a warning if the set of
migrations on disk don't match the migrations in the database.
``requires_system_checks``
A list or tuple of tags, e.g. [Tags.staticfiles, Tags.models]. System
checks registered in the chosen tags will be checked for errors prior
to executing the command. The value '__all__' can be used to specify
that all system checks should be performed. Default value is '__all__'.
To validate an individual application's models
rather than all applications' models, call
``self.check(app_configs)`` from ``handle()``, where ``app_configs``
is the list of application's configuration provided by the
app registry.
``stealth_options``
A tuple of any options the command uses which aren't defined by the
argument parser.
"""
# Metadata about this command.
help = ""
# Configuration shortcuts that alter various logic.
_called_from_command_line = False
output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;"
requires_migrations_checks = False
requires_system_checks = "__all__"
# Arguments, common to all commands, which aren't defined by the argument
# parser.
base_stealth_options = ("stderr", "stdout")
# Command-specific options not defined by the argument parser.
stealth_options = ()
suppressed_base_arguments = set()
def __init__(self, stdout=None, stderr=None, no_color=False, force_color=False):
self.stdout = OutputWrapper(stdout or sys.stdout)
self.stderr = OutputWrapper(stderr or sys.stderr)
if no_color and force_color:
raise CommandError("'no_color' and 'force_color' can't be used together.")
if no_color:
self.style = no_style()
else:
self.style = color_style(force_color)
self.stderr.style_func = self.style.ERROR
if (
not isinstance(self.requires_system_checks, (list, tuple))
and self.requires_system_checks != ALL_CHECKS
):
raise TypeError("requires_system_checks must be a list or tuple.")
def get_version(self):
"""
Return the Django version, which should be correct for all built-in
Django commands. User-supplied commands can override this method to
return their own version.
"""
return django.get_version()
def create_parser(self, prog_name, subcommand, **kwargs):
"""
Create and return the ``ArgumentParser`` which will be used to
parse the arguments to this command.
"""
kwargs.setdefault("formatter_class", DjangoHelpFormatter)
parser = CommandParser(
prog="%s %s" % (os.path.basename(prog_name), subcommand),
description=self.help or None,
missing_args_message=getattr(self, "missing_args_message", None),
called_from_command_line=getattr(self, "_called_from_command_line", None),
**kwargs,
)
self.add_base_argument(
parser,
"--version",
action="version",
version=self.get_version(),
help="Show program's version number and exit.",
)
self.add_base_argument(
parser,
"-v",
"--verbosity",
default=1,
type=int,
choices=[0, 1, 2, 3],
help=(
"Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, "
"3=very verbose output"
),
)
self.add_base_argument(
parser,
"--settings",
help=(
"The Python path to a settings module, e.g. "
'"myproject.settings.main". If this isn\'t provided, the '
"DJANGO_SETTINGS_MODULE environment variable will be used."
),
)
self.add_base_argument(
parser,
"--pythonpath",
help=(
"A directory to add to the Python path, e.g. "
'"/home/djangoprojects/myproject".'
),
)
self.add_base_argument(
parser,
"--traceback",
action="store_true",
help="Display a full stack trace on CommandError exceptions.",
)
self.add_base_argument(
parser,
"--no-color",
action="store_true",
help="Don't colorize the command output.",
)
self.add_base_argument(
parser,
"--force-color",
action="store_true",
help="Force colorization of the command output.",
)
if self.requires_system_checks:
parser.add_argument(
"--skip-checks",
action="store_true",
help="Skip system checks.",
)
self.add_arguments(parser)
return parser
def add_arguments(self, parser):
"""
Entry point for subclassed commands to add custom arguments.
"""
pass
def add_base_argument(self, parser, *args, **kwargs):
"""
Call the parser's add_argument() method, suppressing the help text
according to BaseCommand.suppressed_base_arguments.
"""
for arg in args:
if arg in self.suppressed_base_arguments:
kwargs["help"] = argparse.SUPPRESS
break
parser.add_argument(*args, **kwargs)
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command. If the
command raises a ``CommandError``, intercept it and print it sensibly
to stderr. If the ``--traceback`` option is present or the raised
``Exception`` is not ``CommandError``, raise it.
"""
self._called_from_command_line = True
parser = self.create_parser(argv[0], argv[1])
options = parser.parse_args(argv[2:])
cmd_options = vars(options)
# Move positional args out of options to mimic legacy optparse
args = cmd_options.pop("args", ())
handle_default_options(options)
try:
self.execute(*args, **cmd_options)
except CommandError as e:
if options.traceback:
raise
# SystemCheckError takes care of its own formatting.
if isinstance(e, SystemCheckError):
self.stderr.write(str(e), lambda x: x)
else:
self.stderr.write("%s: %s" % (e.__class__.__name__, e))
sys.exit(e.returncode)
finally:
try:
connections.close_all()
except ImproperlyConfigured:
# Ignore if connections aren't setup at this point (e.g. no
# configured settings).
pass
def execute(self, *args, **options):
"""
Try to execute this command, performing system checks if needed (as
controlled by the ``requires_system_checks`` attribute, except if
force-skipped).
"""
if options["force_color"] and options["no_color"]:
raise CommandError(
"The --no-color and --force-color options can't be used together."
)
if options["force_color"]:
self.style = color_style(force_color=True)
elif options["no_color"]:
self.style = no_style()
self.stderr.style_func = None
if options.get("stdout"):
self.stdout = OutputWrapper(options["stdout"])
if options.get("stderr"):
self.stderr = OutputWrapper(options["stderr"])
if self.requires_system_checks and not options["skip_checks"]:
check_kwargs = self.get_check_kwargs(options)
self.check(**check_kwargs)
if self.requires_migrations_checks:
self.check_migrations()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
connection = connections[options.get("database", DEFAULT_DB_ALIAS)]
output = "%s\n%s\n%s" % (
self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()),
output,
self.style.SQL_KEYWORD(connection.ops.end_transaction_sql()),
)
self.stdout.write(output)
return output
def get_check_kwargs(self, options):
if self.requires_system_checks == ALL_CHECKS:
return {}
return {"tags": self.requires_system_checks}
def check(
self,
app_configs=None,
tags=None,
display_num_errors=False,
include_deployment_checks=False,
fail_level=checks.ERROR,
databases=None,
):
"""
Use the system check framework to validate entire Django project.
Raise CommandError for any serious message (error or critical errors).
If there are only light messages (like warnings), print them to stderr
and don't raise an exception.
"""
all_issues = checks.run_checks(
app_configs=app_configs,
tags=tags,
include_deployment_checks=include_deployment_checks,
databases=databases,
)
header, body, footer = "", "", ""
visible_issue_count = 0 # excludes silenced warnings
if all_issues:
debugs = [
e for e in all_issues if e.level < checks.INFO and not e.is_silenced()
]
infos = [
e
for e in all_issues
if checks.INFO <= e.level < checks.WARNING and not e.is_silenced()
]
warnings = [
e
for e in all_issues
if checks.WARNING <= e.level < checks.ERROR and not e.is_silenced()
]
errors = [
e
for e in all_issues
if checks.ERROR <= e.level < checks.CRITICAL and not e.is_silenced()
]
criticals = [
e
for e in all_issues
if checks.CRITICAL <= e.level and not e.is_silenced()
]
sorted_issues = [
(criticals, "CRITICALS"),
(errors, "ERRORS"),
(warnings, "WARNINGS"),
(infos, "INFOS"),
(debugs, "DEBUGS"),
]
for issues, group_name in sorted_issues:
if issues:
visible_issue_count += len(issues)
formatted = (
(
self.style.ERROR(str(e))
if e.is_serious()
else self.style.WARNING(str(e))
)
for e in issues
)
formatted = "\n".join(sorted(formatted))
body += "\n%s:\n%s\n" % (group_name, formatted)
if visible_issue_count:
header = "System check identified some issues:\n"
if display_num_errors:
if visible_issue_count:
footer += "\n"
footer += "System check identified %s (%s silenced)." % (
(
"no issues"
if visible_issue_count == 0
else (
"1 issue"
if visible_issue_count == 1
else "%s issues" % visible_issue_count
)
),
len(all_issues) - visible_issue_count,
)
if any(e.is_serious(fail_level) and not e.is_silenced() for e in all_issues):
msg = self.style.ERROR("SystemCheckError: %s" % header) + body + footer
raise SystemCheckError(msg)
else:
msg = header + body + footer
if msg:
if visible_issue_count:
self.stderr.write(msg, lambda x: x)
else:
self.stdout.write(msg)
def check_migrations(self):
"""
Print a warning if the set of migrations on disk don't match the
migrations in the database.
"""
from django.db.migrations.executor import MigrationExecutor
try:
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
except ImproperlyConfigured:
# No databases are configured (or the dummy one)
return
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
if plan:
apps_waiting_migration = sorted(
{migration.app_label for migration, backwards in plan}
)
self.stdout.write(
self.style.NOTICE(
"\nYou have %(unapplied_migration_count)s unapplied migration(s). "
"Your project may not work properly until you apply the "
"migrations for app(s): %(apps_waiting_migration)s."
% {
"unapplied_migration_count": len(plan),
"apps_waiting_migration": ", ".join(apps_waiting_migration),
}
)
)
self.stdout.write(
self.style.NOTICE("Run 'python manage.py migrate' to apply them.")
)
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError(
"subclasses of BaseCommand must provide a handle() method"
)
| BaseCommand |
python | davidhalter__jedi | test/completion/usages.py | {
"start": 3965,
"end": 4486
} | class ____(Super):
#< 4 (0,4),
class_var = 1
def x_method(self):
#< (0,18), (2,13), (-23,4)
TestClass.base_class
#< (-2,18), (0,13), (-25,4)
self.base_class
#< (-20,13), (0,13)
self.base_var
#< (0, 18),
TestClass.base_var
#< 13 (5,13), (0,13)
self.instance_var = 3
#< 9 (0,8),
def just_a_method(self):
#< (-5,13), (0,13)
self.instance_var
# -----------------
# properties
# -----------------
| TestClass |
python | django__django | django/db/migrations/graph.py | {
"start": 967,
"end": 1544
} | class ____(Node):
"""
A node that doesn't correspond to a migration file on disk.
(A squashed migration that was removed, for example.)
After the migration graph is processed, all dummy nodes should be removed.
If there are any left, a nonexistent dependency error is raised.
"""
def __init__(self, key, origin, error_message):
super().__init__(key)
self.origin = origin
self.error_message = error_message
def raise_error(self):
raise NodeNotFoundError(self.error_message, self.key, origin=self.origin)
| DummyNode |
python | scikit-learn__scikit-learn | sklearn/utils/_param_validation.py | {
"start": 11022,
"end": 11373
} | class ____(_Constraint):
"""Constraint representing the indicator `pd.NA`."""
def is_satisfied_by(self, val):
try:
import pandas as pd
return isinstance(val, type(pd.NA)) and pd.isna(val)
except ImportError:
return False
def __str__(self):
return "pandas.NA"
| _PandasNAConstraint |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/schedule_dry_run.py | {
"start": 788,
"end": 1545
} | class ____(graphene.Mutation):
"""Enable a schedule to launch runs for a job based on external state change."""
Output = graphene.NonNull(GrapheneScheduleDryRunResult)
class Arguments:
selector_data = graphene.NonNull(GrapheneScheduleSelector)
timestamp = graphene.Float()
class Meta:
name = "ScheduleDryRunMutation"
@capture_error
def mutate(
self, graphene_info: "ResolveInfo", selector_data: Mapping[str, Any], timestamp: float
):
return GrapheneDryRunInstigationTick(
selector=ScheduleSelector.from_graphql_input(selector_data), timestamp=timestamp
)
types = [
GrapheneScheduleDryRunMutation,
GrapheneScheduleDryRunResult,
]
| GrapheneScheduleDryRunMutation |
python | huggingface__transformers | src/transformers/models/data2vec/modeling_data2vec_vision.py | {
"start": 39261,
"end": 39974
} | class ____(nn.Module):
def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
super().__init__()
self.layers = [
nn.AdaptiveAvgPool2d(pool_scale),
Data2VecVisionConvModule(in_channels, channels, kernel_size=1),
]
for i, layer in enumerate(self.layers):
self.add_module(str(i), layer)
def forward(self, input: torch.Tensor) -> torch.Tensor:
hidden_state = input
for layer in self.layers:
hidden_state = layer(hidden_state)
return hidden_state
# Copied from transformers.models.beit.modeling_beit.BeitPyramidPoolingModule with Beit->Data2VecVision
| Data2VecVisionPyramidPoolingBlock |
python | matplotlib__matplotlib | lib/matplotlib/patches.py | {
"start": 38707,
"end": 41622
} | class ____(Patch):
"""A general polygon patch."""
def __str__(self):
if len(self._path.vertices):
s = "Polygon%d((%g, %g) ...)"
return s % (len(self._path.vertices), *self._path.vertices[0])
else:
return "Polygon0()"
@_docstring.interpd
def __init__(self, xy, *, closed=True, **kwargs):
"""
Parameters
----------
xy : (N, 2) array
closed : bool, default: True
Whether the polygon is closed (i.e., has identical start and end
points).
**kwargs
%(Patch:kwdoc)s
"""
super().__init__(**kwargs)
self._closed = closed
self.set_xy(xy)
def get_path(self):
"""Get the `.Path` of the polygon."""
return self._path
def get_closed(self):
"""Return whether the polygon is closed."""
return self._closed
def set_closed(self, closed):
"""
Set whether the polygon is closed.
Parameters
----------
closed : bool
True if the polygon is closed
"""
if self._closed == bool(closed):
return
self._closed = bool(closed)
self.set_xy(self.get_xy())
self.stale = True
def get_xy(self):
"""
Get the vertices of the path.
Returns
-------
(N, 2) array
The coordinates of the vertices.
"""
return self._path.vertices
def set_xy(self, xy):
"""
Set the vertices of the polygon.
Parameters
----------
xy : (N, 2) array-like
The coordinates of the vertices.
Notes
-----
Unlike `.Path`, we do not ignore the last input vertex. If the
polygon is meant to be closed, and the last point of the polygon is not
equal to the first, we assume that the user has not explicitly passed a
``CLOSEPOLY`` vertex, and add it ourselves.
"""
xy = np.asarray(xy)
nverts, _ = xy.shape
if self._closed:
# if the first and last vertex are the "same", then we assume that
# the user explicitly passed the CLOSEPOLY vertex. Otherwise, we
# have to append one since the last vertex will be "ignored" by
# Path
if nverts == 1 or nverts > 1 and (xy[0] != xy[-1]).any():
xy = np.concatenate([xy, [xy[0]]])
else:
# if we aren't closed, and the last vertex matches the first, then
# we assume we have an unnecessary CLOSEPOLY vertex and remove it
if nverts > 2 and (xy[0] == xy[-1]).all():
xy = xy[:-1]
self._path = Path(xy, closed=self._closed)
self.stale = True
xy = property(get_xy, set_xy,
doc='The vertices of the path as a (N, 2) array.')
| Polygon |
python | realpython__materials | hangman-pysimplegui/source_code_step_4/hangman.py | {
"start": 86,
"end": 5186
} | class ____:
def __init__(self) -> None:
layout = [
[
self._build_canvas_frame(),
self._build_letters_frame(),
],
[
self._build_guessed_word_frame(),
],
[
self._build_action_buttons_frame(),
],
]
self._window = sg.Window(title="Hangman", layout=layout, finalize=True)
self._canvas = self._window["-CANVAS-"]
# Temporary code
self._draw_scaffold()
for index in range(MAX_WRONG_GUESSES):
self._wrong_guesses = index + 1
self._draw_hanged_man()
def _build_canvas_frame(self):
return sg.Frame(
"Hangman",
[
[
sg.Graph(
key="-CANVAS-",
canvas_size=(200, 400),
graph_bottom_left=(0, 0),
graph_top_right=(200, 400),
)
]
],
font="Any 20",
)
def _build_letters_frame(self):
letter_groups = [
ascii_uppercase[i : i + 4]
for i in range(0, len(ascii_uppercase), 4)
]
letter_buttons = [
[
sg.Button(
button_text=f" {letter} ",
font="Courier 20",
border_width=0,
button_color=(None, sg.theme_background_color()),
key=f"-letter-{letter}-",
enable_events=True,
)
for letter in letter_group
]
for letter_group in letter_groups
]
return sg.Column(
[
[
sg.Frame(
"Letters",
letter_buttons,
font="Any 20",
),
sg.Sizer(),
]
]
)
def _build_guessed_word_frame(self):
return sg.Frame(
"",
[
[
sg.Text(
key="-DISPLAY-WORD-",
font="Courier 20",
)
]
],
element_justification="center",
)
def _build_action_buttons_frame(self):
return sg.Frame(
"",
[
[
sg.Sizer(h_pixels=90),
sg.Button(
button_text="New",
key="-NEW-",
font="Any 20",
),
sg.Sizer(h_pixels=60),
sg.Button(
button_text="Restart",
key="-RESTART-",
font="Any 20",
),
sg.Sizer(h_pixels=60),
sg.Button(
button_text="Quit",
key="-QUIT-",
font="Any 20",
),
sg.Sizer(h_pixels=90),
]
],
font="Any 20",
)
def _draw_scaffold(self):
lines = [
((40, 55), (180, 55), 10),
((165, 60), (165, 365), 10),
((160, 360), (100, 360), 10),
((100, 365), (100, 330), 10),
((100, 330), (100, 310), 1),
]
for *points, width in lines:
self._canvas.DrawLine(*points, color="black", width=width)
def _draw_hanged_man(self):
head = (100, 290)
torso = [((100, 270), (100, 170))]
left_arm = [
((100, 250), (80, 250)),
((80, 250), (60, 210)),
((60, 210), (60, 190)),
]
right_arm = [
((100, 250), (120, 250)),
((120, 250), (140, 210)),
((140, 210), (140, 190)),
]
left_leg = [
((100, 170), (80, 170)),
((80, 170), (70, 140)),
((70, 140), (70, 80)),
((70, 80), (60, 80)),
]
right_leg = [
((100, 170), (120, 170)),
((120, 170), (130, 140)),
((130, 140), (130, 80)),
((130, 80), (140, 80)),
]
body = [
torso,
left_arm,
right_arm,
left_leg,
right_leg,
]
if self._wrong_guesses == 1:
self._canvas.DrawCircle(head, 20, line_color="red", line_width=2)
elif self._wrong_guesses > 1:
for part in body[self._wrong_guesses - 2]:
self._canvas.DrawLine(*part, color="red", width=2)
def read_event(self):
return self._window.read()
def close(self):
self._window.close()
if __name__ == "__main__":
game = Hangman()
# Event loop
while True:
event, values = game.read_event()
if event in {sg.WIN_CLOSED}:
break
game.close()
| Hangman |
python | vyperlang__vyper | vyper/venom/passes/float_allocas.py | {
"start": 50,
"end": 1320
} | class ____(IRPass):
"""
This pass moves allocas to the entry basic block of a function
We could probably move them to the immediate dominator of the basic
block defining the alloca instead of the entry (which dominates all
basic blocks), but this is done for expedience.
Without this step, sccp fails, possibly because dominators are not
guaranteed to be traversed first.
"""
def run_pass(self):
entry_bb = self.function.entry
assert entry_bb.is_terminated, entry_bb
tmp = entry_bb.instructions.pop()
for bb in self.function.get_basic_blocks():
if bb is entry_bb:
continue
# Extract alloca instructions
non_alloca_instructions = []
for inst in bb.instructions:
if inst.opcode in ("alloca", "palloca", "calloca"):
# note: order of allocas impacts bytecode.
# TODO: investigate.
entry_bb.insert_instruction(inst)
else:
non_alloca_instructions.append(inst)
# Replace original instructions with filtered list
bb.instructions = non_alloca_instructions
entry_bb.instructions.append(tmp)
| FloatAllocas |
python | matplotlib__matplotlib | lib/matplotlib/backend_bases.py | {
"start": 105397,
"end": 105531
} | class ____(str, Enum):
NONE = ""
PAN = "pan/zoom"
ZOOM = "zoom rect"
def __str__(self):
return self.value
| _Mode |
python | ansible__ansible | lib/ansible/module_utils/_internal/_json/_profiles/_fallback_to_str.py | {
"start": 530,
"end": 2285
} | class ____(_profiles._JSONSerializationProfile["Encoder", "Decoder"]):
serialize_map: _t.ClassVar[dict[type, _t.Callable]]
@classmethod
def post_init(cls) -> None:
cls.serialize_map = {
bytes: cls.serialize_bytes_as_str,
set: cls.serialize_as_list,
tuple: cls.serialize_as_list,
_datetime.date: cls.serialize_as_isoformat,
_datetime.time: cls.serialize_as_isoformat,
_datetime.datetime: cls.serialize_as_isoformat,
_datatag._AnsibleTaggedDate: cls.discard_tags,
_datatag._AnsibleTaggedTime: cls.discard_tags,
_datatag._AnsibleTaggedDateTime: cls.discard_tags,
_datatag._AnsibleTaggedStr: cls.discard_tags,
_datatag._AnsibleTaggedInt: cls.discard_tags,
_datatag._AnsibleTaggedFloat: cls.discard_tags,
_datatag._AnsibleTaggedSet: cls.discard_tags,
_datatag._AnsibleTaggedList: cls.discard_tags,
_datatag._AnsibleTaggedTuple: cls.discard_tags,
_datatag._AnsibleTaggedDict: cls.discard_tags,
_datatag._AnsibleTaggedBytes: cls.discard_tags,
}
@classmethod
def serialize_bytes_as_str(cls, value: bytes) -> str:
return value.decode(errors='surrogateescape')
@classmethod
def handle_key(cls, k: _t.Any) -> _t.Any:
while mapped_callable := cls.serialize_map.get(type(k)):
k = mapped_callable(k)
k = cls.default(k)
if not isinstance(k, str):
k = _dumps(k, cls=Encoder)
return k
@classmethod
def last_chance(cls, o: _t.Any) -> _t.Any:
try:
return str(o)
except Exception as ex:
return str(ex)
| _Profile |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/gitlab/tests.py | {
"start": 448,
"end": 3931
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = GitLabProvider.id
_uid = 2
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""
{
"avatar_url": "https://secure.gravatar.com/avatar/123",
"bio": null,
"can_create_group": true,
"can_create_project": true,
"color_scheme_id": 5,
"confirmed_at": "2015-03-02T16:53:58.370Z",
"created_at": "2015-03-02T16:53:58.885Z",
"current_sign_in_at": "2018-06-12T18:44:49.985Z",
"email": "mr.bob@gitlab.example.com",
"external": false,
"id": 2,
"identities": [],
"last_activity_on": "2018-06-11",
"last_sign_in_at": "2018-05-31T14:59:44.527Z",
"linkedin": "",
"location": null,
"name": "Mr Bob",
"organization": null,
"projects_limit": 10,
"shared_runners_minutes_limit": 2000,
"skype": "",
"state": "active",
"theme_id": 6,
"twitter": "mrbob",
"two_factor_enabled": true,
"username": "mr.bob",
"web_url": "https://gitlab.example.com/u/mr.bob",
"website_url": ""
}
""",
)
def get_expected_to_str(self):
return "mr.bob"
def test_valid_response(self):
data = {"id": 12345}
response = MockedResponse(HTTPStatus.OK, json.dumps(data))
self.assertEqual(_check_errors(response), data)
def test_invalid_data(self):
response = MockedResponse(HTTPStatus.OK, json.dumps({}))
with self.assertRaises(OAuth2Error):
# No id, raises
_check_errors(response)
def test_account_invalid_response(self):
body = (
"403 Forbidden - You (@domain.com) must accept the Terms of "
"Service in order to perform this action. Please access GitLab "
"from a web browser to accept these terms."
)
response = MockedResponse(HTTPStatus.FORBIDDEN, body)
# GitLab allow users to login with their API and provides
# an error requiring the user to accept the Terms of Service.
# see: https://gitlab.com/gitlab-org/gitlab-foss/-/issues/45849
with self.assertRaises(OAuth2Error):
# no id, 4xx code, raises
_check_errors(response)
def test_error_response(self):
body = "403 Forbidden"
response = MockedResponse(HTTPStatus.FORBIDDEN, body)
with self.assertRaises(OAuth2Error):
# no id, 4xx code, raises
_check_errors(response)
def test_invalid_response(self):
response = MockedResponse(HTTPStatus.OK, json.dumps({}))
with self.assertRaises(OAuth2Error):
# No id, raises
_check_errors(response)
def test_bad_response(self):
response = MockedResponse(HTTPStatus.BAD_REQUEST, json.dumps({}))
with self.assertRaises(OAuth2Error):
# bad json, raises
_check_errors(response)
def test_extra_data(self):
self.login(self.get_mocked_response())
account = SocialAccount.objects.get(uid=str(self._uid))
self.assertEqual(account.extra_data["id"], self._uid)
| GitLabTests |
python | mlflow__mlflow | mlflow/types/responses_helpers.py | {
"start": 3947,
"end": 4127
} | class ____(Status):
approval_request_id: str
approve: bool
type: str = "mcp_approval_response"
id: str | None = None
reason: str | None = None
| McpApprovalResponse |
python | cython__cython | tests/run/extra_walrus.py | {
"start": 3180,
"end": 9415
} | class ____:
"""
>>> InLambdaInClass.x1
12
>>> InLambdaInClass.x2
[12, 12]
"""
x1 = (lambda y_global: (y_global := y_global + 1) + y_global)(2) + y_global
x2 = [(lambda y_global: (y_global := y_global + 1) + y_global)(2) + y_global for _ in range(2) ]
def in_lambda_in_list_comprehension1():
"""
>>> in_lambda_in_list_comprehension1()
[[0, 2, 4, 6], [0, 2, 4, 6], [0, 2, 4, 6], [0, 2, 4, 6], [0, 2, 4, 6]]
"""
return [ (lambda x: [(x := y) + x for y in range(4)])(x) for x in range(5) ]
def in_lambda_in_list_comprehension2():
"""
>>> in_lambda_in_list_comprehension2()
[[0, 1, 2, 3], [1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6], [4, 5, 6, 7]]
"""
return [ (lambda z: [(x := y) + z for y in range(4)])(x) for x in range(5) ]
def in_lambda_in_generator_expression1():
"""
>>> in_lambda_in_generator_expression1()
[(0, 2, 4, 6), (0, 2, 4, 6), (0, 2, 4, 6), (0, 2, 4, 6), (0, 2, 4, 6)]
"""
return [ (lambda x: tuple((x := y) + x for y in range(4)))(x) for x in range(5) ]
def in_lambda_in_generator_expression2():
"""
>>> in_lambda_in_generator_expression2()
[(0, 1, 2, 3), (1, 2, 3, 4), (2, 3, 4, 5), (3, 4, 5, 6), (4, 5, 6, 7)]
"""
return [ (lambda z: tuple((x := y) + z for y in range(4)))(x) for x in range(5) ]
# A bunch of tests where assignment may/may not happen and flow control has to
# be able to detect this to avoid crashing:
def flow_control_binops1(test, value):
"""
>>> flow_control_binops1(True, "value")
('value', 'value')
>>> flow_control_binops1(False, "value") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnboundLocalError
"""
res = test and (target := value)
return res, target
def flow_control_binops2(test, value):
"""
>>> flow_control_binops2(True, "value") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnboundLocalError
>>> flow_control_binops2(False, "value")
('value', 'value')
"""
res = test or (target := value)
return res, target
def flow_control_binops3(test, value):
"""
>>> flow_control_binops3(True, "value")
('value', 'value')
>>> flow_control_binops3(False, "value") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnboundLocalError
"""
# "True" may or may not be optimized out here
# but either way the assignment is uncertain
res = True and test and (target := value)
return res, target
def flow_control_binops4(test1, test2, value):
"""
>>> flow_control_binops4(True, True, "value")
('value', 'value')
>>> flow_control_binops4(False, True, "value") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnboundLocalError
>>> flow_control_binops4(False, False, "value") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnboundLocalError
>>> flow_control_binops4(True, False, "value") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnboundLocalError
"""
# "True" may or may not be optimized out here
# but either way the assignment is uncertain
res = test1 and test2 and (target := value)
return res, target
def flow_control_cond_expr1(test, value):
"""
>>> flow_control_cond_expr1(True, "value")
('value', 'value')
>>> flow_control_cond_expr1(False, "value") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnboundLocalError
"""
res = (target := value) if test else None
return res, target
def flow_control_cond_expr2(test, value):
"""
>>> flow_control_cond_expr2(True, "value") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnboundLocalError
>>> flow_control_cond_expr2(False, "value")
('value', 'value')
"""
res = None if test else (target := value)
return res, target
def flow_control_cond_expr3(test, value1, value2):
"""
>>> flow_control_cond_expr3(True, "value1", "value2")
('value1', 'value1')
>>> flow_control_cond_expr3(False, "value1", "value2")
('value2', 'value2')
"""
res = (target := value1) if test else (target := value2)
# Not tested here (but I believe working) - Cython shouldn't need
# to generate an unbound local check for "target"
return res, target
def flow_control_list_comp(it, value):
"""
>>> flow_control_list_comp([1], "value")
'value'
>>> flow_control_list_comp([], "value") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnboundLocalError
"""
[(target := value) for _ in it]
return target
def flow_control_set_comp(it, value):
"""
>>> flow_control_set_comp([1], "value")
'value'
>>> flow_control_set_comp([], "value") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnboundLocalError
"""
{(target := value) for _ in it}
return target
def flow_control_dict_comp1(it, value):
"""
>>> flow_control_dict_comp1([1], "value")
'value'
>>> flow_control_dict_comp1([], "value") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnboundLocalError
"""
{(target := value): x for x in it}
return target
def flow_control_dict_comp2(it, value):
"""
>>> flow_control_dict_comp2([1], "value")
'value'
>>> flow_control_dict_comp2([], "value") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnboundLocalError
"""
{x: (target := value) for x in it}
return target
def flow_control_genexp(it, value):
"""
>>> flow_control_genexp([1], "value")
'value'
>>> flow_control_genexp([], "value") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
UnboundLocalError
"""
all((target := value) for _ in it)
return target
def memoryview_walrus(x: cython.uchar[:]):
"""
>>> memoryview_walrus(bytearray(b"123"))
'1'
"""
(y := x)
return chr(y[0])
| InLambdaInClass |
python | wandb__wandb | wandb/analytics/sentry.py | {
"start": 1705,
"end": 9203
} | class ____:
def __init__(self, *, pid: int) -> None:
from wandb import env as _env
self._pid: int = pid
self._enabled: bool = bool(_env.error_reporting_enabled())
self._booted: bool = False
self._boot_lock = threading.Lock()
self._atexit_registered: bool = False
self._sent_messages: set[str] = set()
self._sdk: Any | None = None # will hold the sentry_sdk module after boot
self.scope: Any | None = None
self.dsn: str | None = os.environ.get(_env.SENTRY_DSN, SENTRY_DEFAULT_DSN)
@property
def environment(self) -> str:
is_git = pathlib.Path(__file__).parent.parent.parent.joinpath(".git").exists()
return "development" if is_git else "production"
def _boot(self) -> bool:
"""Import sentry_sdk and set up client/scope."""
from wandb import __version__
with self._boot_lock:
if not self._enabled:
return False
if self._booted:
return True
try:
import sentry_sdk # type: ignore
import sentry_sdk.scope # type: ignore
import sentry_sdk.utils # type: ignore
self._sdk = sentry_sdk
client = self._sdk.Client(
dsn=self.dsn,
default_integrations=False,
environment=self.environment,
release=__version__,
)
scope = self._sdk.get_global_scope().fork()
scope.clear()
scope.set_client(client)
self.scope = scope
self._booted = True
if not self._atexit_registered:
atexit.register(self.end_session)
self._atexit_registered = True
except Exception:
# Disable on any failure.
self._enabled = False
self._booted = False
self._sdk = None
self.scope = None
return False
return True
@_guard
def message(
self,
message: str,
repeat: bool = True,
level: str = "info",
) -> str | None:
if not repeat and message in self._sent_messages:
return None
self._sent_messages.add(message)
with self._sdk.scope.use_isolation_scope(self.scope): # type: ignore
return self._sdk.capture_message(message, level=level) # type: ignore
@_guard
def exception(
self,
exc: str
| BaseException
| tuple[
type[BaseException] | None,
BaseException | None,
TracebackType | None,
]
| None,
handled: bool = False,
status: SessionStatus | None = None,
) -> str | None:
if isinstance(exc, str):
exc_info = self._sdk.utils.exc_info_from_error(Exception(exc)) # type: ignore
elif isinstance(exc, BaseException):
exc_info = self._sdk.utils.exc_info_from_error(exc) # type: ignore
else:
exc_info = sys.exc_info()
event, _ = self._sdk.utils.event_from_exception( # type: ignore
exc_info,
client_options=self.scope.get_client().options, # type: ignore
mechanism={"type": "generic", "handled": handled},
)
event_id = None
with contextlib.suppress(Exception):
with self._sdk.scope.use_isolation_scope(self.scope): # type: ignore
event_id = self._sdk.capture_event(event) # type: ignore
status = status or ("crashed" if not handled else "errored") # type: ignore
self.mark_session(status=status)
client = self.scope.get_client() # type: ignore
if client is not None:
client.flush()
return event_id
def reraise(self, exc: Any) -> Never:
"""Re-raise after logging, preserving traceback. Safe if disabled."""
try:
self.exception(exc) # @_guard applies here
finally:
_, _, tb = sys.exc_info()
if tb is not None and hasattr(exc, "with_traceback"):
raise exc.with_traceback(tb)
raise exc
@_guard
def start_session(self) -> None:
if self.scope is None:
return
if self.scope._session is None:
self.scope.start_session()
@_guard
def end_session(self) -> None:
if self.scope is None:
return
client = self.scope.get_client()
session = self.scope._session
if session is not None and client is not None:
self.scope.end_session()
client.flush()
@_guard
def mark_session(self, status: SessionStatus | None = None) -> None:
if self.scope is None:
return
session = self.scope._session
if session is not None:
session.update(status=status)
@_guard
def configure_scope(
self,
tags: dict[str, Any] | None = None,
process_context: str | None = None,
) -> None:
import wandb.util
if self.scope is None:
return
settings_tags = (
"entity",
"project",
"run_id",
"run_url",
"sweep_url",
"sweep_id",
"deployment",
"launch",
"_platform",
)
if process_context:
self.scope.set_tag("process_context", process_context)
if tags is None:
return None
for tag in settings_tags:
val = tags.get(tag, None)
if val not in (None, ""):
self.scope.set_tag(tag, val)
if tags.get("_colab", None):
python_runtime = "colab"
elif tags.get("_jupyter", None):
python_runtime = "jupyter"
elif tags.get("_ipython", None):
python_runtime = "ipython"
else:
python_runtime = "python"
self.scope.set_tag("python_runtime", python_runtime)
# Construct run_url and sweep_url given run_id and sweep_id.
for obj in ("run", "sweep"):
obj_id, obj_url = f"{obj}_id", f"{obj}_url"
if tags.get(obj_url, None):
continue
try:
app_url = wandb.util.app_url(tags["base_url"]) # type: ignore[index]
entity, project = (quote(tags[k]) for k in ("entity", "project")) # type: ignore[index]
self.scope.set_tag(
obj_url,
f"{app_url}/{entity}/{project}/{obj}s/{tags[obj_id]}",
)
except Exception:
pass
email = tags.get("email")
if email:
self.scope.user = {"email": email}
self.start_session()
_singleton: Sentry | None = None
_singleton_lock = threading.Lock()
def get_sentry() -> Sentry:
"""Return the Sentry singleton for the current process (fork-aware).
Creates a new instance in child processes after fork.
Thread-safe within each process.
"""
global _singleton
pid = os.getpid()
with _singleton_lock:
if _singleton is not None and _singleton._pid == pid:
return _singleton
if _singleton is None or _singleton._pid != pid:
_singleton = Sentry(pid=pid)
return _singleton
| Sentry |
python | huggingface__transformers | src/transformers/models/diffllama/modeling_diffllama.py | {
"start": 21041,
"end": 25198
} | class ____(DiffLlamaAttention):
"""
DiffLlama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
`DiffLlamaAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to
SDPA API.
"""
# Adapted from DiffLlamaAttention.forward
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: bool = False,
cache_position: Optional[torch.LongTensor] = None,
**kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
value_states = torch.cat(torch.chunk(value_states, 2, dim=1), dim=-1)
value_states = value_states.repeat(1, 2, 1, 1)
causal_mask = attention_mask
if attention_mask is not None:
causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
# SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
# Reference: https://github.com/pytorch/pytorch/issues/112577.
if query_states.device.type == "cuda" and causal_mask is not None:
query_states = query_states.contiguous()
key_states = key_states.contiguous()
value_states = value_states.contiguous()
# We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
# in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
is_causal = causal_mask is None and q_len > 1
attn_output = torch.nn.functional.scaled_dot_product_attention(
query_states,
key_states,
value_states,
attn_mask=causal_mask,
dropout_p=self.attention_dropout if self.training else 0.0,
is_causal=is_causal,
)
attn_output1, attn_output2 = torch.chunk(attn_output, 2, dim=1)
lambda_1 = torch.exp(torch.sum(self.lambda_q1 * self.lambda_k1, dim=-1, dtype=torch.float32)).to(
query_states.dtype
)
lambda_2 = torch.exp(torch.sum(self.lambda_q2 * self.lambda_k2, dim=-1, dtype=torch.float32)).to(
query_states.dtype
)
lambda_full = lambda_1 - lambda_2 + self.lambda_init
attn_output = attn_output1 - lambda_full * attn_output2
attn_output = (1 - self.lambda_init) * self.groupnorm(attn_output)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.view(bsz, q_len, -1)
attn_output = self.o_proj(attn_output)
return attn_output, None
@use_kernel_forward_from_hub("RMSNorm")
| DiffLlamaSdpaAttention |
python | realpython__materials | queue/src/queues.py | {
"start": 343,
"end": 585
} | class ____(IterableMixin):
def __init__(self, *elements):
self._elements = deque(elements)
def enqueue(self, element):
self._elements.append(element)
def dequeue(self):
return self._elements.popleft()
| Queue |
python | modin-project__modin | modin/tests/pandas/test_io.py | {
"start": 48729,
"end": 75922
} | class ____:
@pytest.mark.parametrize("columns", [None, ["col1"]])
@pytest.mark.parametrize("row_group_size", [None, 100, 1000, 10_000])
@pytest.mark.parametrize("path_type", [Path, str])
def test_read_parquet(
self, engine, make_parquet_file, columns, row_group_size, path_type
):
self._test_read_parquet(
engine=engine,
make_parquet_file=make_parquet_file,
columns=columns,
filters=None,
row_group_size=row_group_size,
path_type=path_type,
)
def _test_read_parquet(
self,
engine,
make_parquet_file,
columns,
filters,
row_group_size,
path_type=str,
range_index_start=0,
range_index_step=1,
range_index_name=None,
expected_exception=None,
):
if engine == "pyarrow" and filters == [] and os.name == "nt":
# pyarrow, and therefore pandas using pyarrow, errors in this case.
# Modin correctly replicates this behavior; however error cases
# cause race conditions with ensure_clean on Windows.
# TODO: Remove this once https://github.com/modin-project/modin/issues/6460 is fixed.
pytest.xfail(
"Skipping empty filters error case to avoid race condition - see #6460"
)
with ensure_clean(".parquet") as unique_filename:
unique_filename = path_type(unique_filename)
make_parquet_file(
filename=unique_filename,
row_group_size=row_group_size,
range_index_start=range_index_start,
range_index_step=range_index_step,
range_index_name=range_index_name,
)
eval_io(
fn_name="read_parquet",
# read_parquet kwargs
engine=engine,
path=unique_filename,
columns=columns,
filters=filters,
expected_exception=expected_exception,
)
@pytest.mark.parametrize(
"dtype_backend", [lib.no_default, "numpy_nullable", "pyarrow"]
)
def test_read_parquet_dtype_backend(self, engine, make_parquet_file, dtype_backend):
with ensure_clean(".parquet") as unique_filename:
make_parquet_file(filename=unique_filename, row_group_size=100)
def comparator(df1, df2):
df_equals(df1, df2)
df_equals(df1.dtypes, df2.dtypes)
expected_exception = None
if engine == "fastparquet":
expected_exception = ValueError(
"The 'dtype_backend' argument is not supported for the fastparquet engine"
)
eval_io(
fn_name="read_parquet",
# read_parquet kwargs
engine=engine,
path=unique_filename,
dtype_backend=dtype_backend,
comparator=comparator,
expected_exception=expected_exception,
)
# Tests issue #6778
def test_read_parquet_no_extension(self, engine, make_parquet_file):
with ensure_clean(".parquet") as unique_filename:
# Remove the .parquet extension
no_ext_fname = unique_filename[: unique_filename.index(".parquet")]
make_parquet_file(filename=no_ext_fname)
eval_io(
fn_name="read_parquet",
# read_parquet kwargs
engine=engine,
path=no_ext_fname,
)
@pytest.mark.parametrize(
"filters",
[None, [], [("col1", "==", 5)], [("col1", "<=", 215), ("col2", ">=", 35)]],
)
def test_read_parquet_filters(self, engine, make_parquet_file, filters):
expected_exception = None
if filters == [] and engine == "pyarrow":
expected_exception = ValueError("Malformed filters")
self._test_read_parquet(
engine=engine,
make_parquet_file=make_parquet_file,
columns=None,
filters=filters,
row_group_size=100,
path_type=str,
expected_exception=expected_exception,
)
@pytest.mark.parametrize("columns", [None, ["col1"]])
@pytest.mark.parametrize(
"filters",
[None, [("col1", "<=", 1_000_000)], [("col1", "<=", 75), ("col2", ">=", 35)]],
)
@pytest.mark.parametrize(
"range_index_start",
[0, 5_000],
)
@pytest.mark.parametrize(
"range_index_step",
[1, 10],
)
@pytest.mark.parametrize(
"range_index_name",
[None, "my_index"],
)
def test_read_parquet_range_index(
self,
engine,
make_parquet_file,
columns,
filters,
range_index_start,
range_index_step,
range_index_name,
):
self._test_read_parquet(
engine=engine,
make_parquet_file=make_parquet_file,
columns=columns,
filters=filters,
row_group_size=100,
path_type=str,
range_index_start=range_index_start,
range_index_step=range_index_step,
range_index_name=range_index_name,
)
def test_read_parquet_list_of_files_5698(self, engine, make_parquet_file):
if engine == "fastparquet" and os.name == "nt":
pytest.xfail(reason="https://github.com/pandas-dev/pandas/issues/51720")
with ensure_clean(".parquet") as f1, ensure_clean(
".parquet"
) as f2, ensure_clean(".parquet") as f3:
for f in [f1, f2, f3]:
make_parquet_file(filename=f)
eval_io(fn_name="read_parquet", path=[f1, f2, f3], engine=engine)
def test_read_parquet_indexing_by_column(self, tmp_path, engine, make_parquet_file):
# Test indexing into a column of Modin with various parquet file row lengths.
# Specifically, tests for https://github.com/modin-project/modin/issues/3527
# which fails when min_partition_size < nrows < min_partition_size * (num_partitions - 1)
nrows = (
MinRowPartitionSize.get() + 1
) # Use the minimal guaranteed failing value for nrows.
unique_filename = get_unique_filename(extension="parquet", data_dir=tmp_path)
make_parquet_file(filename=unique_filename, nrows=nrows)
parquet_df = pd.read_parquet(unique_filename, engine=engine)
for col in parquet_df.columns:
parquet_df[col]
@pytest.mark.parametrize("columns", [None, ["col1"]])
@pytest.mark.parametrize(
"filters",
[None, [("col1", "<=", 3_215), ("col2", ">=", 35)]],
)
@pytest.mark.parametrize("row_group_size", [None, 100, 1000, 10_000])
@pytest.mark.parametrize(
"rows_per_file", [[1000] * 40, [0, 0, 40_000], [10_000, 10_000] + [100] * 200]
)
@pytest.mark.exclude_in_sanity
def test_read_parquet_directory(
self, engine, make_parquet_dir, columns, filters, row_group_size, rows_per_file
):
self._test_read_parquet_directory(
engine=engine,
make_parquet_dir=make_parquet_dir,
columns=columns,
filters=filters,
range_index_start=0,
range_index_step=1,
range_index_name=None,
row_group_size=row_group_size,
rows_per_file=rows_per_file,
)
def _test_read_parquet_directory(
self,
engine,
make_parquet_dir,
columns,
filters,
range_index_start,
range_index_step,
range_index_name,
row_group_size,
rows_per_file,
):
num_cols = DATASET_SIZE_DICT.get(
TestDatasetSize.get(), DATASET_SIZE_DICT["Small"]
)
dfs_by_filename = {}
start_row = 0
for i, length in enumerate(rows_per_file):
end_row = start_row + length
df = pandas.DataFrame(
{f"col{x + 1}": np.arange(start_row, end_row) for x in range(num_cols)},
)
index = pandas.RangeIndex(
start=range_index_start,
stop=range_index_start + (length * range_index_step),
step=range_index_step,
name=range_index_name,
)
if (
range_index_start == 0
and range_index_step == 1
and range_index_name is None
):
assert df.index.equals(index)
else:
df.index = index
dfs_by_filename[f"{i}.parquet"] = df
start_row = end_row
path = make_parquet_dir(dfs_by_filename, row_group_size)
# There are specific files that PyArrow will try to ignore by default
# in a parquet directory. One example are files that start with '_'. Our
# previous implementation tried to read all files in a parquet directory,
# but we now make use of PyArrow to ensure the directory is valid.
with open(os.path.join(path, "_committed_file"), "w+") as f:
f.write("testingtesting")
eval_io(
fn_name="read_parquet",
# read_parquet kwargs
engine=engine,
path=path,
columns=columns,
filters=filters,
)
@pytest.mark.parametrize(
"filters",
[None, [("col1", "<=", 1_000_000)], [("col1", "<=", 75), ("col2", ">=", 35)]],
)
@pytest.mark.parametrize(
"range_index_start",
[0, 5_000],
)
@pytest.mark.parametrize(
"range_index_step",
[1, 10],
)
@pytest.mark.parametrize(
"range_index_name",
[None, "my_index"],
)
@pytest.mark.parametrize("row_group_size", [None, 20])
def test_read_parquet_directory_range_index(
self,
engine,
make_parquet_dir,
filters,
range_index_start,
range_index_step,
range_index_name,
row_group_size,
):
self._test_read_parquet_directory(
engine=engine,
make_parquet_dir=make_parquet_dir,
columns=None,
filters=filters,
range_index_start=range_index_start,
range_index_step=range_index_step,
range_index_name=range_index_name,
row_group_size=row_group_size,
# We don't vary rows_per_file, but we choose a
# tricky option: uneven with some empty files,
# none divisible by the row_group_size.
# We use a smaller total size than in other tests
# to make this test run faster.
rows_per_file=([250] + [0] * 10 + [25] * 10),
)
@pytest.mark.parametrize(
"filters",
[None, [("col1", "<=", 1_000_000)], [("col1", "<=", 75), ("col2", ">=", 35)]],
)
@pytest.mark.parametrize(
"range_index_start",
[0, 5_000],
)
@pytest.mark.parametrize(
"range_index_step",
[1, 10],
)
@pytest.mark.parametrize(
"range_index_name",
[None, "my_index"],
)
def test_read_parquet_directory_range_index_consistent_metadata(
self,
engine,
filters,
range_index_start,
range_index_step,
range_index_name,
tmp_path,
):
num_cols = DATASET_SIZE_DICT.get(
TestDatasetSize.get(), DATASET_SIZE_DICT["Small"]
)
df = pandas.DataFrame(
{f"col{x + 1}": np.arange(0, 500) for x in range(num_cols)},
)
index = pandas.RangeIndex(
start=range_index_start,
stop=range_index_start + (len(df) * range_index_step),
step=range_index_step,
name=range_index_name,
)
if (
range_index_start == 0
and range_index_step == 1
and range_index_name is None
):
assert df.index.equals(index)
else:
df.index = index
path = get_unique_filename(extension=None, data_dir=tmp_path)
table = pa.Table.from_pandas(df)
pyarrow.dataset.write_dataset(
table,
path,
format="parquet",
max_rows_per_group=35,
max_rows_per_file=100,
)
# There are specific files that PyArrow will try to ignore by default
# in a parquet directory. One example are files that start with '_'. Our
# previous implementation tried to read all files in a parquet directory,
# but we now make use of PyArrow to ensure the directory is valid.
with open(os.path.join(path, "_committed_file"), "w+") as f:
f.write("testingtesting")
eval_io(
fn_name="read_parquet",
# read_parquet kwargs
engine=engine,
path=path,
filters=filters,
)
@pytest.mark.parametrize("columns", [None, ["col1"]])
@pytest.mark.parametrize(
"filters",
[None, [], [("col1", "==", 5)], [("col1", "<=", 215), ("col2", ">=", 35)]],
)
@pytest.mark.parametrize(
"range_index_start",
[0, 5_000],
)
@pytest.mark.parametrize(
"range_index_step",
[1, 10],
)
def test_read_parquet_partitioned_directory(
self,
tmp_path,
make_parquet_file,
columns,
filters,
range_index_start,
range_index_step,
engine,
):
unique_filename = get_unique_filename(extension=None, data_dir=tmp_path)
make_parquet_file(
filename=unique_filename,
partitioned_columns=["col1"],
range_index_start=range_index_start,
range_index_step=range_index_step,
range_index_name="my_index",
)
expected_exception = None
if filters == [] and engine == "pyarrow":
expected_exception = ValueError("Malformed filters")
eval_io(
fn_name="read_parquet",
# read_parquet kwargs
engine=engine,
path=unique_filename,
columns=columns,
filters=filters,
expected_exception=expected_exception,
)
@pytest.mark.parametrize(
"filters",
[
None,
[],
[("B", "==", "a")],
[
("B", "==", "a"),
("A", ">=", 50_000),
("idx", "<=", 30_000),
("idx_categorical", "==", "y"),
],
],
)
def test_read_parquet_pandas_index(self, engine, filters):
if (
version.parse(pa.__version__) >= version.parse("12.0.0")
and version.parse(pd.__version__) < version.parse("2.0.0")
and engine == "pyarrow"
):
pytest.xfail("incompatible versions; see #6072")
# Ensure modin can read parquet files written by pandas with a non-RangeIndex object
pandas_df = pandas.DataFrame(
{
"idx": np.random.randint(0, 100_000, size=2000),
"idx_categorical": pandas.Categorical(["y", "z"] * 1000),
# Can't do interval index right now because of this bug fix that is planned
# to be apart of the pandas 1.5.0 release: https://github.com/pandas-dev/pandas/pull/46034
# "idx_interval": pandas.interval_range(start=0, end=2000),
"idx_periodrange": pandas.period_range(
start="2017-01-01", periods=2000
),
"A": np.random.randint(0, 100_000, size=2000),
"B": ["a", "b"] * 1000,
"C": ["c"] * 2000,
}
)
# Older versions of pyarrow do not support Arrow to Parquet
# schema conversion for duration[ns]
# https://issues.apache.org/jira/browse/ARROW-6780
if version.parse(pa.__version__) >= version.parse("8.0.0"):
pandas_df["idx_timedelta"] = pandas.timedelta_range(
start="1 day", periods=2000
)
# There is a non-deterministic bug in the fastparquet engine when we
# try to set the index to the datetime column. Please see:
# https://github.com/dask/fastparquet/issues/796
if engine == "pyarrow":
pandas_df["idx_datetime"] = pandas.date_range(
start="1/1/2018", periods=2000
)
for col in pandas_df.columns:
if col.startswith("idx"):
# Before this commit, first released in version 2023.1.0, fastparquet relied
# on pandas private APIs to handle Categorical indices.
# These private APIs broke in pandas 2.
# https://github.com/dask/fastparquet/commit/cf60ae0e9a9ca57afc7a8da98d8c0423db1c0c53
if (
col == "idx_categorical"
and engine == "fastparquet"
and version.parse(fastparquet.__version__)
< version.parse("2023.1.0")
):
continue
with ensure_clean(".parquet") as unique_filename:
pandas_df.set_index(col).to_parquet(unique_filename)
# read the same parquet using modin.pandas
eval_io(
"read_parquet",
# read_parquet kwargs
path=unique_filename,
engine=engine,
filters=filters,
)
with ensure_clean(".parquet") as unique_filename:
pandas_df.set_index(["idx", "A"]).to_parquet(unique_filename)
eval_io(
"read_parquet",
# read_parquet kwargs
path=unique_filename,
engine=engine,
filters=filters,
)
@pytest.mark.parametrize(
"filters",
[
None,
[],
[("B", "==", "a")],
[("B", "==", "a"), ("A", ">=", 5), ("idx", "<=", 30_000)],
],
)
def test_read_parquet_pandas_index_partitioned(self, tmp_path, engine, filters):
# Ensure modin can read parquet files written by pandas with a non-RangeIndex object
pandas_df = pandas.DataFrame(
{
"idx": np.random.randint(0, 100_000, size=2000),
"A": np.random.randint(0, 10, size=2000),
"B": ["a", "b"] * 1000,
"C": ["c"] * 2000,
}
)
unique_filename = get_unique_filename(extension="parquet", data_dir=tmp_path)
pandas_df.set_index("idx").to_parquet(unique_filename, partition_cols=["A"])
expected_exception = None
if filters == [] and engine == "pyarrow":
expected_exception = ValueError("Malformed filters")
# read the same parquet using modin.pandas
eval_io(
"read_parquet",
# read_parquet kwargs
path=unique_filename,
engine=engine,
filters=filters,
expected_exception=expected_exception,
)
def test_read_parquet_hdfs(self, engine):
eval_io(
fn_name="read_parquet",
# read_parquet kwargs
path="modin/tests/pandas/data/hdfs.parquet",
engine=engine,
)
@pytest.mark.parametrize(
"path_type",
["object", "directory", "url"],
)
def test_read_parquet_s3(self, s3_resource, path_type, engine, s3_storage_options):
s3_path = "s3://modin-test/modin-bugs/test_data.parquet"
if path_type == "object":
import s3fs
fs = s3fs.S3FileSystem(
endpoint_url=s3_storage_options["client_kwargs"]["endpoint_url"]
)
with fs.open(s3_path, "rb") as file_obj:
eval_io("read_parquet", path=file_obj, engine=engine)
elif path_type == "directory":
s3_path = "s3://modin-test/modin-bugs/test_data_dir.parquet"
eval_io(
"read_parquet",
path=s3_path,
storage_options=s3_storage_options,
engine=engine,
)
else:
eval_io(
"read_parquet",
path=s3_path,
storage_options=s3_storage_options,
engine=engine,
)
@pytest.mark.parametrize(
"filters",
[None, [], [("idx", "<=", 30_000)], [("idx", "<=", 30_000), ("A", ">=", 5)]],
)
def test_read_parquet_without_metadata(self, tmp_path, engine, filters):
"""Test that Modin can read parquet files not written by pandas."""
from pyarrow import csv, parquet
parquet_fname = get_unique_filename(extension="parquet", data_dir=tmp_path)
csv_fname = get_unique_filename(extension="parquet", data_dir=tmp_path)
pandas_df = pandas.DataFrame(
{
"idx": np.random.randint(0, 100_000, size=2000),
"A": np.random.randint(0, 10, size=2000),
"B": ["a", "b"] * 1000,
"C": ["c"] * 2000,
}
)
pandas_df.to_csv(csv_fname, index=False)
# read into pyarrow table and write it to a parquet file
t = csv.read_csv(csv_fname)
parquet.write_table(t, parquet_fname)
expected_exception = None
if filters == [] and engine == "pyarrow":
expected_exception = ValueError("Malformed filters")
eval_io(
"read_parquet",
# read_parquet kwargs
path=parquet_fname,
engine=engine,
filters=filters,
expected_exception=expected_exception,
)
def test_read_empty_parquet_file(self, tmp_path, engine):
test_df = pandas.DataFrame()
path = tmp_path / "data"
path.mkdir()
test_df.to_parquet(path / "part-00000.parquet", engine=engine)
eval_io(fn_name="read_parquet", path=path, engine=engine)
@pytest.mark.parametrize(
"compression_kwargs",
[
pytest.param({}, id="no_compression_kwargs"),
pytest.param({"compression": None}, id="compression=None"),
pytest.param({"compression": "gzip"}, id="compression=gzip"),
pytest.param({"compression": "snappy"}, id="compression=snappy"),
pytest.param({"compression": "brotli"}, id="compression=brotli"),
],
)
@pytest.mark.parametrize("extension", ["parquet", ".gz", ".bz2", ".zip", ".xz"])
def test_to_parquet(self, tmp_path, engine, compression_kwargs, extension):
modin_df, pandas_df = create_test_dfs(TEST_DATA)
parquet_eval_to_file(
tmp_path,
modin_obj=modin_df,
pandas_obj=pandas_df,
fn="to_parquet",
extension=extension,
engine=engine,
**compression_kwargs,
)
def test_to_parquet_keep_index(self, tmp_path, engine):
data = {"c0": [0, 1] * 1000, "c1": [2, 3] * 1000}
modin_df, pandas_df = create_test_dfs(data)
modin_df.index.name = "foo"
pandas_df.index.name = "foo"
parquet_eval_to_file(
tmp_path,
modin_obj=modin_df,
pandas_obj=pandas_df,
fn="to_parquet",
extension="parquet",
index=True,
engine=engine,
)
def test_to_parquet_s3(self, s3_resource, engine, s3_storage_options):
# use utils_test_data because it spans multiple partitions
modin_path = "s3://modin-test/modin-dir/modin_df.parquet"
mdf, pdf = create_test_dfs(utils_test_data["int_data"])
pdf.to_parquet(
"s3://modin-test/pandas-dir/pandas_df.parquet",
engine=engine,
storage_options=s3_storage_options,
)
mdf.to_parquet(modin_path, engine=engine, storage_options=s3_storage_options)
df_equals(
pandas.read_parquet(
"s3://modin-test/pandas-dir/pandas_df.parquet",
storage_options=s3_storage_options,
),
pd.read_parquet(modin_path, storage_options=s3_storage_options),
)
# check we're not creating local file:
# https://github.com/modin-project/modin/issues/5888
assert not os.path.isdir(modin_path)
def test_read_parquet_2462(self, tmp_path, engine):
test_df = pandas.DataFrame({"col1": [["ad_1", "ad_2"], ["ad_3"]]})
path = tmp_path / "data"
path.mkdir()
test_df.to_parquet(path / "part-00000.parquet", engine=engine)
read_df = pd.read_parquet(path, engine=engine)
df_equals(test_df, read_df)
def test_read_parquet_5767(self, tmp_path, engine):
test_df = pandas.DataFrame({"a": [1, 2, 3, 4], "b": [1, 1, 2, 2]})
path = tmp_path / "data"
path.mkdir()
file_name = "modin_issue#0000.parquet"
test_df.to_parquet(path / file_name, engine=engine, partition_cols=["b"])
read_df = pd.read_parquet(path / file_name)
# both Modin and pandas read column "b" as a category
df_equals(test_df, read_df.astype("int64"))
@pytest.mark.parametrize("index", [False, True])
def test_read_parquet_6855(self, tmp_path, engine, index):
if engine == "fastparquet":
pytest.skip("integer columns aren't supported")
test_df = pandas.DataFrame(np.random.rand(10**2, 10))
path = tmp_path / "data"
path.mkdir()
file_name = "issue6855.parquet"
test_df.to_parquet(path / file_name, index=index, engine=engine)
read_df = pd.read_parquet(path / file_name, engine=engine)
if not index:
# In that case pyarrow cannot preserve index dtype
read_df.columns = pandas.Index(read_df.columns).astype("int64").to_list()
df_equals(test_df, read_df)
def test_read_parquet_s3_with_column_partitioning(
self, s3_resource, engine, s3_storage_options
):
# https://github.com/modin-project/modin/issues/4636
s3_path = "s3://modin-test/modin-bugs/issue5159.parquet"
eval_io(
fn_name="read_parquet",
path=s3_path,
engine=engine,
storage_options=s3_storage_options,
)
# Leave this test apart from the test classes, which skip the default to pandas
# warning check. We want to make sure we are NOT defaulting to pandas for a
# path relative to user home.
# TODO(https://github.com/modin-project/modin/issues/3655): Get rid of this
# commment once we turn all default to pandas messages into errors.
def test_read_parquet_relative_to_user_home(make_parquet_file):
with ensure_clean(".parquet") as unique_filename:
make_parquet_file(filename=unique_filename)
_check_relative_io("read_parquet", unique_filename, "path")
@pytest.mark.filterwarnings(default_to_pandas_ignore_string)
| TestParquet |
python | pytorch__pytorch | torch/ao/nn/intrinsic/quantized/modules/bn_relu.py | {
"start": 178,
"end": 1731
} | class ____(nnq.BatchNorm2d):
r"""
A BNReLU2d module is a fused module of BatchNorm2d and ReLU
We adopt the same interface as :class:`torch.ao.nn.quantized.BatchNorm2d`.
Attributes:
Same as torch.ao.nn.quantized.BatchNorm2d
"""
_FLOAT_MODULE = torch.ao.nn.intrinsic.BNReLU2d
def __init__(self, num_features, eps=1e-5, momentum=0.1, device=None, dtype=None):
super().__init__(
num_features, eps=eps, momentum=momentum, device=device, dtype=dtype
)
def forward(self, input):
# Temporarily using len(shape) instead of ndim due to JIT issue
# https://github.com/pytorch/pytorch/issues/23890
if len(input.shape) != 4:
raise ValueError("Input shape must be `(N, C, H, W)`!")
return torch.ops.quantized.batch_norm2d_relu(
input,
self.weight,
self.bias,
self.running_mean,
self.running_var,
self.eps,
self.scale,
self.zero_point,
)
def _get_name(self):
return "QuantizedBNReLU2d"
@classmethod
def from_float(cls, mod, use_precomputed_fake_quant=False): # type: ignore[override]
# TODO: Add qat support for BNReLU2d
return super().from_float(
mod, use_precomputed_fake_quant=use_precomputed_fake_quant
)
@classmethod
def from_reference(cls, bn_relu, output_scale, output_zero_point):
return super().from_reference(bn_relu[0], output_scale, output_zero_point)
| BNReLU2d |
python | getsentry__sentry | tests/sentry/middleware/integrations/parsers/test_bitbucket.py | {
"start": 822,
"end": 4033
} | class ____(TestCase):
def get_response(self, req: HttpRequest) -> HttpResponse:
return HttpResponse(status=200, content="passthrough")
factory = RequestFactory()
region = Region("us", 1, "https://us.testserver", RegionCategory.MULTI_TENANT)
region_config = (region,)
def get_integration(self) -> Integration:
return self.create_integration(
organization=self.organization, external_id="bitbucket:1", provider="bitbucket"
)
@override_settings(SILO_MODE=SiloMode.CONTROL)
@override_regions(region_config)
def test_routing_endpoints(self) -> None:
self.get_integration()
control_routes = [
reverse("sentry-extensions-bitbucket-descriptor"),
reverse("sentry-extensions-bitbucket-installed"),
reverse("sentry-extensions-bitbucket-uninstalled"),
reverse(
"sentry-extensions-bitbucket-search",
kwargs={
"organization_id_or_slug": self.organization.slug,
"integration_id": self.integration.id,
},
),
]
for route in control_routes:
request = self.factory.post(route)
parser = BitbucketRequestParser(request=request, response_handler=self.get_response)
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == 200
assert response.content == b"passthrough"
assert_no_webhook_payloads()
@override_settings(SILO_MODE=SiloMode.CONTROL)
@override_regions(region_config)
def test_routing_webhook_no_regions(self) -> None:
region_route = reverse(
"sentry-extensions-bitbucket-webhook", kwargs={"organization_id": self.organization.id}
)
request = self.factory.post(region_route)
parser = BitbucketRequestParser(request=request, response_handler=self.get_response)
# Missing region
OrganizationMapping.objects.get(organization_id=self.organization.id).update(
region_name="eu"
)
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == 200
assert response.content == b"passthrough"
assert_no_webhook_payloads()
@override_settings(SILO_MODE=SiloMode.CONTROL)
@override_regions(region_config)
def test_routing_webhook_with_regions(self) -> None:
self.get_integration()
region_route = reverse(
"sentry-extensions-bitbucket-webhook", kwargs={"organization_id": self.organization.id}
)
request = self.factory.post(region_route)
parser = BitbucketRequestParser(request=request, response_handler=self.get_response)
response = parser.get_response()
assert isinstance(response, HttpResponse)
assert response.status_code == 202
assert response.content == b""
assert_webhook_payloads_for_mailbox(
request=request,
mailbox_name=f"bitbucket:{self.organization.id}",
region_names=[self.region.name],
)
| BitbucketRequestParserTest |
python | joke2k__faker | tests/providers/test_automotive.py | {
"start": 2826,
"end": 3209
} | class ____(_SimpleAutomotiveTestMixin):
"""Test de_DE automotive provider methods"""
license_plate_pattern: Pattern = re.compile(
r"(?P<prefix>[A-Z\u00D6\u00DC]{1,3})-[A-Z]{1,2}-[1-9]{1,4}",
re.UNICODE,
)
def perform_extra_checks(self, license_plate, match):
assert match.group("prefix") in DeDeAutomotiveProvider.license_plate_prefix
| TestDeDe |
python | walkccc__LeetCode | solutions/2249. Count Lattice Points Inside a Circle/2249-2.py | {
"start": 0,
"end": 389
} | class ____:
def countLatticePoints(self, circles: list[list[int]]) -> int:
points = set()
# dx := relative to x
# dy := relative to y
# So, dx^2 + dy^2 = r^2.
for x, y, r in circles:
for dx in range(-r, r + 1):
yMax = int((r**2 - dx**2)**0.5)
for dy in range(-yMax, yMax + 1):
points.add((x + dx, y + dy))
return len(points)
| Solution |
python | scipy__scipy | scipy/spatial/tests/test_distance.py | {
"start": 67955,
"end": 79562
} | class ____:
# If test case name ends on "_E" then an exception is expected for the
# given input, if it ends in "_F" then False is expected for the is_valid_y
# check. Otherwise the input is expected to be valid.
def test_is_valid_y_improper_shape_2D_E(self):
y = np.zeros((3, 3,), dtype=np.float64)
with pytest.raises(ValueError):
is_valid_y_throw(y)
def test_is_valid_y_improper_shape_2D_F(self):
y = np.zeros((3, 3,), dtype=np.float64)
assert_equal(is_valid_y(y), False)
def test_is_valid_y_improper_shape_3D_E(self):
y = np.zeros((3, 3, 3), dtype=np.float64)
with pytest.raises(ValueError):
is_valid_y_throw(y)
def test_is_valid_y_improper_shape_3D_F(self):
y = np.zeros((3, 3, 3), dtype=np.float64)
assert_equal(is_valid_y(y), False)
def test_is_valid_y_correct_2_by_2(self):
y = self.correct_n_by_n(2)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_correct_3_by_3(self):
y = self.correct_n_by_n(3)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_correct_4_by_4(self):
y = self.correct_n_by_n(4)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_correct_5_by_5(self):
y = self.correct_n_by_n(5)
assert_equal(is_valid_y(y), True)
def test_is_valid_y_2_100(self):
a = set()
for n in range(2, 16):
a.add(n * (n - 1) / 2)
for i in range(5, 105):
if i not in a:
with pytest.raises(ValueError):
self.bad_y(i)
def bad_y(self, n):
y = np.random.rand(n)
return is_valid_y(y, throw=True)
def correct_n_by_n(self, n):
y = np.random.rand((n * (n - 1)) // 2)
return y
@pytest.mark.parametrize("p", [-10.0, -0.5, 0.0])
def test_bad_p(p):
# Raise ValueError if p <=0.
with pytest.raises(ValueError):
minkowski([1, 2], [3, 4], p)
with pytest.raises(ValueError):
minkowski([1, 2], [3, 4], p, [1, 1])
def test_sokalsneath_all_false():
# Regression test for ticket #876
with pytest.raises(ValueError):
sokalsneath([False, False, False], [False, False, False])
def test_canberra():
# Regression test for ticket #1430.
assert_equal(wcanberra([1, 2, 3], [2, 4, 6]), 1)
assert_equal(wcanberra([1, 1, 0, 0], [1, 0, 1, 0]), 2)
def test_braycurtis():
# Regression test for ticket #1430.
assert_almost_equal(wbraycurtis([1, 2, 3], [2, 4, 6]), 1. / 3, decimal=15)
assert_almost_equal(wbraycurtis([1, 1, 0, 0], [1, 0, 1, 0]), 0.5, decimal=15)
def test_euclideans():
# Regression test for ticket #1328.
x1 = np.array([1, 1, 1])
x2 = np.array([0, 0, 0])
# Basic test of the calculation.
assert_almost_equal(wsqeuclidean(x1, x2), 3.0, decimal=14)
assert_almost_equal(weuclidean(x1, x2), np.sqrt(3), decimal=14)
# Check flattening for (1, N) or (N, 1) inputs
with pytest.raises(ValueError, match="Input vector should be 1-D"):
weuclidean(x1[np.newaxis, :], x2[np.newaxis, :]), np.sqrt(3)
with pytest.raises(ValueError, match="Input vector should be 1-D"):
wsqeuclidean(x1[np.newaxis, :], x2[np.newaxis, :])
with pytest.raises(ValueError, match="Input vector should be 1-D"):
wsqeuclidean(x1[:, np.newaxis], x2[:, np.newaxis])
# Distance metrics only defined for vectors (= 1-D)
x = np.arange(4).reshape(2, 2)
with pytest.raises(ValueError):
weuclidean(x, x)
with pytest.raises(ValueError):
wsqeuclidean(x, x)
# Another check, with random data.
rs = np.random.RandomState(1234567890)
x = rs.rand(10)
y = rs.rand(10)
d1 = weuclidean(x, y)
d2 = wsqeuclidean(x, y)
assert_almost_equal(d1**2, d2, decimal=14)
def test_hamming_unequal_length():
# Regression test for gh-4290.
x = [0, 0, 1]
y = [1, 0, 1, 0]
# Used to give an AttributeError from ndarray.mean called on bool
with pytest.raises(ValueError):
whamming(x, y)
def test_hamming_unequal_length_with_w():
u = [0, 0, 1]
v = [0, 0, 1]
w = [1, 0, 1, 0]
msg = "'w' should have the same length as 'u' and 'v'."
with pytest.raises(ValueError, match=msg):
whamming(u, v, w)
def test_hamming_string_array():
# https://github.com/scikit-learn/scikit-learn/issues/4014
a = np.array(['eggs', 'spam', 'spam', 'eggs', 'spam', 'spam', 'spam',
'spam', 'spam', 'spam', 'spam', 'eggs', 'eggs', 'spam',
'eggs', 'eggs', 'eggs', 'eggs', 'eggs', 'spam'],
dtype='|S4')
b = np.array(['eggs', 'spam', 'spam', 'eggs', 'eggs', 'spam', 'spam',
'spam', 'spam', 'eggs', 'spam', 'eggs', 'spam', 'eggs',
'spam', 'spam', 'eggs', 'spam', 'spam', 'eggs'],
dtype='|S4')
desired = 0.45
assert_allclose(whamming(a, b), desired)
def test_minkowski_w():
# Regression test for gh-8142.
arr_in = np.array([[83.33333333, 100., 83.33333333, 100., 36.,
60., 90., 150., 24., 48.],
[83.33333333, 100., 83.33333333, 100., 36.,
60., 90., 150., 24., 48.]])
p0 = pdist(arr_in, metric='minkowski', p=1, w=None)
c0 = cdist(arr_in, arr_in, metric='minkowski', p=1, w=None)
p1 = pdist(arr_in, metric='minkowski', p=1)
c1 = cdist(arr_in, arr_in, metric='minkowski', p=1)
assert_allclose(p0, p1, rtol=1e-15)
assert_allclose(c0, c1, rtol=1e-15)
def test_sqeuclidean_dtypes():
# Assert that sqeuclidean returns the right types of values.
# Integer types should be converted to floating for stability.
# Floating point types should be the same as the input.
x = [1, 2, 3]
y = [4, 5, 6]
for dtype in [np.int8, np.int16, np.int32, np.int64]:
d = wsqeuclidean(np.asarray(x, dtype=dtype), np.asarray(y, dtype=dtype))
assert_(np.issubdtype(d.dtype, np.floating))
for dtype in [np.uint8, np.uint16, np.uint32, np.uint64]:
umax = np.iinfo(dtype).max
d1 = wsqeuclidean([0], np.asarray([umax], dtype=dtype))
d2 = wsqeuclidean(np.asarray([umax], dtype=dtype), [0])
assert_equal(d1, d2)
assert_equal(d1, np.float64(umax)**2)
dtypes = [np.float32, np.float64, np.complex64, np.complex128]
for dtype in ['float16', 'float128']:
# These aren't present in older numpy versions; float128 may also not
# be present on all platforms.
if hasattr(np, dtype):
dtypes.append(getattr(np, dtype))
for dtype in dtypes:
d = wsqeuclidean(np.asarray(x, dtype=dtype), np.asarray(y, dtype=dtype))
assert_equal(d.dtype, dtype)
def test_modifies_input(metric):
# test whether cdist or pdist modifies input arrays
X1 = np.asarray([[1., 2., 3.],
[1.2, 2.3, 3.4],
[2.2, 2.3, 4.4],
[22.2, 23.3, 44.4]])
X1_copy = X1.copy()
cdist(X1, X1, metric)
pdist(X1, metric)
assert_array_equal(X1, X1_copy)
def test_Xdist_deprecated_args(metric):
# testing both cdist and pdist deprecated warnings
X1 = np.asarray([[1., 2., 3.],
[1.2, 2.3, 3.4],
[2.2, 2.3, 4.4],
[22.2, 23.3, 44.4]])
with pytest.raises(TypeError):
cdist(X1, X1, metric, 2.)
with pytest.raises(TypeError):
pdist(X1, metric, 2.)
for arg in ["p", "V", "VI"]:
kwargs = {arg: np.asarray(1.)}
if ((arg == "V" and metric == "seuclidean")
or (arg == "VI" and metric == "mahalanobis")
or (arg == "p" and metric == "minkowski")):
continue
with pytest.raises(TypeError):
cdist(X1, X1, metric, **kwargs)
with pytest.raises(TypeError):
pdist(X1, metric, **kwargs)
def test_Xdist_non_negative_weights(metric):
X = eo['random-float32-data'][::5, ::2]
w = np.ones(X.shape[1])
w[::5] = -w[::5]
if metric in ['seuclidean', 'mahalanobis', 'jensenshannon']:
pytest.skip("not applicable")
for m in [metric, eval(metric), "test_" + metric]:
with pytest.raises(ValueError):
pdist(X, m, w=w)
with pytest.raises(ValueError):
cdist(X, X, m, w=w)
def test__validate_vector():
x = [1, 2, 3]
y = _validate_vector(x)
assert_array_equal(y, x)
y = _validate_vector(x, dtype=np.float64)
assert_array_equal(y, x)
assert_equal(y.dtype, np.float64)
x = [1]
y = _validate_vector(x)
assert_equal(y.ndim, 1)
assert_equal(y, x)
x = 1
with pytest.raises(ValueError, match="Input vector should be 1-D"):
_validate_vector(x)
x = np.arange(5).reshape(1, -1, 1)
with pytest.raises(ValueError, match="Input vector should be 1-D"):
_validate_vector(x)
x = [[1, 2], [3, 4]]
with pytest.raises(ValueError, match="Input vector should be 1-D"):
_validate_vector(x)
def test_yule_all_same():
# Test yule avoids a divide by zero when exactly equal
x = np.ones((2, 6), dtype=bool)
d = wyule(x[0], x[0])
assert d == 0.0
d = pdist(x, 'yule')
assert_equal(d, [0.0])
d = cdist(x[:1], x[:1], 'yule')
assert_equal(d, [[0.0]])
def test_jensenshannon():
assert_almost_equal(jensenshannon([1.0, 0.0, 0.0], [0.0, 1.0, 0.0], 2.0),
1.0)
assert_almost_equal(jensenshannon([1.0, 0.0], [0.5, 0.5]),
0.46450140402245893)
assert_almost_equal(jensenshannon([1.0, 0.0, 0.0], [1.0, 0.0, 0.0]), 0.0)
assert_almost_equal(jensenshannon([[1.0, 2.0]], [[0.5, 1.5]], axis=0),
[0.0, 0.0])
assert_almost_equal(jensenshannon([[1.0, 2.0]], [[0.5, 1.5]], axis=1),
[0.0649045])
assert_almost_equal(jensenshannon([[1.0, 2.0]], [[0.5, 1.5]], axis=0,
keepdims=True), [[0.0, 0.0]])
assert_almost_equal(jensenshannon([[1.0, 2.0]], [[0.5, 1.5]], axis=1,
keepdims=True), [[0.0649045]])
a = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]])
b = np.array([[13, 14, 15, 16],
[17, 18, 19, 20],
[21, 22, 23, 24]])
assert_almost_equal(jensenshannon(a, b, axis=0),
[0.1954288, 0.1447697, 0.1138377, 0.0927636])
assert_almost_equal(jensenshannon(a, b, axis=1),
[0.1402339, 0.0399106, 0.0201815])
def test_gh_17703():
arr_1 = np.array([1, 0, 0])
arr_2 = np.array([2, 0, 0])
expected = dice(arr_1, arr_2)
actual = pdist([arr_1, arr_2], metric='dice')
assert_allclose(actual, expected)
actual = cdist(np.atleast_2d(arr_1),
np.atleast_2d(arr_2), metric='dice')
assert_allclose(actual, expected)
def test_immutable_input(metric):
if metric in ("jensenshannon", "mahalanobis", "seuclidean"):
pytest.skip("not applicable")
x = np.arange(10, dtype=np.float64)
x.setflags(write=False)
getattr(scipy.spatial.distance, metric)(x, x, w=x)
def test_gh_23109():
a = np.array([0, 0, 1, 1])
b = np.array([0, 1, 1, 0])
w = np.asarray([1.5, 1.2, 0.7, 1.3])
expected = yule(a, b, w=w)
assert_allclose(expected, 1.1954022988505748)
actual = cdist(np.atleast_2d(a),
np.atleast_2d(b),
metric='yule', w=w)
assert_allclose(actual, expected)
| TestIsValidY |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 304111,
"end": 304325
} | class ____(VegaLiteSchema):
"""DictSelectionInit schema wrapper."""
_schema = {"$ref": "#/definitions/Dict<SelectionInit>"}
def __init__(self, **kwds):
super().__init__(**kwds)
| DictSelectionInit |
python | catalyst-team__catalyst | examples/reinforcement_learning/reinforce.py | {
"start": 1200,
"end": 4154
} | class ____(IterableDataset):
def __init__(self, buffer: RolloutBuffer):
self.buffer = buffer
def __iter__(self) -> Iterator[Sequence[np.array]]:
for i in range(len(self.buffer)):
states, actions, rewards = self.buffer.sample(i)
yield states, actions, rewards
self.buffer.buffer.clear()
def __len__(self) -> int:
return self.buffer.capacity
# REINFORCE
def get_cumulative_rewards(rewards, gamma=0.99):
G = [rewards[-1]]
for r in reversed(rewards[:-1]):
G.insert(0, r + gamma * G[0])
return G
def to_one_hot(y, n_dims=None):
"""Takes an integer vector and converts it to 1-hot matrix."""
y_tensor = y
y_tensor = y_tensor.type(torch.LongTensor).view(-1, 1)
n_dims = n_dims if n_dims is not None else int(torch.max(y_tensor)) + 1
y_one_hot = torch.zeros(y_tensor.size()[0], n_dims).scatter_(1, y_tensor, 1)
return y_one_hot
def get_action(env, network: nn.Module, state: np.array) -> int:
state = torch.tensor(state[None], dtype=torch.float32)
logits = network(state).detach()
probas = F.softmax(logits, -1).cpu().numpy()[0]
action = np.random.choice(len(probas), p=probas)
return int(action)
def generate_session(
env,
network: nn.Module,
t_max: int = 1000,
rollout_buffer: Optional[RolloutBuffer] = None,
) -> Tuple[float, int]:
total_reward = 0
states, actions, rewards = [], [], []
state = env.reset()
for t in range(t_max):
action = get_action(env, network, state=state)
next_state, reward, done, _ = env.step(action)
# record session history to train later
states.append(state)
actions.append(action)
rewards.append(reward)
total_reward += reward
state = next_state
if done:
break
if rollout_buffer is not None:
rollout_buffer.append(Rollout(states, actions, rewards))
return total_reward, t
def generate_sessions(
env,
network: nn.Module,
t_max: int = 1000,
rollout_buffer: Optional[RolloutBuffer] = None,
num_sessions: int = 100,
) -> Tuple[float, int]:
sessions_reward, sessions_steps = 0, 0
for i_episone in range(num_sessions):
r, t = generate_session(
env=env, network=network, t_max=t_max, rollout_buffer=rollout_buffer
)
sessions_reward += r
sessions_steps += t
return sessions_reward, sessions_steps
def get_network(env, num_hidden: int = 128):
inner_fn = get_optimal_inner_init(nn.ReLU)
outer_fn = outer_init
network = torch.nn.Sequential(
nn.Linear(env.observation_space.shape[0], num_hidden),
nn.ReLU(),
nn.Linear(num_hidden, num_hidden),
nn.ReLU(),
)
head = nn.Linear(num_hidden, env.action_space.n)
network.apply(inner_fn)
head.apply(outer_fn)
return torch.nn.Sequential(network, head)
# Catalyst
| RolloutDataset |
python | django__django | tests/postgres_tests/test_array.py | {
"start": 58719,
"end": 60320
} | class ____(PostgreSQLTestCase):
def test_saving_and_querying_for_sql_null(self):
obj = OtherTypesArrayModel.objects.create(json=[None, None])
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(json__1__isnull=True), [obj]
)
# RemovedInDjango70Warning.
msg = (
"Using None as the right-hand side of an exact lookup on JSONField to mean "
"JSON scalar 'null' is deprecated. Use JSONNull() instead (or use the "
"__isnull lookup if you meant SQL NULL)."
)
with self.assertWarnsMessage(RemovedInDjango70Warning, msg):
# RemovedInDjango70Warning: deindent, and replace [] with [obj].
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(json__1=None), []
)
def test_saving_and_querying_for_json_null(self):
obj = OtherTypesArrayModel.objects.create(json=[JSONNull(), JSONNull()])
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(json__1=JSONNull()), [obj]
)
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(json__1__isnull=True), []
)
def test_saving_and_querying_for_nested_json_nulls(self):
obj = OtherTypesArrayModel.objects.create(json=[[None, 1], [None, 2]])
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(json__1__0=None), [obj]
)
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(json__1__0__isnull=True), []
)
| TestJSONFieldQuerying |
python | getsentry__sentry | src/sentry/incidents/action_handlers.py | {
"start": 14987,
"end": 27108
} | class ____(DefaultActionHandler):
@property
def provider(self) -> str:
return "sentry_app"
def send_alert(
self,
action: AlertRuleTriggerAction,
incident: Incident,
project: Project,
metric_value: int | float | None,
new_status: IncidentStatus,
notification_uuid: str | None = None,
):
from sentry.rules.actions.notify_event_service import send_incident_alert_notification
if metric_value is None:
metric_value = get_metric_count_from_incident(incident)
notification_context = NotificationContext.from_alert_rule_trigger_action(action)
alert_context = AlertContext.from_alert_rule_incident(incident.alert_rule)
metric_issue_context = MetricIssueContext.from_legacy_models(
incident=incident,
new_status=new_status,
metric_value=metric_value,
)
incident_serialized_response = serialize(incident, serializer=IncidentSerializer())
success = send_incident_alert_notification(
notification_context=notification_context,
alert_context=alert_context,
metric_issue_context=metric_issue_context,
incident_serialized_response=incident_serialized_response,
organization=incident.organization,
notification_uuid=notification_uuid,
)
if success:
self.record_alert_sent_analytics(
organization_id=incident.organization.id,
project_id=project.id,
alert_id=incident.alert_rule.id,
external_id=action.sentry_app_id,
notification_uuid=notification_uuid,
)
def format_duration(minutes):
"""
Format minutes into a duration string
"""
if minutes >= 1440:
days = int(minutes // 1440)
return f"{days:d} day{pluralize(days)}"
if minutes >= 60:
hours = int(minutes // 60)
return f"{hours:d} hour{pluralize(hours)}"
if minutes >= 1:
minutes = int(minutes)
return f"{minutes:d} minute{pluralize(minutes)}"
seconds = int(minutes // 60)
return f"{seconds:d} second{pluralize(seconds)}"
def generate_incident_trigger_email_context(
project: Project,
organization: Organization,
alert_rule_serialized_response: AlertRuleSerializerResponse,
incident_serialized_response: DetailedIncidentSerializerResponse,
metric_issue_context: MetricIssueContext,
alert_context: AlertContext,
open_period_context: OpenPeriodContext,
trigger_status: TriggerStatus,
trigger_threshold: float | None,
user: User | RpcUser | None = None,
notification_uuid: str | None = None,
detector_serialized_response: DetectorSerializerResponse | None = None,
) -> dict[str, Any]:
from sentry.notifications.notification_action.utils import should_fire_workflow_actions
from sentry.seer.anomaly_detection.types import AnomalyDetectionThresholdType
snuba_query = metric_issue_context.snuba_query
is_active = trigger_status == TriggerStatus.ACTIVE
is_threshold_type_above = (
alert_context.threshold_type == AlertRuleThresholdType.ABOVE
or alert_context.threshold_type == AnomalyDetectionThresholdType.ABOVE
)
subscription = metric_issue_context.subscription
alert_link_params = {
"referrer": "metric_alert_email",
}
# if alert threshold and threshold type is above then show '>'
# if resolve threshold and threshold type is *BELOW* then show '>'
# we can simplify this to be the below statement
show_greater_than_string = is_active == is_threshold_type_above
environment_string = snuba_query.environment.name if snuba_query.environment else "All"
aggregate = snuba_query.aggregate
if is_mri_field(aggregate):
aggregate = format_mri_field(aggregate)
elif CRASH_RATE_ALERT_AGGREGATE_ALIAS in aggregate:
aggregate = aggregate.split(f"AS {CRASH_RATE_ALERT_AGGREGATE_ALIAS}")[0].strip()
threshold: None | str | float = None
if alert_context.detection_type == AlertRuleDetectionType.DYNAMIC:
threshold_prefix_string = alert_context.detection_type.title()
threshold = f"({alert_context.sensitivity} responsiveness)"
alert_link_params["type"] = "anomaly_detection"
else:
threshold_prefix_string = ">" if show_greater_than_string else "<"
threshold = trigger_threshold if is_active else alert_context.resolve_threshold
if threshold is None:
# Setting this to trigger threshold because in the case of a resolve if no resolve
# threshold is specified this will be None. Since we add a comparison sign to the
# string it makes sense to set this to the trigger alert threshold if no threshold is
# specified
threshold = trigger_threshold
chart_url = None
if features.has("organizations:metric-alert-chartcuterie", organization):
try:
chart_url = build_metric_alert_chart(
organization=organization,
alert_rule_serialized_response=alert_rule_serialized_response,
selected_incident_serialized=incident_serialized_response,
snuba_query=snuba_query,
alert_context=alert_context,
open_period_context=open_period_context,
size=ChartSize({"width": 600, "height": 200}),
subscription=subscription,
detector_serialized_response=detector_serialized_response,
)
except Exception:
logging.exception("Error while attempting to build_metric_alert_chart")
tz = settings.SENTRY_DEFAULT_TIME_ZONE
if user is not None:
options: list[RpcUserOption] = user_option_service.get_many(
filter=dict(keys=["timezone"], user_ids=[user.id])
)
if options and options[0].value is not None:
tz = options[0].value
if notification_uuid:
alert_link_params["notification_uuid"] = notification_uuid
if features.has("organizations:workflow-engine-ui-links", organization):
assert (
metric_issue_context.group is not None
), "Group should not be None when workflow engine ui links are enabled"
alert_link = organization.absolute_url(
reverse(
"sentry-group",
kwargs={
"organization_slug": organization.slug,
"project_id": project.id,
"group_id": metric_issue_context.group.id,
},
),
query=urlencode(alert_link_params),
)
elif should_fire_workflow_actions(organization, MetricIssue.type_id):
# lookup the incident_id from the open_period_identifier
try:
incident_group_open_period = IncidentGroupOpenPeriod.objects.get(
group_open_period_id=metric_issue_context.open_period_identifier
)
incident_identifier = incident_group_open_period.incident_identifier
except IncidentGroupOpenPeriod.DoesNotExist:
# the corresponding metric detector was not dual written
incident_identifier = get_fake_id_from_object_id(
metric_issue_context.open_period_identifier
)
alert_link = organization.absolute_url(
reverse(
"sentry-metric-alert",
kwargs={
"organization_slug": organization.slug,
"incident_id": incident_identifier,
},
),
query=urlencode(alert_link_params),
)
else:
alert_link = organization.absolute_url(
reverse(
"sentry-metric-alert",
kwargs={
"organization_slug": organization.slug,
"incident_id": metric_issue_context.open_period_identifier,
},
),
query=urlencode(alert_link_params),
)
snooze_alert_url = None
snooze_alert = False
# We don't have user muting for workflows in the new workflow engine system
# so we don't need to show the snooze alert url
if not features.has("organizations:workflow-engine-ui-links", organization):
snooze_alert = True
snooze_alert_url = alert_link + "&" + urlencode({"mute": "1"})
query_str = build_query_strings(subscription=subscription, snuba_query=snuba_query).query_string
return {
"link": alert_link,
"project_slug": project.slug,
"incident_name": metric_issue_context.title,
"environment": environment_string,
"time_window": format_duration(snuba_query.time_window / 60),
"triggered_at": open_period_context.date_started,
"aggregate": aggregate,
"query": query_str,
"threshold": threshold,
# if alert threshold and threshold type is above then show '>'
# if resolve threshold and threshold type is *BELOW* then show '>'
"threshold_prefix_string": threshold_prefix_string,
"status": INCIDENT_STATUS[metric_issue_context.new_status],
"status_key": INCIDENT_STATUS[metric_issue_context.new_status].lower(),
"is_critical": metric_issue_context.new_status == IncidentStatus.CRITICAL,
"is_warning": metric_issue_context.new_status == IncidentStatus.WARNING,
"unsubscribe_link": None,
"chart_url": chart_url,
"timezone": tz,
"snooze_alert": snooze_alert,
"snooze_alert_url": snooze_alert_url,
}
def build_message(context: dict[str, Any], status: TriggerStatus, user_id: int) -> MessageBuilder:
display = EMAIL_STATUS_DISPLAY[status]
return MessageBuilder(
subject="[{}] {} - {}".format(
context["status"], context["incident_name"], context["project_slug"]
),
template="sentry/emails/incidents/trigger.txt",
html_template="sentry/emails/incidents/trigger.html",
type=f"incident.alert_rule_{display.lower()}",
context=context,
headers={"X-SMTPAPI": orjson.dumps({"category": "metric_alert_email"}).decode()},
)
def email_users(
metric_issue_context: MetricIssueContext,
open_period_context: OpenPeriodContext,
alert_context: AlertContext,
alert_rule_serialized_response: AlertRuleSerializerResponse,
incident_serialized_response: DetailedIncidentSerializerResponse,
trigger_status: TriggerStatus,
targets: list[tuple[int, str]],
project: Project,
notification_uuid: str | None = None,
detector_serialized_response: DetectorSerializerResponse | None = None,
) -> list[int]:
users = user_service.get_many_by_id(ids=[user_id for user_id, _ in targets])
sent_to_users = []
for index, (user_id, email) in enumerate(targets):
user = users[index]
# TODO(iamrajjoshi): Temporarily assert that alert_threshold is not None
# This should be removed when we update the typing and fetch the trigger_threshold in the new system
if trigger_status == TriggerStatus.ACTIVE:
assert alert_context.alert_threshold is not None
email_context = generate_incident_trigger_email_context(
project=project,
organization=project.organization,
metric_issue_context=metric_issue_context,
alert_rule_serialized_response=alert_rule_serialized_response,
incident_serialized_response=incident_serialized_response,
alert_context=alert_context,
open_period_context=open_period_context,
trigger_status=trigger_status,
trigger_threshold=alert_context.alert_threshold,
user=user,
notification_uuid=notification_uuid,
detector_serialized_response=detector_serialized_response,
)
build_message(email_context, trigger_status, user_id).send_async(to=[email])
sent_to_users.append(user_id)
return sent_to_users
| SentryAppActionHandler |
python | getsentry__sentry | src/sentry/api/bases/project.py | {
"start": 1064,
"end": 1242
} | class ____(Exception):
def __init__(self, new_url: str, slug: str):
self.new_url = new_url
self.slug = slug
super().__init__(new_url, slug)
| ProjectMoved |
python | Lightning-AI__lightning | tests/tests_pytorch/models/test_hparams.py | {
"start": 19124,
"end": 20779
} | class ____(BoringModel):
def __init__(self, arg1, arg2):
super().__init__()
self.save_hyperparameters(arg1, arg2)
@pytest.mark.parametrize(
("cls", "config"), [(AnotherArgModel, {"arg1": 42}), (OtherArgsModel, {"arg1": 3.14, "arg2": "abc"})]
)
def test_single_config_models_fail(tmp_path, cls, config):
"""Test fail on passing unsupported config type."""
with pytest.raises(ValueError, match=r"Primitives \(<class 'bool'>*"):
_ = cls(**config)
@pytest.mark.parametrize("past_key", ["module_arguments"])
def test_load_past_checkpoint(tmp_path, past_key):
model = CustomBoringModel()
# verify we can train
trainer = Trainer(default_root_dir=tmp_path, max_epochs=1)
trainer.fit(model)
# make sure the raw checkpoint saved the properties
raw_checkpoint_path = _raw_checkpoint_path(trainer)
raw_checkpoint = torch.load(raw_checkpoint_path, weights_only=True)
raw_checkpoint[past_key] = raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]
raw_checkpoint["hparams_type"] = "Namespace"
raw_checkpoint[past_key]["batch_size"] = -17
del raw_checkpoint[LightningModule.CHECKPOINT_HYPER_PARAMS_KEY]
# save back the checkpoint
torch.save(raw_checkpoint, raw_checkpoint_path)
# verify that model loads correctly
model2 = CustomBoringModel.load_from_checkpoint(raw_checkpoint_path)
assert model2.hparams.batch_size == -17
def test_hparams_pickle(tmp_path):
ad = AttributeDict({"key1": 1, "key2": "abc"})
pkl = pickle.dumps(ad)
assert ad == pickle.loads(pkl)
pkl = cloudpickle.dumps(ad)
assert ad == pickle.loads(pkl)
| OtherArgsModel |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/databricks_sql_datasource.py | {
"start": 1914,
"end": 2149
} | class ____(pydantic.UrlError):
"""
Custom Pydantic error for missing http_path in DatabricksDsn query.
"""
code = "url.query.http_path"
msg_template = "'http_path' query param is invalid or missing"
| _UrlHttpPathError |
python | spyder-ide__spyder | external-deps/python-lsp-server/pylsp/plugins/rope_autoimport.py | {
"start": 721,
"end": 13367
} | class ____:
"""Handles the cache creation."""
def __init__(self) -> None:
self.thread = None
def reload_cache(
self,
config: Config,
workspace: Workspace,
files: Optional[list[Document]] = None,
single_thread: Optional[bool] = True,
):
if self.is_blocked():
return
memory: bool = config.plugin_settings("rope_autoimport").get("memory", False)
rope_config = config.settings().get("rope", {})
autoimport = workspace._rope_autoimport(rope_config, memory)
resources: Optional[list[Resource]] = (
None
if files is None
else [document._rope_resource(rope_config) for document in files]
)
if single_thread:
self._reload_cache(workspace, autoimport, resources)
else:
# Creating the cache may take 10-20s for a environment with 5k python modules. That's
# why we decided to move cache creation into its own thread.
self.thread = threading.Thread(
target=self._reload_cache, args=(workspace, autoimport, resources)
)
self.thread.start()
def _reload_cache(
self,
workspace: Workspace,
autoimport: AutoImport,
resources: Optional[list[Resource]] = None,
) -> None:
task_handle = PylspTaskHandle(workspace)
autoimport.generate_cache(task_handle=task_handle, resources=resources)
autoimport.generate_modules_cache(task_handle=task_handle)
def is_blocked(self):
return self.thread and self.thread.is_alive()
@hookimpl
def pylsp_settings() -> dict[str, dict[str, dict[str, Any]]]:
# Default rope_completion to disabled
return {
"plugins": {
"rope_autoimport": {
"enabled": False,
"memory": False,
"completions": {
"enabled": True,
},
"code_actions": {
"enabled": True,
},
}
}
}
def _should_insert(expr: tree.BaseNode, word_node: tree.Leaf) -> bool:
"""
Check if we should insert the word_node on the given expr.
Works for both correct and incorrect code. This is because the
user is often working on the code as they write it.
"""
if not word_node:
return False
if len(expr.children) == 0:
return True
first_child = expr.children[0]
if isinstance(first_child, tree.EndMarker):
if "#" in first_child.prefix:
return False # Check for single line comment
if first_child == word_node:
return True # If the word is the first word then its fine
if len(expr.children) > 1:
if any(
node.type == "operator" and "." in node.value or node.type == "trailer"
for node in expr.children
):
return False # Check if we're on a method of a function
if isinstance(first_child, (tree.PythonErrorNode, tree.PythonNode)):
# The tree will often include error nodes like this to indicate errors
# we want to ignore errors since the code is being written
return _should_insert(first_child, word_node)
return _handle_first_child(first_child, expr, word_node)
def _handle_first_child(
first_child: NodeOrLeaf, expr: tree.BaseNode, word_node: tree.Leaf
) -> bool:
"""Check if we suggest imports given the following first child."""
if isinstance(first_child, tree.Import):
return False
if isinstance(first_child, (tree.PythonLeaf, tree.PythonErrorLeaf)):
# Check if the first item is a from or import statement even when incomplete
if first_child.value in ("import", "from"):
return False
if isinstance(first_child, tree.Keyword):
if first_child.value == "def":
return _should_import_function(word_node, expr)
if first_child.value == "class":
return _should_import_class(word_node, expr)
return True
def _should_import_class(word_node: tree.Leaf, expr: tree.BaseNode) -> bool:
prev_node = None
for node in expr.children:
if isinstance(node, tree.Name):
if isinstance(prev_node, tree.Operator):
if node == word_node and prev_node.value == "(":
return True
prev_node = node
return False
def _should_import_function(word_node: tree.Leaf, expr: tree.BaseNode) -> bool:
prev_node = None
for node in expr.children:
if _handle_argument(node, word_node):
return True
if isinstance(prev_node, tree.Operator):
if prev_node.value == "->":
if node == word_node:
return True
prev_node = node
return False
def _handle_argument(node: NodeOrLeaf, word_node: tree.Leaf):
if isinstance(node, tree.PythonNode):
if node.type == "tfpdef":
if node.children[2] == word_node:
return True
if node.type == "parameters":
for parameter in node.children:
if _handle_argument(parameter, word_node):
return True
return False
def _process_statements(
suggestions: list[SearchResult],
doc_uri: str,
word: str,
autoimport: AutoImport,
document: Document,
feature: str = "completions",
) -> Generator[dict[str, Any], None, None]:
for suggestion in suggestions:
insert_line = autoimport.find_insertion_line(document.source) - 1
start = {"line": insert_line, "character": 0}
edit_range = {"start": start, "end": start}
edit = {"range": edit_range, "newText": suggestion.import_statement + "\n"}
score = _get_score(
suggestion.source, suggestion.import_statement, suggestion.name, word
)
if score > _score_max:
continue
# TODO make this markdown
if feature == "completions":
yield {
"label": suggestion.name,
"kind": suggestion.itemkind,
"sortText": _sort_import(score),
"data": {"doc_uri": doc_uri},
"detail": _document(suggestion.import_statement),
"additionalTextEdits": [edit],
}
elif feature == "code_actions":
yield {
"title": suggestion.import_statement,
"kind": "quickfix",
"edit": {"changes": {doc_uri: [edit]}},
# data is a supported field for codeAction responses
# See https://microsoft.github.io/language-server-protocol/specifications/lsp/3.17/specification/#textDocument_codeAction
"data": {"sortText": _sort_import(score)},
}
else:
raise ValueError(f"Unknown feature: {feature}")
def get_names(script: Script) -> set[str]:
"""Get all names to ignore from the current file."""
raw_names = script.get_names(definitions=True)
log.debug(raw_names)
return {name.name for name in raw_names}
@hookimpl
def pylsp_completions(
config: Config,
workspace: Workspace,
document: Document,
position,
ignored_names: Union[set[str], None],
):
"""Get autoimport suggestions."""
if (
not config.plugin_settings("rope_autoimport")
.get("completions", {})
.get("enabled", True)
) or cache.is_blocked():
return []
line = document.lines[position["line"]]
expr = parso.parse(line)
word_node = expr.get_leaf_for_position((1, position["character"]))
if not _should_insert(expr, word_node):
return []
word = word_node.value
log.debug(f"autoimport: searching for word: {word}")
rope_config = config.settings(document_path=document.path).get("rope", {})
ignored_names: set[str] = ignored_names or get_names(
document.jedi_script(use_document_path=True)
)
autoimport = workspace._rope_autoimport(rope_config)
suggestions = list(autoimport.search_full(word, ignored_names=ignored_names))
results = sorted(
_process_statements(
suggestions, document.uri, word, autoimport, document, "completions"
),
key=lambda statement: statement["sortText"],
)
if len(results) > MAX_RESULTS_COMPLETIONS:
results = results[:MAX_RESULTS_COMPLETIONS]
return results
def _document(import_statement: str) -> str:
return """# Auto-Import\n""" + import_statement
def _get_score(
source: int, full_statement: str, suggested_name: str, desired_name
) -> int:
import_length = len("import")
full_statement_score = len(full_statement) - import_length
suggested_name_score = (len(suggested_name) - len(desired_name)) ** 2
source_score = 20 * source
return suggested_name_score + full_statement_score + source_score
def _sort_import(score: int) -> str:
score = max(min(score, (_score_max) - 1), 0)
# Since we are using ints, we need to pad them.
# We also want to prioritize autoimport behind everything since its the last priority.
# The minimum is to prevent score from overflowing the pad
return "[z" + str(score).rjust(_score_pow, "0")
def get_name_or_module(document, diagnostic) -> str:
start = diagnostic["range"]["start"]
return (
parso.parse(document.lines[start["line"]])
.get_leaf_for_position((1, start["character"] + 1))
.value
)
@hookimpl
def pylsp_code_actions(
config: Config,
workspace: Workspace,
document: Document,
range: dict,
context: dict,
) -> list[dict]:
"""
Provide code actions through rope.
Parameters
----------
config : pylsp.config.config.Config
Current config.
workspace : pylsp.workspace.Workspace
Current workspace.
document : pylsp.workspace.Document
Document to apply code actions on.
range : dict
Range argument given by pylsp. Not used here.
context : dict
CodeActionContext given as dict.
Returns
-------
List of dicts containing the code actions.
"""
if (
not config.plugin_settings("rope_autoimport")
.get("code_actions", {})
.get("enabled", True)
) or cache.is_blocked():
return []
log.debug(f"textDocument/codeAction: {document} {range} {context}")
code_actions = []
for diagnostic in context.get("diagnostics", []):
if "undefined name" not in diagnostic.get("message", "").lower():
continue
word = get_name_or_module(document, diagnostic)
log.debug(f"autoimport: searching for word: {word}")
rope_config = config.settings(document_path=document.path).get("rope", {})
autoimport = workspace._rope_autoimport(rope_config)
suggestions = list(autoimport.search_full(word))
log.debug("autoimport: suggestions: %s", suggestions)
results = sorted(
_process_statements(
suggestions,
document.uri,
word,
autoimport,
document,
"code_actions",
),
key=lambda statement: statement["data"]["sortText"],
)
if len(results) > MAX_RESULTS_CODE_ACTIONS:
results = results[:MAX_RESULTS_CODE_ACTIONS]
code_actions.extend(results)
return code_actions
@hookimpl
def pylsp_initialize(config: Config, workspace: Workspace) -> None:
"""Initialize AutoImport.
Generates the cache for local and global items.
"""
cache.reload_cache(config, workspace)
@hookimpl
def pylsp_document_did_open(config: Config, workspace: Workspace) -> None:
"""Initialize AutoImport.
Generates the cache for local and global items.
"""
cache.reload_cache(config, workspace)
@hookimpl
def pylsp_document_did_save(
config: Config, workspace: Workspace, document: Document
) -> None:
"""Update the names associated with this document."""
cache.reload_cache(config, workspace, [document])
@hookimpl
def pylsp_workspace_configuration_changed(config: Config, workspace: Workspace) -> None:
"""
Initialize autoimport if it has been enabled through a
workspace/didChangeConfiguration message from the frontend.
Generates the cache for local and global items.
"""
if config.plugin_settings("rope_autoimport").get("enabled", False):
cache.reload_cache(config, workspace)
else:
log.debug("autoimport: Skipping cache reload.")
cache: AutoimportCache = AutoimportCache()
| AutoimportCache |
python | fastai__fastai | nbs/examples/migrating_fastai.py | {
"start": 78,
"end": 1183
} | class ____(nn.Sequential):
def __init__(self):
super().__init__(
nn.Conv2d(1, 32, 3, 1), nn.ReLU(),
nn.Conv2d(32, 64, 3, 1), nn.MaxPool2d(2), nn.Dropout2d(0.25),
Flatten(), nn.Linear(9216, 128), nn.ReLU(), nn.Dropout2d(0.5),
nn.Linear(128, 10), nn.LogSoftmax(dim=1) )
batch_size,test_batch_size = 256,512
epochs,lr = 1,1e-2
kwargs = {'num_workers': 1, 'pin_memory': True}
transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(
datasets.MNIST('../data', train=True, download=True, transform=transform),
batch_size=batch_size, shuffle=True, **kwargs)
test_loader = DataLoader(
datasets.MNIST('../data', train=False, transform=transform),
batch_size=test_batch_size, shuffle=True, **kwargs)
if __name__ == '__main__':
data = DataLoaders(train_loader, test_loader).cuda()
learn = Learner(data, Net(), loss_func=F.nll_loss, opt_func=Adam, metrics=accuracy)
learn.fit_one_cycle(epochs, lr)
| Net |
python | django__django | tests/get_or_create/tests.py | {
"start": 600,
"end": 7364
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
Person.objects.create(
first_name="John", last_name="Lennon", birthday=date(1940, 10, 9)
)
def test_get_or_create_method_with_get(self):
created = Person.objects.get_or_create(
first_name="John",
last_name="Lennon",
defaults={"birthday": date(1940, 10, 9)},
)[1]
self.assertFalse(created)
self.assertEqual(Person.objects.count(), 1)
def test_get_or_create_method_with_create(self):
created = Person.objects.get_or_create(
first_name="George",
last_name="Harrison",
defaults={"birthday": date(1943, 2, 25)},
)[1]
self.assertTrue(created)
self.assertEqual(Person.objects.count(), 2)
def test_get_or_create_redundant_instance(self):
"""
If we execute the exact same statement twice, the second time,
it won't create a Person.
"""
Person.objects.get_or_create(
first_name="George",
last_name="Harrison",
defaults={"birthday": date(1943, 2, 25)},
)
created = Person.objects.get_or_create(
first_name="George",
last_name="Harrison",
defaults={"birthday": date(1943, 2, 25)},
)[1]
self.assertFalse(created)
self.assertEqual(Person.objects.count(), 2)
def test_get_or_create_invalid_params(self):
"""
If you don't specify a value or default value for all required
fields, you will get an error.
"""
with self.assertRaises(IntegrityError):
Person.objects.get_or_create(first_name="Tom", last_name="Smith")
def test_get_or_create_with_pk_property(self):
"""
Using the pk property of a model is allowed.
"""
Thing.objects.get_or_create(pk=1)
def test_get_or_create_with_model_property_defaults(self):
"""Using a property with a setter implemented is allowed."""
t, _ = Thing.objects.get_or_create(
defaults={"capitalized_name_property": "annie"}, pk=1
)
self.assertEqual(t.name, "Annie")
def test_get_or_create_on_related_manager(self):
p = Publisher.objects.create(name="Acme Publishing")
# Create a book through the publisher.
book, created = p.books.get_or_create(name="The Book of Ed & Fred")
self.assertTrue(created)
# The publisher should have one book.
self.assertEqual(p.books.count(), 1)
# Try get_or_create again, this time nothing should be created.
book, created = p.books.get_or_create(name="The Book of Ed & Fred")
self.assertFalse(created)
# And the publisher should still have one book.
self.assertEqual(p.books.count(), 1)
# Add an author to the book.
ed, created = book.authors.get_or_create(name="Ed")
self.assertTrue(created)
# The book should have one author.
self.assertEqual(book.authors.count(), 1)
# Try get_or_create again, this time nothing should be created.
ed, created = book.authors.get_or_create(name="Ed")
self.assertFalse(created)
# And the book should still have one author.
self.assertEqual(book.authors.count(), 1)
# Add a second author to the book.
fred, created = book.authors.get_or_create(name="Fred")
self.assertTrue(created)
# The book should have two authors now.
self.assertEqual(book.authors.count(), 2)
# Create an Author not tied to any books.
Author.objects.create(name="Ted")
# There should be three Authors in total. The book object should have
# two.
self.assertEqual(Author.objects.count(), 3)
self.assertEqual(book.authors.count(), 2)
# Try creating a book through an author.
_, created = ed.books.get_or_create(name="Ed's Recipes", publisher=p)
self.assertTrue(created)
# Now Ed has two Books, Fred just one.
self.assertEqual(ed.books.count(), 2)
self.assertEqual(fred.books.count(), 1)
# Use the publisher's primary key value instead of a model instance.
_, created = ed.books.get_or_create(
name="The Great Book of Ed", publisher_id=p.id
)
self.assertTrue(created)
# Try get_or_create again, this time nothing should be created.
_, created = ed.books.get_or_create(
name="The Great Book of Ed", publisher_id=p.id
)
self.assertFalse(created)
# The publisher should have three books.
self.assertEqual(p.books.count(), 3)
def test_defaults_exact(self):
"""
If you have a field named defaults and want to use it as an exact
lookup, you need to use 'defaults__exact'.
"""
obj, created = Person.objects.get_or_create(
first_name="George",
last_name="Harrison",
defaults__exact="testing",
defaults={
"birthday": date(1943, 2, 25),
"defaults": "testing",
},
)
self.assertTrue(created)
self.assertEqual(obj.defaults, "testing")
obj2, created = Person.objects.get_or_create(
first_name="George",
last_name="Harrison",
defaults__exact="testing",
defaults={
"birthday": date(1943, 2, 25),
"defaults": "testing",
},
)
self.assertFalse(created)
self.assertEqual(obj, obj2)
def test_callable_defaults(self):
"""
Callables in `defaults` are evaluated if the instance is created.
"""
obj, created = Person.objects.get_or_create(
first_name="George",
defaults={"last_name": "Harrison", "birthday": lambda: date(1943, 2, 25)},
)
self.assertTrue(created)
self.assertEqual(date(1943, 2, 25), obj.birthday)
def test_callable_defaults_not_called(self):
def raise_exception():
raise AssertionError
obj, created = Person.objects.get_or_create(
first_name="John",
last_name="Lennon",
defaults={"birthday": lambda: raise_exception()},
)
def test_defaults_not_evaluated_unless_needed(self):
"""`defaults` aren't evaluated if the instance isn't created."""
def raise_exception():
raise AssertionError
obj, created = Person.objects.get_or_create(
first_name="John",
defaults=lazy(raise_exception, object)(),
)
self.assertFalse(created)
| GetOrCreateTests |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 90053,
"end": 90200
} | class ____(_PrintableStructure):
_fields_ = [
('avgFactor', c_uint),
('timeslice', c_uint),
]
| c_nvmlVgpuSchedDataWithARR_t |
python | numba__numba | numba/core/types/functions.py | {
"start": 18908,
"end": 18963
} | class ____(Literal, Opaque):
pass
| MakeFunctionLiteral |
python | sqlalchemy__sqlalchemy | test/ext/test_associationproxy.py | {
"start": 94757,
"end": 94962
} | class ____(
ScalarRemoveTest, fixtures.DeclarativeMappedTest
):
run_create_tables = None
useobject = False
cascade_scalar_deletes = False
uselist = False
| ScalarRemoveScalarScalarNoCascade |
python | getsentry__sentry | src/sentry/preprod/migrations/0001_emerge_upload_models.py | {
"start": 339,
"end": 7305
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
initial = True
dependencies = [
("sentry", "0913_split_discover_dataset_dashboards_self_hosted"),
]
operations = [
migrations.CreateModel(
name="PreprodBuildConfiguration",
fields=[
(
"id",
sentry.db.models.fields.bounded.BoundedBigAutoField(
primary_key=True, serialize=False
),
),
("date_updated", models.DateTimeField(auto_now=True)),
("date_added", models.DateTimeField(auto_now_add=True)),
("name", models.CharField(max_length=255)),
(
"project",
sentry.db.models.fields.foreignkey.FlexibleForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="sentry.project"
),
),
],
options={
"db_table": "sentry_preprodbuildconfiguration",
"unique_together": {("project", "name")},
},
),
migrations.CreateModel(
name="PreprodArtifact",
fields=[
(
"id",
sentry.db.models.fields.bounded.BoundedBigAutoField(
primary_key=True, serialize=False
),
),
("date_updated", models.DateTimeField(auto_now=True)),
("date_added", models.DateTimeField(auto_now_add=True)),
(
"file_id",
sentry.db.models.fields.bounded.BoundedBigIntegerField(
db_index=True, null=True
),
),
("date_built", models.DateTimeField(null=True)),
(
"state",
sentry.db.models.fields.bounded.BoundedPositiveIntegerField(
default=sentry.preprod.models.PreprodArtifact.ArtifactState["UPLOADING"]
),
),
(
"artifact_type",
sentry.db.models.fields.bounded.BoundedPositiveIntegerField(null=True),
),
(
"error_code",
sentry.db.models.fields.bounded.BoundedPositiveIntegerField(null=True),
),
("error_message", models.TextField(null=True)),
("build_version", models.CharField(max_length=255, null=True)),
("build_number", sentry.db.models.fields.bounded.BoundedBigIntegerField(null=True)),
("extras", sentry.db.models.fields.jsonfield.JSONField(null=True)),
(
"project",
sentry.db.models.fields.foreignkey.FlexibleForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="sentry.project"
),
),
(
"build_configuration",
sentry.db.models.fields.foreignkey.FlexibleForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="preprod.preprodbuildconfiguration",
),
),
],
options={
"db_table": "sentry_preprodartifact",
},
),
migrations.CreateModel(
name="PreprodArtifactSizeMetrics",
fields=[
(
"id",
sentry.db.models.fields.bounded.BoundedBigAutoField(
primary_key=True, serialize=False
),
),
("date_updated", models.DateTimeField(auto_now=True)),
("date_added", models.DateTimeField(auto_now_add=True)),
(
"metrics_artifact_type",
sentry.db.models.fields.bounded.BoundedPositiveIntegerField(null=True),
),
(
"state",
sentry.db.models.fields.bounded.BoundedPositiveIntegerField(
default=sentry.preprod.models.PreprodArtifactSizeMetrics.SizeAnalysisState[
"PENDING"
]
),
),
(
"error_code",
sentry.db.models.fields.bounded.BoundedPositiveIntegerField(null=True),
),
("error_message", models.TextField(null=True)),
("processing_version", models.CharField(max_length=255, null=True)),
(
"min_install_size",
sentry.db.models.fields.bounded.BoundedPositiveBigIntegerField(null=True),
),
(
"max_install_size",
sentry.db.models.fields.bounded.BoundedPositiveBigIntegerField(null=True),
),
(
"min_download_size",
sentry.db.models.fields.bounded.BoundedPositiveBigIntegerField(null=True),
),
(
"max_download_size",
sentry.db.models.fields.bounded.BoundedPositiveBigIntegerField(null=True),
),
(
"preprod_artifact",
sentry.db.models.fields.foreignkey.FlexibleForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="preprod.preprodartifact"
),
),
],
options={
"db_table": "sentry_preprodartifactsizemetrics",
"unique_together": {("preprod_artifact", "metrics_artifact_type")},
},
),
]
| Migration |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/hooks/ecs.py | {
"start": 4520,
"end": 6307
} | class ____(Protocol):
"""
A structured Protocol for ``boto3.client('ecs')``.
This is used for type hints on :py:meth:`.EcsOperator.client`.
.. seealso::
- https://mypy.readthedocs.io/en/latest/protocols.html
- https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html
"""
def run_task(self, **kwargs) -> dict:
"""
Run a task.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.run_task
"""
...
def get_waiter(self, x: str) -> Waiter:
"""
Get a waiter.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.get_waiter
"""
...
def describe_tasks(self, cluster: str, tasks) -> dict:
"""
Describe tasks.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.describe_tasks
"""
...
def stop_task(self, cluster, task, reason: str) -> dict:
"""
Stop a task.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.stop_task
"""
...
def describe_task_definition(self, taskDefinition: str) -> dict:
"""
Describe a task definition.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.describe_task_definition
"""
...
def list_tasks(self, cluster: str, launchType: str, desiredStatus: str, family: str) -> dict:
"""
List tasks.
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.list_tasks
"""
...
| EcsProtocol |
python | sympy__sympy | sympy/stats/drv_types.py | {
"start": 1648,
"end": 3533
} | class ____(SingleDiscreteDistribution):
_argnames = ('pdf',)
def __new__(cls, pdf, set=S.Integers):
return Basic.__new__(cls, pdf, set)
@property
def set(self):
return self.args[1]
@staticmethod
def check(pdf, set):
x = Dummy('x')
val = Sum(pdf(x), (x, set._inf, set._sup)).doit()
_value_check(Eq(val, 1) != S.false, "The pdf is incorrect on the given set.")
def DiscreteRV(symbol, density, set=S.Integers, **kwargs):
"""
Create a Discrete Random Variable given the following:
Parameters
==========
symbol : Symbol
Represents name of the random variable.
density : Expression containing symbol
Represents probability density function.
set : set
Represents the region where the pdf is valid, by default is real line.
check : bool
If True, it will check whether the given density
integrates to 1 over the given set. If False, it
will not perform this check. Default is False.
Examples
========
>>> from sympy.stats import DiscreteRV, P, E
>>> from sympy import Rational, Symbol
>>> x = Symbol('x')
>>> n = 10
>>> density = Rational(1, 10)
>>> X = DiscreteRV(x, density, set=set(range(n)))
>>> E(X)
9/2
>>> P(X>3)
3/5
Returns
=======
RandomSymbol
"""
set = sympify(set)
pdf = Piecewise((density, set.as_relational(symbol)), (0, True))
pdf = Lambda(symbol, pdf)
# have a default of False while `rv` should have a default of True
kwargs['check'] = kwargs.pop('check', False)
return rv(symbol.name, DiscreteDistributionHandmade, pdf, set, **kwargs)
#-------------------------------------------------------------------------------
# Flory-Schulz distribution ------------------------------------------------------------
| DiscreteDistributionHandmade |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 286792,
"end": 287486
} | class ____(sgqlc.types.Input):
"""Specifies the attributes for a new or updated required status
check.
"""
__schema__ = github_schema
__field_names__ = ("context", "app_id")
context = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="context")
"""Status check context that must pass for commits to be accepted to
the matching branch.
"""
app_id = sgqlc.types.Field(ID, graphql_name="appId")
"""The ID of the App that must set the status in order for it to be
accepted. Omit this value to use whichever app has recently been
setting this status, or use "any" to allow any app to set the
status.
"""
| RequiredStatusCheckInput |
python | tensorflow__tensorflow | tensorflow/python/keras/saving/saved_model/model_serialization.py | {
"start": 2504,
"end": 2646
} | class ____(ModelSavedModelSaver):
@property
def object_identifier(self):
return constants.SEQUENTIAL_IDENTIFIER
| SequentialSavedModelSaver |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 217870,
"end": 219078
} | class ____(Operation):
def call(self, x1, x2):
return backend.numpy.divide_no_nan(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
output_dtype = dtypes.result_type(
getattr(x1, "dtype", type(x1)),
getattr(x2, "dtype", type(x2)),
float,
)
x1_sparse = getattr(x1, "sparse", False)
x2_sparse = getattr(x2, "sparse", False)
output_sparse = x1_sparse and not x2_sparse
return KerasTensor(
output_shape, dtype=output_dtype, sparse=output_sparse
)
@keras_export(["keras.ops.divide_no_nan", "keras.ops.numpy.divide_no_nan"])
def divide_no_nan(x1, x2):
"""Safe element-wise division which returns 0 where the denominator is 0.
Args:
x1: First input tensor.
x2: Second input tensor.
Returns:
The quotient `x1/x2`, element-wise, with zero where x2 is zero.
"""
if any_symbolic_tensors((x1, x2)):
return DivideNoNan().symbolic_call(x1, x2)
return backend.numpy.divide_no_nan(x1, x2)
| DivideNoNan |
python | kamyu104__LeetCode-Solutions | Python/count-visited-nodes-in-a-directed-graph.py | {
"start": 56,
"end": 959
} | class ____(object):
def countVisitedNodes(self, edges):
"""
:type edges: List[int]
:rtype: List[int]
"""
def find_cycles(adj):
result = [0]*len(adj)
lookup = [0]*len(adj)
stk = [] # added
idx = 0
for u in xrange(len(adj)):
prev = idx
while not lookup[u]:
idx += 1
lookup[u] = idx
stk.append(u) # added
u = adj[u]
if lookup[u] > prev:
l = idx-lookup[u]+1
for _ in xrange(l): # added
result[stk.pop()] = l
while stk: # added
result[stk[-1]] = result[adj[stk[-1]]]+1
stk.pop()
return result
return find_cycles(edges)
| Solution |
python | openai__openai-python | tests/test_transform.py | {
"start": 13762,
"end": 15611
} | class ____(TypedDict):
foo: Annotated[Union[str, Base64FileInput], PropertyInfo(format="base64")]
@parametrize
@pytest.mark.asyncio
async def test_base64_file_input(use_async: bool) -> None:
# strings are left as-is
assert await transform({"foo": "bar"}, TypedDictBase64Input, use_async) == {"foo": "bar"}
# pathlib.Path is automatically converted to base64
assert await transform({"foo": SAMPLE_FILE_PATH}, TypedDictBase64Input, use_async) == {
"foo": "SGVsbG8sIHdvcmxkIQo="
} # type: ignore[comparison-overlap]
# io instances are automatically converted to base64
assert await transform({"foo": io.StringIO("Hello, world!")}, TypedDictBase64Input, use_async) == {
"foo": "SGVsbG8sIHdvcmxkIQ=="
} # type: ignore[comparison-overlap]
assert await transform({"foo": io.BytesIO(b"Hello, world!")}, TypedDictBase64Input, use_async) == {
"foo": "SGVsbG8sIHdvcmxkIQ=="
} # type: ignore[comparison-overlap]
@parametrize
@pytest.mark.asyncio
async def test_transform_skipping(use_async: bool) -> None:
# lists of ints are left as-is
data = [1, 2, 3]
assert await transform(data, List[int], use_async) is data
# iterables of ints are converted to a list
data = iter([1, 2, 3])
assert await transform(data, Iterable[int], use_async) == [1, 2, 3]
@parametrize
@pytest.mark.asyncio
async def test_strips_notgiven(use_async: bool) -> None:
assert await transform({"foo_bar": "bar"}, Foo1, use_async) == {"fooBar": "bar"}
assert await transform({"foo_bar": not_given}, Foo1, use_async) == {}
@parametrize
@pytest.mark.asyncio
async def test_strips_omit(use_async: bool) -> None:
assert await transform({"foo_bar": "bar"}, Foo1, use_async) == {"fooBar": "bar"}
assert await transform({"foo_bar": omit}, Foo1, use_async) == {}
| TypedDictBase64Input |
python | scipy__scipy | scipy/sparse/tests/test_base.py | {
"start": 97483,
"end": 101207
} | class ____:
def test_getelement(self):
def check(dtype):
D = array([[1,0,0],
[4,3,0],
[0,2,0],
[0,0,0]], dtype=dtype)
A = self.spcreator(D)
M,N = D.shape
for i in range(-M, M):
for j in range(-N, N):
assert_equal(A[i,j], D[i,j])
assert_equal(type(A[1,1]), dtype)
for ij in [(0,3),(-1,3),(4,0),(4,3),(4,-1), (1, 2, 3)]:
assert_raises((IndexError, TypeError), A.__getitem__, ij)
for dtype in supported_dtypes:
check(np.dtype(dtype))
def test_setelement(self):
def check(dtype, scalar_container):
A = self.spcreator((3, 4), dtype=dtype)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", WMSG, SparseEfficiencyWarning)
A[0, 0] = scalar_container(dtype.type(0)) # bug 870
A[1, 2] = scalar_container(dtype.type(4.0))
A[0, 1] = scalar_container(dtype.type(3))
A[2, 0] = scalar_container(dtype.type(2.0))
A[0, -1] = scalar_container(dtype.type(8))
A[-1, -2] = scalar_container(dtype.type(7))
A[0, 1] = scalar_container(dtype.type(5))
if dtype != np.bool_:
assert_array_equal(
A.toarray(), [[0, 5, 0, 8], [0, 0, 4, 0], [2, 0, 7, 0]]
)
for ij in [(0, 4), (-1, 4), (3, 0), (3, 4), (3, -1)]:
assert_raises(IndexError, A.__setitem__, ij, 123.0)
for v in [[1, 2, 3], array([1, 2, 3])]:
assert_raises(ValueError, A.__setitem__, (0, 0), v)
if not np.issubdtype(dtype, np.complexfloating) and dtype != np.bool_:
for v in [3j]:
assert_raises(TypeError, A.__setitem__, (0, 0), v)
scalar_containers = [
lambda x: csr_array(np.array([[x]])), np.array, lambda x: x
]
for scalar_container in scalar_containers:
for dtype in supported_dtypes:
check(np.dtype(dtype), scalar_container)
def test_negative_index_assignment(self):
# Regression test for GitHub issue 4428.
def check(dtype):
A = self.spcreator((3, 10), dtype=dtype)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", WMSG, SparseEfficiencyWarning)
A[0, -4] = 1
assert_equal(A[0, -4], 1)
for dtype in self.math_dtypes:
check(np.dtype(dtype))
def test_scalar_assign_2(self):
n, m = (5, 10)
def _test_set(i, j, nitems):
msg = f"{i!r} ; {j!r} ; {nitems!r}"
A = self.spcreator((n, m))
with warnings.catch_warnings():
warnings.filterwarnings("ignore", WMSG, SparseEfficiencyWarning)
A[i, j] = 1
assert_almost_equal(A.sum(), nitems, err_msg=msg)
assert_almost_equal(A[i, j], 1, err_msg=msg)
# [i,j]
for i, j in [(2, 3), (-1, 8), (-1, -2), (array(-1), -2), (-1, array(-2)),
(array(-1), array(-2))]:
_test_set(i, j, 1)
def test_index_scalar_assign(self):
A = self.spcreator((5, 5))
B = np.zeros((5, 5))
with warnings.catch_warnings():
warnings.filterwarnings("ignore", WMSG, SparseEfficiencyWarning)
for C in [A, B]:
C[0,1] = 1
C[3,0] = 4
C[3,0] = 9
assert_array_equal(A.toarray(), B)
@pytest.mark.thread_unsafe(reason="fails in parallel")
| _TestGetSet |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/utils/datafusion.py | {
"start": 845,
"end": 1307
} | class ____(Enum):
"""Enum for Data Fusion pipeline types."""
BATCH = "batch"
STREAM = "stream"
@staticmethod
def from_str(value: str) -> DataFusionPipelineType:
value_to_item = {item.value: item for item in DataFusionPipelineType}
if value in value_to_item:
return value_to_item[value]
raise ValueError(f"Invalid value '{value}'. Valid values are: {[i for i in value_to_item.keys()]}")
| DataFusionPipelineType |
python | pypa__pip | src/pip/_vendor/dependency_groups/_implementation.py | {
"start": 1072,
"end": 1706
} | class ____(ValueError):
"""
An error representing the detection of a cycle.
"""
def __init__(self, requested_group: str, group: str, include_group: str) -> None:
self.requested_group = requested_group
self.group = group
self.include_group = include_group
if include_group == group:
reason = f"{group} includes itself"
else:
reason = f"{include_group} -> {group}, {group} -> {include_group}"
super().__init__(
"Cyclic dependency group include while resolving "
f"{requested_group}: {reason}"
)
| CyclicDependencyError |
python | apache__airflow | airflow-core/src/airflow/utils/scheduler_health.py | {
"start": 1193,
"end": 2574
} | class ____(BaseHTTPRequestHandler):
"""Small webserver to serve scheduler health check."""
def do_GET(self):
if self.path == "/health":
try:
with create_session() as session:
scheduler_job = session.scalar(
select(Job)
.filter_by(job_type=SchedulerJobRunner.job_type)
.filter_by(hostname=get_hostname())
.order_by(Job.latest_heartbeat.desc())
.limit(1)
)
if scheduler_job and scheduler_job.is_alive():
self.send_response(200)
self.end_headers()
else:
self.send_error(503)
except Exception:
log.exception("Exception when executing Health check")
self.send_error(503)
else:
self.send_error(404)
def serve_health_check():
"""Start a http server to serve scheduler health check."""
health_check_host = conf.get("scheduler", "SCHEDULER_HEALTH_CHECK_SERVER_HOST")
health_check_port = conf.getint("scheduler", "SCHEDULER_HEALTH_CHECK_SERVER_PORT")
httpd = HTTPServer((health_check_host, health_check_port), HealthServer)
httpd.serve_forever()
if __name__ == "__main__":
serve_health_check()
| HealthServer |
python | django__django | tests/migrations/test_migrations_first/second.py | {
"start": 43,
"end": 699
} | class ____(migrations.Migration):
dependencies = [
("migrations", "thefirst"),
("migrations2", "0002_second"),
]
operations = [
migrations.DeleteModel("Tribble"),
migrations.RemoveField("Author", "silly_field"),
migrations.AddField("Author", "rating", models.IntegerField(default=0)),
migrations.CreateModel(
"Book",
[
("id", models.AutoField(primary_key=True)),
(
"author",
models.ForeignKey("migrations.Author", models.SET_NULL, null=True),
),
],
),
]
| Migration |
python | docker__docker-py | tests/integration/api_container_test.py | {
"start": 25337,
"end": 28157
} | class ____(BaseAPIIntegrationTest):
def test_get_file_archive_from_container(self):
data = 'The Maid and the Pocket Watch of Blood'
ctnr = self.client.create_container(
TEST_IMG, f'sh -c "echo {data} > /vol1/data.txt"',
volumes=['/vol1']
)
self.tmp_containers.append(ctnr)
self.client.start(ctnr)
self.client.wait(ctnr)
with tempfile.NamedTemporaryFile() as destination:
strm, stat = self.client.get_archive(ctnr, '/vol1/data.txt')
for d in strm:
destination.write(d)
destination.seek(0)
retrieved_data = helpers.untar_file(destination, 'data.txt')\
.decode('utf-8')
assert data == retrieved_data.strip()
def test_get_file_stat_from_container(self):
data = 'The Maid and the Pocket Watch of Blood'
ctnr = self.client.create_container(
TEST_IMG, f'sh -c "echo -n {data} > /vol1/data.txt"',
volumes=['/vol1']
)
self.tmp_containers.append(ctnr)
self.client.start(ctnr)
self.client.wait(ctnr)
strm, stat = self.client.get_archive(ctnr, '/vol1/data.txt')
assert 'name' in stat
assert stat['name'] == 'data.txt'
assert 'size' in stat
assert stat['size'] == len(data)
def test_copy_file_to_container(self):
data = b'Deaf To All But The Song'
with tempfile.NamedTemporaryFile(delete=False) as test_file:
test_file.write(data)
test_file.seek(0)
ctnr = self.client.create_container(
TEST_IMG,
f"cat {os.path.join('/vol1/', os.path.basename(test_file.name))}",
volumes=['/vol1']
)
self.tmp_containers.append(ctnr)
with helpers.simple_tar(test_file.name) as test_tar:
self.client.put_archive(ctnr, '/vol1', test_tar)
self.client.start(ctnr)
self.client.wait(ctnr)
logs = self.client.logs(ctnr)
assert logs.strip() == data
def test_copy_directory_to_container(self):
files = ['a.py', 'b.py', 'foo/b.py']
dirs = ['foo', 'bar']
base = helpers.make_tree(dirs, files)
ctnr = self.client.create_container(
TEST_IMG, 'ls -p /vol1', volumes=['/vol1']
)
self.tmp_containers.append(ctnr)
with docker.utils.tar(base) as test_tar:
self.client.put_archive(ctnr, '/vol1', test_tar)
self.client.start(ctnr)
self.client.wait(ctnr)
logs = self.client.logs(ctnr).decode('utf-8')
results = logs.strip().split()
assert 'a.py' in results
assert 'b.py' in results
assert 'foo/' in results
assert 'bar/' in results
| ArchiveTest |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 176117,
"end": 176683
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("organization_id", "forking_enabled", "client_mutation_id")
organization_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="organizationId"
)
forking_enabled = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="forkingEnabled"
)
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| UpdateOrganizationAllowPrivateRepositoryForkingSettingInput |
python | yandexdataschool__Practical_RL | week06_policy_based/env_batch.py | {
"start": 154,
"end": 1342
} | class ____(Space):
def __init__(self, spaces):
first_type = type(spaces[0])
first_shape = spaces[0].shape
first_dtype = spaces[0].dtype
for space in spaces:
if not isinstance(space, first_type):
raise TypeError(
"spaces have different types: {}, {}".format(
first_type, type(space)
)
)
if first_shape != space.shape:
raise ValueError(
"spaces have different shapes: {}, {}".format(
first_shape, space.shape
)
)
if first_dtype != space.dtype:
raise ValueError(
"spaces have different data types: {}, {}".format(
first_dtype, space.dtype
)
)
self.spaces = spaces
super().__init__(shape=self.spaces[0].shape, dtype=self.spaces[0].dtype)
def sample(self):
return np.stack([space.sample() for space in self.spaces])
def __getattr__(self, attr):
return getattr(self.spaces[0], attr)
| SpaceBatch |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/lib/metadata_service/models/generated/ConnectorRegistryReleases.py | {
"start": 11450,
"end": 13780
} | class ____(BaseModel):
class Config:
extra = Extra.allow
destinationDefinitionId: UUID
name: str
dockerRepository: str
dockerImageTag: str
documentationUrl: str
icon: Optional[str] = None
iconUrl: Optional[str] = None
spec: Dict[str, Any]
tombstone: Optional[bool] = Field(
False,
description="if false, the configuration is active. if true, then this configuration is permanently off.",
)
public: Optional[bool] = Field(
False,
description="true if this connector definition is available to all workspaces",
)
custom: Optional[bool] = Field(
False, description="whether this is a custom connector definition"
)
releaseStage: Optional[ReleaseStage] = None
supportLevel: Optional[SupportLevel] = None
releaseDate: Optional[date] = Field(
None,
description="The date when this connector was first released, in yyyy-mm-dd format.",
)
tags: Optional[List[str]] = Field(
None,
description="An array of tags that describe the connector. E.g: language:python, keyword:rds, etc.",
)
resourceRequirements: Optional[ActorDefinitionResourceRequirements] = None
protocolVersion: Optional[str] = Field(
None, description="the Airbyte Protocol version supported by the connector"
)
normalizationConfig: Optional[NormalizationDestinationDefinitionConfig] = None
supportsDbt: Optional[bool] = Field(
None,
description="an optional flag indicating whether DBT is used in the normalization. If the flag value is NULL - DBT is not used.",
)
allowedHosts: Optional[AllowedHosts] = None
releases: Optional[ConnectorRegistryReleases] = None
ab_internal: Optional[AirbyteInternal] = None
supportsRefreshes: Optional[bool] = False
supportsFileTransfer: Optional[bool] = False
supportsDataActivation: Optional[bool] = False
generated: Optional[GeneratedFields] = None
packageInfo: Optional[ConnectorPackageInfo] = None
language: Optional[str] = Field(
None, description="The language the connector is written in"
)
ConnectorRegistryReleases.update_forward_refs()
ConnectorReleaseCandidates.update_forward_refs()
VersionReleaseCandidate.update_forward_refs()
| ConnectorRegistryDestinationDefinition |
python | numba__numba | numba/core/typeinfer.py | {
"start": 5594,
"end": 6393
} | class ____(object):
"""
A simple constraint for direct propagation of types for assignments.
"""
def __init__(self, dst, src, loc):
self.dst = dst
self.src = src
self.loc = loc
def __call__(self, typeinfer):
with new_error_context("typing of assignment at {loc}",
loc=self.loc):
typeinfer.copy_type(self.src, self.dst, loc=self.loc)
# If `dst` is refined, notify us
typeinfer.refine_map[self.dst] = self
def refine(self, typeinfer, target_type):
# Do not back-propagate to locked variables (e.g. constants)
assert target_type.is_precise()
typeinfer.add_type(self.src, target_type, unless_locked=True,
loc=self.loc)
| Propagate |
python | PyCQA__pylint | doc/data/messages/m/missing-class-docstring/bad.py | {
"start": 0,
"end": 162
} | class ____: # [missing-class-docstring]
def __init__(self, first_name, last_name):
self.first_name = first_name
self.last_name = last_name
| Person |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/resources.py | {
"start": 40715,
"end": 41654
} | class ____(StateBackedDefinitionsLoader[AirbyteWorkspaceData]):
workspace: Union[AirbyteWorkspace, AirbyteCloudWorkspace]
translator: DagsterAirbyteTranslator
connection_selector_fn: Optional[Callable[[AirbyteConnection], bool]]
@property
def defs_key(self) -> str:
return f"{AIRBYTE_RECONSTRUCTION_METADATA_KEY_PREFIX}.{self.workspace.workspace_id}"
def fetch_state(self) -> AirbyteWorkspaceData:
return self.workspace.fetch_airbyte_workspace_data()
def defs_from_state(self, state: AirbyteWorkspaceData) -> Definitions:
all_asset_specs = [
self.translator.get_asset_spec(props)
for props in state.to_airbyte_connection_table_props_data()
if not self.connection_selector_fn
or self.connection_selector_fn(state.connections_by_id[props.connection_id])
]
return Definitions(assets=all_asset_specs)
| AirbyteWorkspaceDefsLoader |
python | apache__airflow | airflow-core/src/airflow/models/asset.py | {
"start": 14374,
"end": 16036
} | class ____(Base):
"""Reference from a DAG to an asset name reference of which it is a consumer."""
name: Mapped[str] = mapped_column(
String(length=1500).with_variant(
String(
length=1500,
# latin1 allows for more indexed length in mysql
# and this field should only be ascii chars
collation="latin1_general_cs",
),
"mysql",
),
primary_key=True,
nullable=False,
)
dag_id: Mapped[str] = mapped_column(StringID(), primary_key=True, nullable=False)
created_at: Mapped[datetime] = mapped_column(UtcDateTime, default=timezone.utcnow, nullable=False)
dag = relationship("DagModel", back_populates="schedule_asset_name_references")
__tablename__ = "dag_schedule_asset_name_reference"
__table_args__ = (
PrimaryKeyConstraint(name, dag_id, name="dsanr_pkey"),
ForeignKeyConstraint(
columns=(dag_id,),
refcolumns=["dag.dag_id"],
name="dsanr_dag_id_fkey",
ondelete="CASCADE",
),
Index("idx_dag_schedule_asset_name_reference_dag_id", dag_id),
)
def __eq__(self, other: object) -> bool:
if isinstance(other, self.__class__):
return self.name == other.name and self.dag_id == other.dag_id
return NotImplemented
def __hash__(self):
return hash(self.__mapper__.primary_key)
def __repr__(self):
args = [f"{x.name}={getattr(self, x.name)!r}" for x in self.__mapper__.primary_key]
return f"{self.__class__.__name__}({', '.join(args)})"
| DagScheduleAssetNameReference |
python | huggingface__transformers | src/transformers/models/electra/modeling_electra.py | {
"start": 19322,
"end": 20728
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([ElectraLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)])
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(
hidden_states,
attention_mask,
encoder_hidden_states, # as a positional argument for gradient checkpointing
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
)
| ElectraEncoder |
python | graphql-python__graphene | graphene/types/tests/test_objecttype.py | {
"start": 378,
"end": 437
} | class ____(Interface):
ifield = Field(MyType)
| MyInterface |
python | langchain-ai__langchain | libs/core/langchain_core/runnables/base.py | {
"start": 130204,
"end": 148011
} | class ____(RunnableSerializable[Input, dict[str, Any]]):
"""Runnable that runs a mapping of `Runnable`s in parallel.
Returns a mapping of their outputs.
`RunnableParallel` is one of the two main composition primitives,
alongside `RunnableSequence`. It invokes `Runnable`s concurrently, providing the
same input to each.
A `RunnableParallel` can be instantiated directly or by using a dict literal
within a sequence.
Here is a simple example that uses functions to illustrate the use of
`RunnableParallel`:
```python
from langchain_core.runnables import RunnableLambda
def add_one(x: int) -> int:
return x + 1
def mul_two(x: int) -> int:
return x * 2
def mul_three(x: int) -> int:
return x * 3
runnable_1 = RunnableLambda(add_one)
runnable_2 = RunnableLambda(mul_two)
runnable_3 = RunnableLambda(mul_three)
sequence = runnable_1 | { # this dict is coerced to a RunnableParallel
"mul_two": runnable_2,
"mul_three": runnable_3,
}
# Or equivalently:
# sequence = runnable_1 | RunnableParallel(
# {"mul_two": runnable_2, "mul_three": runnable_3}
# )
# Also equivalently:
# sequence = runnable_1 | RunnableParallel(
# mul_two=runnable_2,
# mul_three=runnable_3,
# )
sequence.invoke(1)
await sequence.ainvoke(1)
sequence.batch([1, 2, 3])
await sequence.abatch([1, 2, 3])
```
`RunnableParallel` makes it easy to run `Runnable`s in parallel. In the below
example, we simultaneously stream output from two different `Runnable` objects:
```python
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel
from langchain_openai import ChatOpenAI
model = ChatOpenAI()
joke_chain = (
ChatPromptTemplate.from_template("tell me a joke about {topic}") | model
)
poem_chain = (
ChatPromptTemplate.from_template("write a 2-line poem about {topic}")
| model
)
runnable = RunnableParallel(joke=joke_chain, poem=poem_chain)
# Display stream
output = {key: "" for key, _ in runnable.output_schema()}
for chunk in runnable.stream({"topic": "bear"}):
for key in chunk:
output[key] = output[key] + chunk[key].content
print(output) # noqa: T201
```
"""
steps__: Mapping[str, Runnable[Input, Any]]
def __init__(
self,
steps__: Mapping[
str,
Runnable[Input, Any]
| Callable[[Input], Any]
| Mapping[str, Runnable[Input, Any] | Callable[[Input], Any]],
]
| None = None,
**kwargs: Runnable[Input, Any]
| Callable[[Input], Any]
| Mapping[str, Runnable[Input, Any] | Callable[[Input], Any]],
) -> None:
"""Create a `RunnableParallel`.
Args:
steps__: The steps to include.
**kwargs: Additional steps to include.
"""
merged = {**steps__} if steps__ is not None else {}
merged.update(kwargs)
super().__init__(
steps__={key: coerce_to_runnable(r) for key, r in merged.items()}
)
@classmethod
@override
def is_lc_serializable(cls) -> bool:
"""Return `True` as this class is serializable."""
return True
@classmethod
@override
def get_lc_namespace(cls) -> list[str]:
"""Get the namespace of the LangChain object.
Returns:
`["langchain", "schema", "runnable"]`
"""
return ["langchain", "schema", "runnable"]
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
@override
def get_name(self, suffix: str | None = None, *, name: str | None = None) -> str:
"""Get the name of the `Runnable`.
Args:
suffix: The suffix to use.
name: The name to use.
Returns:
The name of the `Runnable`.
"""
name = name or self.name or f"RunnableParallel<{','.join(self.steps__.keys())}>"
return super().get_name(suffix, name=name)
@property
@override
def InputType(self) -> Any:
"""The type of the input to the `Runnable`."""
for step in self.steps__.values():
if step.InputType:
return step.InputType
return Any
@override
def get_input_schema(self, config: RunnableConfig | None = None) -> type[BaseModel]:
"""Get the input schema of the `Runnable`.
Args:
config: The config to use.
Returns:
The input schema of the `Runnable`.
"""
if all(
s.get_input_schema(config).model_json_schema().get("type", "object")
== "object"
for s in self.steps__.values()
):
for step in self.steps__.values():
fields = step.get_input_schema(config).model_fields
root_field = fields.get("root")
if root_field is not None and root_field.annotation != Any:
return super().get_input_schema(config)
# This is correct, but pydantic typings/mypy don't think so.
return create_model_v2(
self.get_name("Input"),
field_definitions={
k: (v.annotation, v.default)
for step in self.steps__.values()
for k, v in step.get_input_schema(config).model_fields.items()
if k != "__root__"
},
)
return super().get_input_schema(config)
@override
def get_output_schema(
self, config: RunnableConfig | None = None
) -> type[BaseModel]:
"""Get the output schema of the `Runnable`.
Args:
config: The config to use.
Returns:
The output schema of the `Runnable`.
"""
fields = {k: (v.OutputType, ...) for k, v in self.steps__.items()}
return create_model_v2(self.get_name("Output"), field_definitions=fields)
@property
@override
def config_specs(self) -> list[ConfigurableFieldSpec]:
"""Get the config specs of the `Runnable`.
Returns:
The config specs of the `Runnable`.
"""
return get_unique_config_specs(
spec for step in self.steps__.values() for spec in step.config_specs
)
@override
def get_graph(self, config: RunnableConfig | None = None) -> Graph:
"""Get the graph representation of the `Runnable`.
Args:
config: The config to use.
Returns:
The graph representation of the `Runnable`.
Raises:
ValueError: If a `Runnable` has no first or last node.
"""
# Import locally to prevent circular import
from langchain_core.runnables.graph import Graph # noqa: PLC0415
graph = Graph()
input_node = graph.add_node(self.get_input_schema(config))
output_node = graph.add_node(self.get_output_schema(config))
for step in self.steps__.values():
step_graph = step.get_graph()
step_graph.trim_first_node()
step_graph.trim_last_node()
if not step_graph:
graph.add_edge(input_node, output_node)
else:
step_first_node, step_last_node = graph.extend(step_graph)
if not step_first_node:
msg = f"Runnable {step} has no first node"
raise ValueError(msg)
if not step_last_node:
msg = f"Runnable {step} has no last node"
raise ValueError(msg)
graph.add_edge(input_node, step_first_node)
graph.add_edge(step_last_node, output_node)
return graph
@override
def __repr__(self) -> str:
map_for_repr = ",\n ".join(
f"{k}: {indent_lines_after_first(repr(v), ' ' + k + ': ')}"
for k, v in self.steps__.items()
)
return "{\n " + map_for_repr + "\n}"
@override
def invoke(
self, input: Input, config: RunnableConfig | None = None, **kwargs: Any
) -> dict[str, Any]:
# setup callbacks
config = ensure_config(config)
callback_manager = CallbackManager.configure(
inheritable_callbacks=config.get("callbacks"),
local_callbacks=None,
verbose=False,
inheritable_tags=config.get("tags"),
local_tags=None,
inheritable_metadata=config.get("metadata"),
local_metadata=None,
)
# start the root run
run_manager = callback_manager.on_chain_start(
None,
input,
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
def _invoke_step(
step: Runnable[Input, Any], input_: Input, config: RunnableConfig, key: str
) -> Any:
child_config = patch_config(
config,
# mark each step as a child run
callbacks=run_manager.get_child(f"map:key:{key}"),
)
with set_config_context(child_config) as context:
return context.run(
step.invoke,
input_,
child_config,
)
# gather results from all steps
try:
# copy to avoid issues from the caller mutating the steps during invoke()
steps = dict(self.steps__)
with get_executor_for_config(config) as executor:
futures = [
executor.submit(_invoke_step, step, input, config, key)
for key, step in steps.items()
]
output = {
key: future.result()
for key, future in zip(steps, futures, strict=False)
}
# finish the root run
except BaseException as e:
run_manager.on_chain_error(e)
raise
else:
run_manager.on_chain_end(output)
return output
@override
async def ainvoke(
self,
input: Input,
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> dict[str, Any]:
# setup callbacks
config = ensure_config(config)
callback_manager = get_async_callback_manager_for_config(config)
# start the root run
run_manager = await callback_manager.on_chain_start(
None,
input,
name=config.get("run_name") or self.get_name(),
run_id=config.pop("run_id", None),
)
async def _ainvoke_step(
step: Runnable[Input, Any], input_: Input, config: RunnableConfig, key: str
) -> Any:
child_config = patch_config(
config,
callbacks=run_manager.get_child(f"map:key:{key}"),
)
with set_config_context(child_config) as context:
return await coro_with_context(
step.ainvoke(input_, child_config), context, create_task=True
)
# gather results from all steps
try:
# copy to avoid issues from the caller mutating the steps during invoke()
steps = dict(self.steps__)
results = await asyncio.gather(
*(
_ainvoke_step(
step,
input,
# mark each step as a child run
config,
key,
)
for key, step in steps.items()
)
)
output = dict(zip(steps, results, strict=False))
# finish the root run
except BaseException as e:
await run_manager.on_chain_error(e)
raise
else:
await run_manager.on_chain_end(output)
return output
def _transform(
self,
inputs: Iterator[Input],
run_manager: CallbackManagerForChainRun,
config: RunnableConfig,
) -> Iterator[AddableDict]:
# Shallow copy steps to ignore mutations while in progress
steps = dict(self.steps__)
# Each step gets a copy of the input iterator,
# which is consumed in parallel in a separate thread.
input_copies = list(safetee(inputs, len(steps), lock=threading.Lock()))
with get_executor_for_config(config) as executor:
# Create the transform() generator for each step
named_generators = [
(
name,
step.transform(
input_copies.pop(),
patch_config(
config, callbacks=run_manager.get_child(f"map:key:{name}")
),
),
)
for name, step in steps.items()
]
# Start the first iteration of each generator
futures = {
executor.submit(next, generator): (step_name, generator)
for step_name, generator in named_generators
}
# Yield chunks from each as they become available,
# and start the next iteration of that generator that yielded it.
# When all generators are exhausted, stop.
while futures:
completed_futures, _ = wait(futures, return_when=FIRST_COMPLETED)
for future in completed_futures:
(step_name, generator) = futures.pop(future)
try:
chunk = AddableDict({step_name: future.result()})
yield chunk
futures[executor.submit(next, generator)] = (
step_name,
generator,
)
except StopIteration:
pass
@override
def transform(
self,
input: Iterator[Input],
config: RunnableConfig | None = None,
**kwargs: Any,
) -> Iterator[dict[str, Any]]:
yield from self._transform_stream_with_config(
input, self._transform, config, **kwargs
)
@override
def stream(
self,
input: Input,
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> Iterator[dict[str, Any]]:
yield from self.transform(iter([input]), config)
async def _atransform(
self,
inputs: AsyncIterator[Input],
run_manager: AsyncCallbackManagerForChainRun,
config: RunnableConfig,
) -> AsyncIterator[AddableDict]:
# Shallow copy steps to ignore mutations while in progress
steps = dict(self.steps__)
# Each step gets a copy of the input iterator,
# which is consumed in parallel in a separate thread.
input_copies = list(atee(inputs, len(steps), lock=asyncio.Lock()))
# Create the transform() generator for each step
named_generators = [
(
name,
step.atransform(
input_copies.pop(),
patch_config(
config, callbacks=run_manager.get_child(f"map:key:{name}")
),
),
)
for name, step in steps.items()
]
# Wrap in a coroutine to satisfy linter
async def get_next_chunk(generator: AsyncIterator) -> Output | None:
return await py_anext(generator)
# Start the first iteration of each generator
tasks = {
asyncio.create_task(get_next_chunk(generator)): (step_name, generator)
for step_name, generator in named_generators
}
# Yield chunks from each as they become available,
# and start the next iteration of the generator that yielded it.
# When all generators are exhausted, stop.
while tasks:
completed_tasks, _ = await asyncio.wait(
tasks, return_when=asyncio.FIRST_COMPLETED
)
for task in completed_tasks:
(step_name, generator) = tasks.pop(task)
try:
chunk = AddableDict({step_name: task.result()})
yield chunk
new_task = asyncio.create_task(get_next_chunk(generator))
tasks[new_task] = (step_name, generator)
except StopAsyncIteration:
pass
@override
async def atransform(
self,
input: AsyncIterator[Input],
config: RunnableConfig | None = None,
**kwargs: Any,
) -> AsyncIterator[dict[str, Any]]:
async for chunk in self._atransform_stream_with_config(
input, self._atransform, config, **kwargs
):
yield chunk
@override
async def astream(
self,
input: Input,
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> AsyncIterator[dict[str, Any]]:
async def input_aiter() -> AsyncIterator[Input]:
yield input
async for chunk in self.atransform(input_aiter(), config):
yield chunk
# We support both names
RunnableMap = RunnableParallel
| RunnableParallel |
python | hyperopt__hyperopt | hyperopt/tests/unit/test_rdists.py | {
"start": 1733,
"end": 3488
} | class ____(unittest.TestCase):
def test_cdf_logcdf(self):
check_cdf_logcdf(lognorm_gen(0, 1), (), "")
check_cdf_logcdf(lognorm_gen(0, 1), (), "")
def test_cdf_ppf(self):
check_cdf_ppf(lognorm_gen(0, 1), (), "")
check_cdf_ppf(lognorm_gen(-2, 1), (), "")
def test_pdf_logpdf(self):
check_pdf_logpdf(lognorm_gen(0, 1), args=(), msg="base case")
check_pdf_logpdf(
lognorm_gen(mu=-4, sigma=0.5), args=(), msg="non-default mu, sigma"
)
def test_pdf(self):
check_pdf(lognorm_gen(0, 1), (), "")
check_pdf(lognorm_gen(mu=-4, sigma=2), (), "")
def test_distribution_rvs(self):
import warnings
warnings.warn("test_distribution_rvs is being skipped!")
return # XXX
alpha = 0.01
loc = 0
scale = 1
arg = (loc, scale)
distfn = lognorm_gen(0, 1)
D, pval = stats.kstest(distfn.rvs, distfn.cdf, args=arg, N=1000)
if pval < alpha:
npt.assert_(
pval > alpha,
f"D = {D:f}; pval = {pval:f}; alpha = {alpha:f}; args={arg}",
)
def check_d_samples(dfn, n, rtol=1e-2, atol=1e-2):
counts = defaultdict(int)
# print 'sample', dfn.rvs(size=n)
inc = 1 / n
for s in dfn.rvs(size=n):
counts[s] += inc
for ii, p in sorted(counts.items()):
t = np.allclose(dfn.pmf(ii), p, rtol=rtol, atol=atol)
if not t:
print(("Error in sampling frequencies", ii))
print("value\tpmf\tfreq")
for jj in sorted(counts):
print(f"{jj:.2f}\t{dfn.pmf(jj):.3f}\t{counts[jj]:.4f}")
npt.assert_(t, "n = %i; pmf = %f; p = %f" % (n, dfn.pmf(ii), p))
| TestLogNormal |
python | openai__openai-python | src/openai/resources/chat/chat.py | {
"start": 2314,
"end": 2558
} | class ____:
def __init__(self, chat: Chat) -> None:
self._chat = chat
@cached_property
def completions(self) -> CompletionsWithRawResponse:
return CompletionsWithRawResponse(self._chat.completions)
| ChatWithRawResponse |
python | apache__airflow | providers/openlineage/src/airflow/providers/openlineage/plugins/facets.py | {
"start": 2522,
"end": 3431
} | class ____(RunFacet):
"""
Airflow facet providing state information.
This facet is designed to be sent at a completion event, offering state information about
the DAG run and each individual task. This information is crucial for understanding
the execution flow and comprehensive post-run analysis and debugging, including why certain tasks
did not emit events, which can occur due to the use of control flow operators like the BranchOperator.
Attributes:
dagRunState: This indicates the final status of the entire DAG run (e.g., "success", "failed").
tasksState: A dictionary mapping task IDs to their respective states. (e.g., "failed", "skipped").
tasksDuration: A dictionary mapping task IDs to it's duration in seconds.
"""
dagRunState: str
tasksState: dict[str, str | None]
tasksDuration: dict[str, float]
@define
| AirflowStateRunFacet |
python | langchain-ai__langchain | libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_tool_emulator.py | {
"start": 3751,
"end": 9702
} | class ____:
"""Test basic tool emulator functionality."""
def test_emulates_specified_tool_by_name(self) -> None:
"""Test that tools specified by name are emulated."""
# Model that will call the tool
agent_model = FakeModel(
messages=cycle(
[
AIMessage(
content="",
tool_calls=[
{"name": "get_weather", "id": "1", "args": {"location": "Paris"}}
],
),
AIMessage(content="The weather has been retrieved."),
]
)
)
# Model that emulates tool responses
emulator_model = FakeEmulatorModel(responses=["Emulated: 72°F, sunny in Paris"])
emulator = LLMToolEmulator(tools=["get_weather"], model=emulator_model)
agent = create_agent(
model=agent_model,
tools=[get_weather, calculator],
middleware=[emulator],
)
result = agent.invoke({"messages": [HumanMessage("What's the weather in Paris?")]})
# Should complete without raising NotImplementedError
assert isinstance(result["messages"][-1], AIMessage)
def test_emulates_specified_tool_by_instance(self) -> None:
"""Test that tools specified by BaseTool instance are emulated."""
agent_model = FakeModel(
messages=cycle(
[
AIMessage(
content="",
tool_calls=[{"name": "search_web", "id": "1", "args": {"query": "Python"}}],
),
AIMessage(content="Search results retrieved."),
]
)
)
emulator_model = FakeEmulatorModel(responses=["Emulated: Python is a programming language"])
emulator = LLMToolEmulator(tools=[search_web], model=emulator_model)
agent = create_agent(
model=agent_model,
tools=[search_web, calculator],
middleware=[emulator],
)
result = agent.invoke({"messages": [HumanMessage("Search for Python")]})
assert isinstance(result["messages"][-1], AIMessage)
def test_non_emulated_tools_execute_normally(self) -> None:
"""Test that tools not in tools_to_emulate execute normally."""
agent_model = FakeModel(
messages=cycle(
[
AIMessage(
content="",
tool_calls=[
{"name": "calculator", "id": "1", "args": {"expression": "2+2"}}
],
),
AIMessage(content="The calculation is complete."),
]
)
)
emulator_model = FakeEmulatorModel(responses=["Should not be used"])
# Only emulate get_weather, not calculator
emulator = LLMToolEmulator(tools=["get_weather"], model=emulator_model)
agent = create_agent(
model=agent_model,
tools=[get_weather, calculator],
middleware=[emulator],
)
result = agent.invoke({"messages": [HumanMessage("Calculate 2+2")]})
# Calculator should execute normally and return Result: 4
tool_messages = [
msg for msg in result["messages"] if hasattr(msg, "name") and msg.name == "calculator"
]
assert len(tool_messages) > 0
assert "Result: 4" in tool_messages[0].content
def test_empty_tools_to_emulate_does_nothing(self) -> None:
"""Test that empty tools_to_emulate list means no emulation occurs."""
agent_model = FakeModel(
messages=cycle(
[
AIMessage(
content="",
tool_calls=[
{"name": "calculator", "id": "1", "args": {"expression": "5*5"}}
],
),
AIMessage(content="Done."),
]
)
)
emulator_model = FakeEmulatorModel(responses=["Should not be used"])
emulator = LLMToolEmulator(tools=[], model=emulator_model)
agent = create_agent(
model=agent_model,
tools=[calculator],
middleware=[emulator],
)
result = agent.invoke({"messages": [HumanMessage("Calculate 5*5")]})
# Calculator should execute normally
tool_messages = [
msg for msg in result["messages"] if hasattr(msg, "name") and msg.name == "calculator"
]
assert len(tool_messages) > 0
assert "Result: 25" in tool_messages[0].content
def test_none_tools_emulates_all(self) -> None:
"""Test that None tools means ALL tools are emulated (emulate_all behavior)."""
agent_model = FakeModel(
messages=cycle(
[
AIMessage(
content="",
tool_calls=[
{"name": "get_weather", "id": "1", "args": {"location": "NYC"}}
],
),
AIMessage(content="Done."),
]
)
)
emulator_model = FakeEmulatorModel(responses=["Emulated: 65°F in NYC"])
# tools=None means emulate ALL tools
emulator = LLMToolEmulator(tools=None, model=emulator_model)
agent = create_agent(
model=agent_model,
tools=[get_weather],
middleware=[emulator],
)
result = agent.invoke({"messages": [HumanMessage("What's the weather in NYC?")]})
# Should complete without raising NotImplementedError
# (get_weather would normally raise NotImplementedError)
assert isinstance(result["messages"][-1], AIMessage)
| TestLLMToolEmulatorBasic |
python | google__pytype | pytype/tests/test_basic2.py | {
"start": 79,
"end": 2585
} | class ____(test_base.BaseTest):
"""Basic tests."""
def test_exec_function(self):
self.assertNoCrash(
self.Check,
"""
g = {}
exec("a = 11", g, g)
assert g['a'] == 11
""",
)
def test_import_shadowed(self):
"""Test that we import modules from pytd/ rather than typeshed."""
# We can't import the following modules from typeshed; this tests that we
# import them correctly from our internal pytd/ versions.
for module in ["importlib", "re", "signal"]:
ty = self.Infer(f"import {module}")
self.assertTypesMatchPytd(ty, f"import {module}")
def test_cleanup(self):
ty = self.Infer("""
with open("foo.py", "r") as f:
v = f.read()
w = 42
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import TextIO
f = ... # type: TextIO
v = ... # type: str
w = ... # type: int
""",
)
def test_store_fast_empty_var(self):
self.assertNoCrash(
self.Check,
"""
def bar():
raise NotImplementedError
def foo():
try:
bar()
except () as e: # Emits STORE_FAST for `e` and STACK[-1] is empty var
# (i.e. no bindings)
pass
""",
)
def test_unconditionally_del_export(self):
ty = self.Infer("""
foo = 1
def bar():
return 1
class Baz:
def __init__(self):
self.baz = 1
del foo # unconditionally deleted --> should not appear in types
del bar # unconditionally deleted --> should not appear in types
del Baz # unconditionally deleted --> should not appear in types
""")
self.assertTypesMatchPytd(ty, "")
def test_conditionally_del_export(self):
ty = self.Infer("""
foo = 1
def bar():
return 1
class Baz:
def __init__(self):
self.baz = 1
if __random__:
del foo # conditionally deleted --> should appear in types
del bar # conditionally deleted --> should appear in types
del Baz # conditionally deleted --> should appear in types
""")
self.assertTypesMatchPytd(
ty,
"""
# TODO: b/359466700 - Ideally we could say that `foo` might be absent.
foo: int
def bar() -> int: ...
class Baz:
baz: int
def __init__(self) -> None: ...
""",
)
if __name__ == "__main__":
test_base.main()
| TestExec |
python | Netflix__metaflow | metaflow/plugins/exit_hook/exit_hook_decorator.py | {
"start": 97,
"end": 1547
} | class ____(FlowDecorator):
name = "exit_hook"
allow_multiple = True
defaults = {
"on_success": [],
"on_error": [],
"options": {},
}
def flow_init(
self, flow, graph, environment, flow_datastore, metadata, logger, echo, options
):
on_success = self.attributes["on_success"]
on_error = self.attributes["on_error"]
if not on_success and not on_error:
raise MetaflowException(
"Choose at least one of the options on_success/on_error"
)
self.success_hooks = []
self.error_hooks = []
for success_fn in on_success:
if isinstance(success_fn, str):
self.success_hooks.append(success_fn)
elif callable(success_fn):
self.success_hooks.append(success_fn.__name__)
else:
raise ValueError(
"Exit hooks inside 'on_success' must be a function or a string referring to the function"
)
for error_fn in on_error:
if isinstance(error_fn, str):
self.error_hooks.append(error_fn)
elif callable(error_fn):
self.error_hooks.append(error_fn.__name__)
else:
raise ValueError(
"Exit hooks inside 'on_error' must be a function or a string referring to the function"
)
| ExitHookDecorator |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.