language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | ray-project__ray | rllib/env/tests/test_single_agent_episode.py | {
"start": 426,
"end": 1078
} | class ____(gym.Env):
def __init__(self):
self.observation_space = gym.spaces.Discrete(201)
self.action_space = gym.spaces.Discrete(200)
self.t = 0
def reset(
self, *, seed: Optional[int] = None, options=Optional[Dict[str, Any]]
) -> Tuple[ObsType, Dict[str, Any]]:
self.t = 0
return 0, {}
def step(
self, action: ActType
) -> Tuple[ObsType, SupportsFloat, bool, bool, Dict[str, Any]]:
self.t += 1
if self.t == 200:
is_terminated = True
else:
is_terminated = False
return self.t, self.t, is_terminated, False, {}
| TestEnv |
python | vyperlang__vyper | vyper/venom/analysis/reachable.py | {
"start": 215,
"end": 971
} | class ____(IRAnalysis):
"""
Compute control flow graph information for each basic block in the function.
"""
reachable: dict[IRBasicBlock, OrderedSet[IRBasicBlock]]
def analyze(self) -> None:
self.cfg = self.analyses_cache.request_analysis(CFGAnalysis)
self.reachable = defaultdict(OrderedSet)
self._compute_reachable_r(self.function.entry)
def _compute_reachable_r(self, bb):
if bb in self.reachable:
return
s = self.cfg.cfg_out(bb).copy()
self.reachable[bb] = s
for out_bb in self.cfg.cfg_out(bb):
self._compute_reachable_r(out_bb)
s.update(self.reachable[out_bb])
def invalidate(self):
del self.reachable
| ReachableAnalysis |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-bedrock-converse/llama_index/llms/bedrock_converse/base.py | {
"start": 1583,
"end": 45059
} | class ____(FunctionCallingLLM):
"""
Bedrock Converse LLM.
Examples:
`pip install llama-index-llms-bedrock-converse`
```python
from llama_index.llms.bedrock_converse import BedrockConverse
llm = BedrockConverse(
model="anthropic.claude-3-haiku-20240307-v1:0",
aws_access_key_id="AWS Access Key ID to use",
aws_secret_access_key="AWS Secret Access Key to use",
aws_session_token="AWS Session Token to use",
region_name="AWS Region to use, eg. us-east-1",
)
resp = llm.complete("Paul Graham is ")
print(resp)
```
"""
model: str = Field(description="The modelId of the Bedrock model to use.")
temperature: float = Field(
default=DEFAULT_TEMPERATURE,
description="The temperature to use for sampling.",
ge=0.0,
le=1.0,
)
max_tokens: int = Field(description="The maximum number of tokens to generate.")
profile_name: Optional[str] = Field(
description="The name of aws profile to use. If not given, then the default profile is used."
)
aws_access_key_id: Optional[str] = Field(
description="AWS Access Key ID to use", exclude=True
)
aws_secret_access_key: Optional[str] = Field(
description="AWS Secret Access Key to use", exclude=True
)
aws_session_token: Optional[str] = Field(
description="AWS Session Token to use", exclude=True
)
region_name: Optional[str] = Field(
description="AWS region name to use. Uses region configured in AWS CLI if not passed",
exclude=True,
)
api_version: Optional[str] = Field(
description=(
"The API version to use. By default, botocore will use the latest API version when creating a client. "
"You only need to specify this parameter if you want to use a previous API version of the client."
),
exclude=True,
)
use_ssl: bool = Field(
description="Whether or not to use SSL. By default, SSL is used. Note that not all services support non-ssl connections.",
exclude=True,
)
verify: Optional[Union[bool, str]] = Field(
description="Whether or not to verify SSL certificates. By default SSL certificates are verified.",
exclude=True,
)
endpoint_url: Optional[str] = Field(
description=(
"The complete URL to use for the constructed client. Normally, botocore will automatically construct the appropriate "
"URL to use when communicating with a service. You can specify a complete URL (including the 'http/https' scheme) to override this behavior. "
"If this value is provided, then ``use_ssl`` is ignored."
),
exclude=True,
)
botocore_session: Optional[Any] = Field(
description="Use this Botocore session instead of creating a new default one.",
exclude=True,
)
botocore_config: Optional[Any] = Field(
description="Custom configuration object to use instead of the default generated one.",
exclude=True,
)
max_retries: int = Field(
default=10, description="The maximum number of API retries.", gt=0
)
timeout: float = Field(
default=60.0,
description="The timeout for the Bedrock API request in seconds. It will be used for both connect and read timeouts.",
)
system_prompt_caching: bool = Field(
default=False,
description="Whether to cache the system prompt. If you are using a system prompt, you should set this to True.",
)
tool_caching: bool = Field(
default=False,
description="Whether to cache the tools. If you are using tools, you should set this to True.",
)
guardrail_identifier: Optional[str] = Field(
description="The unique identifier of the guardrail that you want to use. If you don't provide a value, no guardrail is applied to the invocation."
)
guardrail_version: Optional[str] = Field(
description="The version number for the guardrail. The value can also be DRAFT"
)
guardrail_stream_processing_mode: Optional[Literal["sync", "async"]] = Field(
description=(
"The stream processing mode to use when leveraging a guardrail in a streaming request (ConverseStream). "
"If set, the specified mode will be included in the request's guardrail configuration object, altering the streaming response behavior. "
"If a value is not provided, no mode will be explicitly included in the request's guardrail configuration object, and thus Amazon Bedrock's default, Synchronous Mode, will be used."
)
)
application_inference_profile_arn: Optional[str] = Field(
description="The ARN of an application inference profile to invoke in place of the model. If provided, make sure the model argument refers to the same one underlying the application inference profile."
)
trace: Optional[str] = Field(
description="Specifies whether to enable or disable the Bedrock trace. If enabled, you can see the full Bedrock trace."
)
thinking: Optional[ThinkingDict] = Field(
description="Specifies the thinking configuration of a reasoning model. Only applicable to Anthropic and DeepSeek models",
default=None,
)
supports_forced_tool_calls: bool = Field(
default=True,
description="Whether the model supports forced tool calls. If True, the model can be forced to call at least 1 or more tools.",
)
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict,
description="Additional kwargs for the bedrock invokeModel request.",
)
_config: Any = PrivateAttr()
_client: Any = PrivateAttr()
_asession: Any = PrivateAttr()
_boto_client_kwargs: Any = PrivateAttr()
def __init__(
self,
model: str,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: Optional[int] = 512,
profile_name: Optional[str] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
region_name: Optional[str] = None,
api_version: Optional[str] = None,
use_ssl: bool = True,
verify: Optional[Union[bool, str]] = None,
endpoint_url: Optional[str] = None,
botocore_session: Optional[Any] = None,
client: Optional[Any] = None,
timeout: Optional[float] = 60.0,
max_retries: Optional[int] = 10,
botocore_config: Optional[Any] = None,
additional_kwargs: Optional[Dict[str, Any]] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
system_prompt_caching: Optional[bool] = False,
tool_caching: Optional[bool] = False,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
guardrail_identifier: Optional[str] = None,
guardrail_version: Optional[str] = None,
guardrail_stream_processing_mode: Optional[Literal["sync", "async"]] = None,
application_inference_profile_arn: Optional[str] = None,
trace: Optional[str] = None,
thinking: Optional[ThinkingDict] = None,
supports_forced_tool_calls: bool = True,
) -> None:
additional_kwargs = additional_kwargs or {}
callback_manager = callback_manager or CallbackManager([])
session_kwargs = {
"profile_name": profile_name,
"region_name": region_name,
"aws_access_key_id": aws_access_key_id,
"aws_secret_access_key": aws_secret_access_key,
"aws_session_token": aws_session_token,
"botocore_session": botocore_session,
}
if not is_reasoning(model) and thinking is not None:
thinking = None
warnings.warn(
"You set thinking parameters for a non-reasoning models, they will be ignored",
UserWarning,
)
super().__init__(
temperature=temperature,
max_tokens=max_tokens,
additional_kwargs=additional_kwargs,
timeout=timeout,
max_retries=max_retries,
model=model,
callback_manager=callback_manager,
system_prompt=system_prompt,
system_prompt_caching=system_prompt_caching,
tool_caching=tool_caching,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
profile_name=profile_name,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
region_name=region_name,
api_version=api_version,
use_ssl=use_ssl,
verify=verify,
endpoint_url=endpoint_url,
botocore_session=botocore_session,
botocore_config=botocore_config,
guardrail_identifier=guardrail_identifier,
guardrail_version=guardrail_version,
guardrail_stream_processing_mode=guardrail_stream_processing_mode,
application_inference_profile_arn=application_inference_profile_arn,
trace=trace,
thinking=thinking,
supports_forced_tool_calls=supports_forced_tool_calls,
)
self._config = None
self._boto_client_kwargs = {
"api_version": api_version,
"use_ssl": use_ssl,
"verify": verify,
"endpoint_url": endpoint_url,
}
try:
import boto3
import aioboto3
from botocore.config import Config
self._config = (
Config(
retries={"max_attempts": max_retries, "mode": "standard"},
connect_timeout=timeout,
read_timeout=timeout,
user_agent_extra="x-client-framework:llama_index",
)
if botocore_config is None
else botocore_config
)
session = boto3.Session(**session_kwargs)
self._asession = aioboto3.Session(**session_kwargs)
except ImportError:
raise ImportError(
"boto3 and/or aioboto3 package not found, install with"
"'pip install boto3 aioboto3"
)
# Prior to general availability, custom boto3 wheel files were
# distributed that used the bedrock service to invokeModel.
# This check prevents any services still using those wheel files
# from breaking
if client is not None:
self._client = client
elif "bedrock-runtime" in session.get_available_services():
self._client = session.client(
"bedrock-runtime",
config=self._config,
**self._boto_client_kwargs,
)
else:
self._client = session.client(
"bedrock",
config=self._config,
**self._boto_client_kwargs,
)
@classmethod
def class_name(cls) -> str:
return "Bedrock_Converse_LLM"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=bedrock_modelname_to_context_size(self.model),
num_output=self.max_tokens,
is_chat_model=True,
model_name=self.model,
is_function_calling_model=is_bedrock_function_calling_model(self.model),
)
@property
def _model_kwargs(self) -> Dict[str, Any]:
base_kwargs = {
"model": self.application_inference_profile_arn or self.model,
"temperature": self.temperature,
"max_tokens": self.max_tokens,
}
return {
**base_kwargs,
**self.additional_kwargs,
}
def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
return {
**self._model_kwargs,
**kwargs,
}
def _get_content_and_tool_calls(
self, response: Optional[Dict[str, Any]] = None, content: Dict[str, Any] = None
) -> Tuple[
List[Union[TextBlock, ThinkingBlock, ToolCallBlock]], List[str], List[str]
]:
assert response is not None or content is not None, (
f"Either response or content must be provided. Got response: {response}, content: {content}"
)
assert response is None or content is None, (
f"Only one of response or content should be provided. Got response: {response}, content: {content}"
)
tool_call_ids = []
status = []
blocks: List[TextBlock | ThinkingBlock | ToolCallBlock] = []
if content is not None:
content_list = [content]
else:
content_list = response["output"]["message"]["content"]
for content_block in content_list:
if text := content_block.get("text", None):
blocks.append(TextBlock(text=text))
if thinking := content_block.get("reasoningContent", None):
blocks.append(
ThinkingBlock(
content=thinking.get("reasoningText", {}).get("text", None),
additional_information={
"signature": thinking.get("reasoningText", {}).get(
"signature", None
)
},
)
)
if tool_usage := content_block.get("toolUse", None):
if "toolUseId" not in tool_usage:
tool_usage["toolUseId"] = content_block["toolUseId"]
if "name" not in tool_usage:
tool_usage["name"] = content_block["name"]
blocks.append(
ToolCallBlock(
tool_name=tool_usage.get("name", ""),
tool_call_id=tool_usage.get("toolUseId"),
tool_kwargs=tool_usage.get("input", {}),
)
)
if tool_result := content_block.get("toolResult", None):
for tool_result_content in tool_result["content"]:
if text := tool_result_content.get("text", None):
text_content += text
tool_call_ids.append(tool_result_content.get("toolUseId", ""))
status.append(tool_result.get("status", ""))
return blocks, tool_call_ids, status
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
# convert Llama Index messages to AWS Bedrock Converse messages
converse_messages, system_prompt = messages_to_converse_messages(
messages, self.model
)
all_kwargs = self._get_all_kwargs(**kwargs)
if self.thinking is not None:
all_kwargs["thinking"] = self.thinking
# invoke LLM in AWS Bedrock Converse with retry
response = converse_with_retry(
client=self._client,
messages=converse_messages,
system_prompt=system_prompt,
system_prompt_caching=self.system_prompt_caching,
tool_caching=self.tool_caching,
max_retries=self.max_retries,
stream=False,
guardrail_identifier=self.guardrail_identifier,
guardrail_version=self.guardrail_version,
trace=self.trace,
**all_kwargs,
)
blocks, tool_call_ids, status = self._get_content_and_tool_calls(response)
return ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT,
blocks=blocks,
additional_kwargs={
"tool_call_id": tool_call_ids,
"status": status,
},
),
raw=dict(response),
additional_kwargs=self._get_response_token_counts(dict(response)),
)
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
complete_fn = chat_to_completion_decorator(self.chat)
return complete_fn(prompt, **kwargs)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
# convert Llama Index messages to AWS Bedrock Converse messages
converse_messages, system_prompt = messages_to_converse_messages(
messages, self.model
)
all_kwargs = self._get_all_kwargs(**kwargs)
if self.thinking is not None:
all_kwargs["thinking"] = self.thinking
# invoke LLM in AWS Bedrock Converse with retry
response = converse_with_retry(
client=self._client,
messages=converse_messages,
system_prompt=system_prompt,
system_prompt_caching=self.system_prompt_caching,
tool_caching=self.tool_caching,
max_retries=self.max_retries,
stream=True,
guardrail_identifier=self.guardrail_identifier,
guardrail_version=self.guardrail_version,
guardrail_stream_processing_mode=self.guardrail_stream_processing_mode,
trace=self.trace,
**all_kwargs,
)
def gen() -> ChatResponseGen:
content = {}
tool_calls = [] # Track tool calls separately
current_tool_call = None # Track the current tool call being built
role = MessageRole.ASSISTANT
thinking = ""
thinking_signature = ""
for chunk in response["stream"]:
if content_block_delta := chunk.get("contentBlockDelta"):
content_delta = content_block_delta["delta"]
content = join_two_dicts(content, content_delta)
if "reasoningContent" in content_delta:
thinking += content_delta.get("reasoningContent", {}).get(
"text", ""
)
thinking_signature += content_delta.get(
"reasoningContent", {}
).get("signature", "")
# If this delta contains tool call info, update current tool call
if "toolUse" in content_delta:
tool_use_delta = content_delta["toolUse"]
if current_tool_call:
# Handle the input field specially - concatenate partial JSON strings
if "input" in tool_use_delta:
if "input" in current_tool_call:
current_tool_call["input"] += tool_use_delta[
"input"
]
else:
current_tool_call["input"] = tool_use_delta["input"]
# Remove input from the delta to prevent it from being processed again
tool_use_without_input = {
k: v
for k, v in tool_use_delta.items()
if k != "input"
}
if tool_use_without_input:
current_tool_call = join_two_dicts(
current_tool_call, tool_use_without_input
)
else:
# For other fields, use the normal joining
current_tool_call = join_two_dicts(
current_tool_call, tool_use_delta
)
blocks: List[Union[TextBlock, ThinkingBlock, ToolCallBlock]] = [
TextBlock(text=content.get("text", ""))
]
if thinking != "":
blocks.insert(
0,
ThinkingBlock(
content=thinking,
additional_information={
"signature": thinking_signature
},
),
)
if tool_calls:
for tool_call in tool_calls:
blocks.append(
ToolCallBlock(
tool_kwargs=tool_call.get("input", {}),
tool_name=tool_call.get("name", ""),
tool_call_id=tool_call.get("toolUseId"),
)
)
yield ChatResponse(
message=ChatMessage(
role=role,
blocks=blocks,
additional_kwargs={
"tool_call_id": [
tc.get("toolUseId", "") for tc in tool_calls
],
"status": [], # Will be populated when tool results come in
},
),
delta=content_delta.get("text", ""),
raw=chunk,
additional_kwargs=self._get_response_token_counts(dict(chunk)),
)
elif content_block_start := chunk.get("contentBlockStart"):
# New tool call starting
if "toolUse" in content_block_start["start"]:
tool_use = content_block_start["start"]["toolUse"]
# Start tracking a new tool call
current_tool_call = tool_use
# Add to our list of tool calls
tool_calls.append(current_tool_call)
blocks: List[Union[TextBlock, ThinkingBlock, ToolCallBlock]] = [
TextBlock(text=content.get("text", ""))
]
if thinking != "":
blocks.insert(
0,
ThinkingBlock(
content=thinking,
additional_information={
"signature": thinking_signature
},
),
)
if tool_calls:
for tool_call in tool_calls:
blocks.append(
ToolCallBlock(
tool_kwargs=tool_call.get("input", {}),
tool_name=tool_call.get("name", ""),
tool_call_id=tool_call.get("toolUseId"),
)
)
yield ChatResponse(
message=ChatMessage(
role=role,
blocks=blocks,
additional_kwargs={
"tool_call_id": [
tc.get("toolUseId", "") for tc in tool_calls
],
"status": [], # Will be populated when tool results come in
},
),
raw=chunk,
)
elif message_stop := chunk.get("messageStop"):
# Handle messageStop event - this contains the stop reason
# We don't yield here, just track the event
pass
elif metadata := chunk.get("metadata"):
# Handle metadata event - this contains the final token usage
if usage := metadata.get("usage"):
# Yield a final response with correct token usage
blocks: List[Union[TextBlock, ThinkingBlock, ToolCallBlock]] = [
TextBlock(text=content.get("text", ""))
]
if thinking != "":
blocks.insert(
0,
ThinkingBlock(
content=thinking,
additional_information={
"signature": thinking_signature
},
),
)
if tool_calls:
for tool_call in tool_calls:
blocks.append(
ToolCallBlock(
tool_kwargs=tool_call.get("input", {}),
tool_name=tool_call.get("name", ""),
tool_call_id=tool_call.get("toolUseId"),
)
)
yield ChatResponse(
message=ChatMessage(
role=role,
blocks=blocks,
additional_kwargs={
"tool_call_id": [
tc.get("toolUseId", "") for tc in tool_calls
],
"status": [],
},
),
delta="",
raw=chunk,
additional_kwargs=self._get_response_token_counts(metadata),
)
return gen()
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
stream_complete_fn = stream_chat_to_completion_decorator(self.stream_chat)
return stream_complete_fn(prompt, **kwargs)
@llm_chat_callback()
async def achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse:
# convert Llama Index messages to AWS Bedrock Converse messages
converse_messages, system_prompt = messages_to_converse_messages(
messages, self.model
)
all_kwargs = self._get_all_kwargs(**kwargs)
if self.thinking is not None:
all_kwargs["thinking"] = self.thinking
# invoke LLM in AWS Bedrock Converse with retry
response = await converse_with_retry_async(
session=self._asession,
config=self._config,
messages=converse_messages,
system_prompt=system_prompt,
system_prompt_caching=self.system_prompt_caching,
tool_caching=self.tool_caching,
max_retries=self.max_retries,
stream=False,
guardrail_identifier=self.guardrail_identifier,
guardrail_version=self.guardrail_version,
trace=self.trace,
boto_client_kwargs=self._boto_client_kwargs,
**all_kwargs,
)
blocks, tool_call_ids, status = self._get_content_and_tool_calls(response)
return ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT,
blocks=blocks,
additional_kwargs={
"tool_call_id": tool_call_ids,
"status": status,
},
),
raw=dict(response),
additional_kwargs=self._get_response_token_counts(dict(response)),
)
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
complete_fn = achat_to_completion_decorator(self.achat)
return await complete_fn(prompt, **kwargs)
@llm_chat_callback()
async def astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
# convert Llama Index messages to AWS Bedrock Converse messages
converse_messages, system_prompt = messages_to_converse_messages(
messages, self.model
)
all_kwargs = self._get_all_kwargs(**kwargs)
if self.thinking is not None:
all_kwargs["thinking"] = self.thinking
# invoke LLM in AWS Bedrock Converse with retry
response_gen = await converse_with_retry_async(
session=self._asession,
config=self._config,
messages=converse_messages,
system_prompt=system_prompt,
system_prompt_caching=self.system_prompt_caching,
tool_caching=self.tool_caching,
max_retries=self.max_retries,
stream=True,
guardrail_identifier=self.guardrail_identifier,
guardrail_version=self.guardrail_version,
guardrail_stream_processing_mode=self.guardrail_stream_processing_mode,
trace=self.trace,
boto_client_kwargs=self._boto_client_kwargs,
**all_kwargs,
)
async def gen() -> ChatResponseAsyncGen:
content = {}
tool_calls = [] # Track tool calls separately
current_tool_call = None # Track the current tool call being built
role = MessageRole.ASSISTANT
thinking = ""
thinking_signature = ""
async for chunk in response_gen:
if content_block_delta := chunk.get("contentBlockDelta"):
content_delta = content_block_delta["delta"]
content = join_two_dicts(content, content_delta)
if "reasoningContent" in content_delta:
thinking += content_delta.get("reasoningContent", {}).get(
"text", ""
)
thinking_signature += content_delta.get(
"reasoningContent", {}
).get("signature", "")
# If this delta contains tool call info, update current tool call
if "toolUse" in content_delta:
tool_use_delta = content_delta["toolUse"]
if current_tool_call:
# Handle the input field specially - concatenate partial JSON strings
if "input" in tool_use_delta:
if "input" in current_tool_call:
current_tool_call["input"] += tool_use_delta[
"input"
]
else:
current_tool_call["input"] = tool_use_delta["input"]
# Remove input from the delta to prevent it from being processed again
tool_use_without_input = {
k: v
for k, v in tool_use_delta.items()
if k != "input"
}
if tool_use_without_input:
current_tool_call = join_two_dicts(
current_tool_call, tool_use_without_input
)
else:
# For other fields, use the normal joining
current_tool_call = join_two_dicts(
current_tool_call, tool_use_delta
)
blocks: List[Union[TextBlock, ThinkingBlock, ToolCallBlock]] = [
TextBlock(text=content.get("text", ""))
]
if thinking != "":
blocks.insert(
0,
ThinkingBlock(
content=thinking,
additional_information={
"signature": thinking_signature
},
),
)
if tool_calls:
for tool_call in tool_calls:
blocks.append(
ToolCallBlock(
tool_kwargs=tool_call.get("input", {}),
tool_name=tool_call.get("name", ""),
tool_call_id=tool_call.get("toolUseId"),
)
)
yield ChatResponse(
message=ChatMessage(
role=role,
blocks=blocks,
additional_kwargs={
"tool_call_id": [
tc.get("toolUseId", "") for tc in tool_calls
],
"status": [], # Will be populated when tool results come in
},
),
delta=content_delta.get("text", ""),
raw=chunk,
additional_kwargs=self._get_response_token_counts(dict(chunk)),
)
elif content_block_start := chunk.get("contentBlockStart"):
# New tool call starting
if "toolUse" in content_block_start["start"]:
tool_use = content_block_start["start"]["toolUse"]
# Start tracking a new tool call
current_tool_call = tool_use
# Add to our list of tool calls
tool_calls.append(current_tool_call)
blocks: List[Union[TextBlock, ThinkingBlock, ToolCallBlock]] = [
TextBlock(text=content.get("text", ""))
]
if thinking != "":
blocks.insert(
0,
ThinkingBlock(
content=thinking,
additional_information={
"signature": thinking_signature
},
),
)
if tool_calls:
for tool_call in tool_calls:
blocks.append(
ToolCallBlock(
tool_kwargs=tool_call.get("input", {}),
tool_name=tool_call.get("name", ""),
tool_call_id=tool_call.get("toolUseId"),
)
)
yield ChatResponse(
message=ChatMessage(
role=role,
blocks=blocks,
additional_kwargs={
"tool_call_id": [
tc.get("toolUseId", "") for tc in tool_calls
],
"status": [], # Will be populated when tool results come in
},
),
raw=chunk,
)
elif chunk.get("messageStop"):
# Handle messageStop event - this contains the stop reason
# We don't yield here, just track the event
pass
elif metadata := chunk.get("metadata"):
# Handle metadata event - this contains the final token usage
if usage := metadata.get("usage"):
# Yield a final response with correct token usage
blocks: List[Union[TextBlock, ThinkingBlock, ToolCallBlock]] = [
TextBlock(text=content.get("text", ""))
]
if thinking != "":
blocks.insert(
0,
ThinkingBlock(
content=thinking,
additional_information={
"signature": thinking_signature
},
),
)
if tool_calls:
for tool_call in tool_calls:
blocks.append(
ToolCallBlock(
tool_kwargs=tool_call.get("input", {}),
tool_name=tool_call.get("name", ""),
tool_call_id=tool_call.get("toolUseId"),
)
)
yield ChatResponse(
message=ChatMessage(
role=role,
blocks=blocks,
additional_kwargs={
"tool_call_id": [
tc.get("toolUseId", "") for tc in tool_calls
],
"status": [],
},
),
delta="",
raw=chunk,
additional_kwargs=self._get_response_token_counts(metadata),
)
return gen()
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
astream_complete_fn = astream_chat_to_completion_decorator(self.astream_chat)
return await astream_complete_fn(prompt, **kwargs)
def _prepare_chat_with_tools(
self,
tools: List["BaseTool"],
user_msg: Optional[Union[str, ChatMessage]] = None,
chat_history: Optional[List[ChatMessage]] = None,
verbose: bool = False,
allow_parallel_tool_calls: bool = False,
tool_required: bool = False,
tool_caching: bool = False,
tool_choice: Optional[dict] = None,
**kwargs: Any,
) -> Dict[str, Any]:
"""Prepare the arguments needed to let the LLM chat with tools."""
chat_history = chat_history or []
if isinstance(user_msg, str):
user_msg = ChatMessage(role=MessageRole.USER, content=user_msg)
chat_history.append(user_msg)
elif isinstance(user_msg, ChatMessage):
chat_history.append(user_msg)
# convert Llama Index tools to AWS Bedrock Converse tools
tool_config = tools_to_converse_tools(
tools,
tool_choice=tool_choice,
tool_required=tool_required,
tool_caching=tool_caching,
supports_forced_tool_calls=self.supports_forced_tool_calls,
)
return {
"messages": chat_history,
"tools": tool_config,
**kwargs,
}
def _validate_chat_with_tools_response(
self,
response: ChatResponse,
tools: List["BaseTool"],
allow_parallel_tool_calls: bool = False,
**kwargs: Any,
) -> ChatResponse:
"""Validate the response from chat_with_tools."""
if not allow_parallel_tool_calls:
force_single_tool_call(response)
return response
def get_tool_calls_from_response(
self,
response: "ChatResponse",
error_on_no_tool_call: bool = True,
**kwargs: Any,
) -> List[ToolSelection]:
"""Predict and call the tool."""
tool_calls = [
block
for block in response.message.blocks
if isinstance(block, ToolCallBlock)
]
if len(tool_calls) < 1:
if error_on_no_tool_call:
raise ValueError(
f"Expected at least one tool call, but got {len(tool_calls)} tool calls."
)
else:
return []
tool_selections = []
for tool_call in tool_calls:
# handle empty inputs
argument_dict = {}
if isinstance(tool_call.tool_kwargs, str):
# TODO parse_partial_json is not perfect
try:
argument_dict = parse_partial_json(tool_call.tool_kwargs)
except ValueError:
argument_dict = {}
elif isinstance(tool_call.tool_kwargs, dict):
argument_dict = tool_call.tool_kwargs
else:
continue
tool_selections.append(
ToolSelection(
tool_id=tool_call.tool_call_id or "",
tool_name=tool_call.tool_name,
tool_kwargs=argument_dict,
)
)
return tool_selections
def _get_response_token_counts(
self, response: Optional[Dict[str, Any]] = None
) -> dict:
"""Get the token usage reported by the response."""
if not response or not isinstance(response, dict):
return {}
usage = response.get("usage", {})
if not usage:
return {}
# Convert Bedrock's token count format to match OpenAI's format
# Cache token formats respecting Anthropic format
return {
"prompt_tokens": usage.get("inputTokens", 0),
"completion_tokens": usage.get("outputTokens", 0),
"total_tokens": usage.get("totalTokens", 0),
"cache_read_input_tokens": usage.get("cacheReadInputTokens", 0),
"cache_creation_input_tokens": usage.get("cacheWriteInputTokens", 0),
}
| BedrockConverse |
python | tensorflow__tensorflow | tensorflow/python/lib/io/tf_record.py | {
"start": 1445,
"end": 7854
} | class ____(object):
"""Options used for manipulating TFRecord files."""
compression_type_map = {
TFRecordCompressionType.ZLIB: "ZLIB",
TFRecordCompressionType.GZIP: "GZIP",
TFRecordCompressionType.NONE: ""
}
def __init__(self,
compression_type=None,
flush_mode=None,
input_buffer_size=None,
output_buffer_size=None,
window_bits=None,
compression_level=None,
compression_method=None,
mem_level=None,
compression_strategy=None):
# pylint: disable=line-too-long
"""Creates a `TFRecordOptions` instance.
Options only effect TFRecordWriter when compression_type is not `None`.
Documentation, details, and defaults can be found in
[`zlib_compression_options.h`](https://www.tensorflow.org/code/tensorflow/core/lib/io/zlib_compression_options.h)
and in the [zlib manual](http://www.zlib.net/manual.html).
Leaving an option as `None` allows C++ to set a reasonable default.
Args:
compression_type: `"GZIP"`, `"ZLIB"`, or `""` (no compression).
flush_mode: flush mode or `None`, Default: Z_NO_FLUSH.
input_buffer_size: int or `None`.
output_buffer_size: int or `None`.
window_bits: int or `None`.
compression_level: 0 to 9, or `None`.
compression_method: compression method or `None`.
mem_level: 1 to 9, or `None`.
compression_strategy: strategy or `None`. Default: Z_DEFAULT_STRATEGY.
Returns:
A `TFRecordOptions` object.
Raises:
ValueError: If compression_type is invalid.
"""
# pylint: enable=line-too-long
# Check compression_type is valid, but for backwards compatibility don't
# immediately convert to a string.
self.get_compression_type_string(compression_type)
self.compression_type = compression_type
self.flush_mode = flush_mode
self.input_buffer_size = input_buffer_size
self.output_buffer_size = output_buffer_size
self.window_bits = window_bits
self.compression_level = compression_level
self.compression_method = compression_method
self.mem_level = mem_level
self.compression_strategy = compression_strategy
@classmethod
def get_compression_type_string(cls, options):
"""Convert various option types to a unified string.
Args:
options: `TFRecordOption`, `TFRecordCompressionType`, or string.
Returns:
Compression type as string (e.g. `'ZLIB'`, `'GZIP'`, or `''`).
Raises:
ValueError: If compression_type is invalid.
"""
if not options:
return ""
elif isinstance(options, TFRecordOptions):
return cls.get_compression_type_string(options.compression_type)
elif isinstance(options, TFRecordCompressionType):
return cls.compression_type_map[options]
elif options in TFRecordOptions.compression_type_map:
return cls.compression_type_map[options]
elif options in TFRecordOptions.compression_type_map.values():
return options
else:
raise ValueError('Not a valid compression_type: "{}"'.format(options))
def _as_record_writer_options(self):
"""Convert to RecordWriterOptions for use with PyRecordWriter."""
options = _pywrap_record_io.RecordWriterOptions(
compat.as_bytes(
self.get_compression_type_string(self.compression_type)))
if self.flush_mode is not None:
options.zlib_options.flush_mode = self.flush_mode
if self.input_buffer_size is not None:
options.zlib_options.input_buffer_size = self.input_buffer_size
if self.output_buffer_size is not None:
options.zlib_options.output_buffer_size = self.output_buffer_size
if self.window_bits is not None:
options.zlib_options.window_bits = self.window_bits
if self.compression_level is not None:
options.zlib_options.compression_level = self.compression_level
if self.compression_method is not None:
options.zlib_options.compression_method = self.compression_method
if self.mem_level is not None:
options.zlib_options.mem_level = self.mem_level
if self.compression_strategy is not None:
options.zlib_options.compression_strategy = self.compression_strategy
return options
@tf_export(v1=["io.tf_record_iterator", "python_io.tf_record_iterator"])
@deprecation.deprecated(
date=None,
instructions=("Use eager execution and: \n"
"`tf.data.TFRecordDataset(path)`"))
def tf_record_iterator(path, options=None):
"""An iterator that read the records from a TFRecords file.
Args:
path: The path to the TFRecords file.
options: (optional) A TFRecordOptions object.
Returns:
An iterator of serialized TFRecords.
Raises:
IOError: If `path` cannot be opened for reading.
"""
compression_type = TFRecordOptions.get_compression_type_string(options)
return _pywrap_record_io.RecordIterator(path, compression_type)
def tf_record_random_reader(path):
"""Creates a reader that allows random-access reads from a TFRecords file.
The created reader object has the following method:
- `read(offset)`, which returns a tuple of `(record, ending_offset)`, where
`record` is the TFRecord read at the offset, and
`ending_offset` is the ending offset of the read record.
The method throws a `tf.errors.DataLossError` if data is corrupted at
the given offset. The method throws `IndexError` if the offset is out of
range for the TFRecords file.
Usage example:
```py
reader = tf_record_random_reader(file_path)
record_1, offset_1 = reader.read(0) # 0 is the initial offset.
# offset_1 is the ending offset of the 1st record and the starting offset of
# the next.
record_2, offset_2 = reader.read(offset_1)
# offset_2 is the ending offset of the 2nd record and the starting offset of
# the next.
# We can jump back and read the first record again if so desired.
reader.read(0)
```
Args:
path: The path to the TFRecords file.
Returns:
An object that supports random-access reading of the serialized TFRecords.
Raises:
IOError: If `path` cannot be opened for reading.
"""
return _pywrap_record_io.RandomRecordReader(path)
@tf_export(
"io.TFRecordWriter", v1=["io.TFRecordWriter", "python_io.TFRecordWriter"])
@deprecation.deprecated_endpoints("python_io.TFRecordWriter")
| TFRecordOptions |
python | kamyu104__LeetCode-Solutions | Python/count-pairs-with-xor-in-a-range.py | {
"start": 1611,
"end": 1976
} | class ____(object):
def countPairs(self, nums, low, high):
"""
:type nums: List[int]
:type low: int
:type high: int
:rtype: int
"""
result = 0
trie = Trie()
for x in nums:
result += trie.query(x, high+1)-trie.query(x, low)
trie.insert(x)
return result
| Solution2 |
python | kamyu104__LeetCode-Solutions | Python/minimum-value-to-get-positive-step-by-step-sum.py | {
"start": 29,
"end": 324
} | class ____(object):
def minStartValue(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
min_prefix, prefix = 0, 0
for num in nums:
prefix += num
min_prefix = min(min_prefix, prefix)
return 1-min_prefix
| Solution |
python | django__django | tests/introspection/models.py | {
"start": 31,
"end": 149
} | class ____(models.Model):
id = models.BigAutoField(primary_key=True)
name = models.CharField(max_length=50)
| City |
python | huggingface__transformers | src/transformers/models/mask2former/modeling_mask2former.py | {
"start": 53382,
"end": 59497
} | class ____(nn.Module):
"""
Transformer encoder consisting of *config.encoder_layers* deformable attention layers. Each layer is a
[`Mask2FormerPixelDecoderEncoderLayer`]. The encoder updates the flattened multi-scale feature maps through
multiple deformable attention layers.
Args:
config: Mask2FormerConfig
"""
def __init__(self, config: Mask2FormerConfig):
super().__init__()
self.config = config
self.dropout = config.dropout
self.layers = nn.ModuleList(
[Mask2FormerPixelDecoderEncoderLayer(config) for _ in range(config.encoder_layers)]
)
@staticmethod
def get_reference_points(spatial_shapes_list, valid_ratios, device):
"""
Get reference points for each feature map. Used in decoder.
Args:
spatial_shapes_list (`list` of `tuple`):
Spatial shapes of the backbone feature maps as a list of tuples.
valid_ratios (`torch.FloatTensor`):
Valid ratios of each feature map, has shape of `(batch_size, num_feature_levels, 2)`.
device (`torch.device`):
Device on which to create the tensors.
Returns:
`torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)`
"""
reference_points_list = []
for lvl, (height, width) in enumerate(spatial_shapes_list):
ref_y, ref_x = torch.meshgrid(
torch.linspace(0.5, height - 0.5, height, dtype=valid_ratios.dtype, device=device),
torch.linspace(0.5, width - 0.5, width, dtype=valid_ratios.dtype, device=device),
indexing="ij",
)
ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * height)
ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * width)
ref = torch.stack((ref_x, ref_y), -1)
reference_points_list.append(ref)
reference_points = torch.cat(reference_points_list, 1)
reference_points = reference_points[:, :, None] * valid_ratios[:, None]
return reference_points
def forward(
self,
inputs_embeds=None,
attention_mask=None,
position_embeddings=None,
spatial_shapes_list=None,
level_start_index=None,
valid_ratios=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Flattened feature map (output of the backbone + projection layer) that is passed to the encoder.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`:
- 1 for pixel features that are real (i.e. **not masked**),
- 0 for pixel features that are padding (i.e. **masked**).
[What are attention masks?](../glossary#attention-mask)
position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Position embeddings that are added to the queries and keys in each self-attention layer.
spatial_shapes_list (`list` of `tuple`):
Spatial shapes of each feature map as a list of tuples.
level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`):
Starting index of each feature map.
valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`):
Ratio of valid area in each feature level.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
hidden_states = inputs_embeds
reference_points = self.get_reference_points(spatial_shapes_list, valid_ratios, device=inputs_embeds.device)
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for i, encoder_layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states.transpose(1, 0),)
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
position_embeddings=position_embeddings,
reference_points=reference_points,
spatial_shapes_list=spatial_shapes_list,
level_start_index=level_start_index,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states += (hidden_states.transpose(1, 0),)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
# Modified from from transformers.models.detr.modeling_deformable_detr.DeformableDetrModel with DeformableDetrModel->Mask2FormerPixelDecoder
| Mask2FormerPixelDecoderEncoderOnly |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 217511,
"end": 218163
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of DeleteProjectV2Item"""
__schema__ = github_schema
__field_names__ = ("project_id", "item_id", "client_mutation_id")
project_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="projectId")
"""The ID of the Project from which the item should be removed."""
item_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="itemId")
"""The ID of the item to be removed."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| DeleteProjectV2ItemInput |
python | plotly__plotly.py | plotly/graph_objs/scattermapbox/marker/colorbar/_tickfont.py | {
"start": 233,
"end": 9984
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattermapbox.marker.colorbar"
_path_str = "scattermapbox.marker.colorbar.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scattermapbox.
marker.colorbar.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super().__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattermapbox.marker.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattermapbox.marker.colorbar.Tickfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickfont |
python | conda__conda | conda/testing/fixtures.py | {
"start": 8906,
"end": 11771
} | class ____:
"""Fixture for calling pip in specific conda environments."""
@overload
def __call__(
self,
*argv: PathType,
prefix: PathType,
raises: type[Exception] | tuple[type[Exception], ...],
) -> tuple[str, str, ExceptionInfo]: ...
@overload
def __call__(
self,
*argv: PathType,
prefix: PathType,
) -> tuple[str, str, int]: ...
def __call__(
self,
*argv: PathType,
prefix: PathType,
raises: type[Exception] | tuple[type[Exception], ...] | None = None,
) -> tuple[str | None, str | None, int | ExceptionInfo]:
"""Test pip CLI in a specific conda environment.
`pip ...` in environment == `pip_cli(..., prefix=env_path)`
:param argv: Arguments to pass to pip.
:param prefix: Path to the conda environment containing pip.
:param raises: Expected exception to intercept. If provided, the raised exception
will be returned instead of exit code (see pytest.raises and pytest.ExceptionInfo).
:return: Command results (stdout, stderr, exit code or pytest.ExceptionInfo).
"""
# build command using python -m pip (more reliable than finding pip executable)
prefix_path = Path(prefix)
python_exe = prefix_path / PYTHON_BINARY
cmd = [str(python_exe), "-m", "pip"] + [str(arg) for arg in argv]
# run command
with pytest.raises(raises) if raises else nullcontext() as exception:
try:
result = subprocess.run(
cmd,
capture_output=True,
text=True,
check=True,
)
code = result.returncode
stdout = result.stdout
stderr = result.stderr
except subprocess.CalledProcessError as e:
code = e.returncode
stdout = e.stdout
stderr = e.stderr
except FileNotFoundError:
# python executable not found
raise RuntimeError(
f"Python not found in environment {prefix_path}: {python_exe}"
)
return stdout, stderr, exception if raises else code
@pytest.fixture(scope="session")
def pip_cli() -> Iterator[PipCLIFixture]:
"""A function scoped fixture returning PipCLIFixture instance.
Use this for calling pip commands in specific conda environments during tests.
Uses `python -m pip` for reliable cross-platform execution.
Example:
def test_pip_install(tmp_env, pip_cli):
with tmp_env("python=3.10", "pip") as prefix:
stdout, stderr, code = pip_cli("install", "requests", prefix=prefix)
assert code == 0
"""
yield PipCLIFixture()
@dataclass
| PipCLIFixture |
python | getsentry__sentry | src/sentry/integrations/github_enterprise/client.py | {
"start": 180,
"end": 1123
} | class ____(GitHubBaseClient):
integration_name = IntegrationProviderSlug.GITHUB_ENTERPRISE.value
def __init__(self, base_url, integration, app_id, private_key, verify_ssl, org_integration_id):
self.base_url = f"https://{base_url}"
self.integration = integration
self.app_id = app_id
self.private_key = private_key
super().__init__(verify_ssl=verify_ssl, org_integration_id=org_integration_id)
def build_url(self, path: str) -> str:
if path.startswith("/"):
if path == "/graphql":
path = "/api/graphql"
else:
path = "/api/v3/{}".format(path.lstrip("/"))
return super().build_url(path)
def _get_installation_id(self) -> str:
return self.integration.metadata["installation_id"]
def _get_jwt(self):
return get_jwt(github_id=self.app_id, github_private_key=self.private_key)
| GitHubEnterpriseApiClient |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 68714,
"end": 72196
} | class ____(Operation):
def __init__(self, axisa=-1, axisb=-1, axisc=-1, axis=None, *, name=None):
super().__init__(name=name)
if axis is not None:
self.axisa = axis
self.axisb = axis
self.axisc = axis
else:
self.axisa = axisa
self.axisb = axisb
self.axisc = axisc
def call(self, x1, x2):
return backend.numpy.cross(x1, x2, self.axisa, self.axisb, self.axisc)
def compute_output_spec(self, x1, x2):
x1_shape = list(x1.shape)
x2_shape = list(x2.shape)
x1_value_size = x1_shape[self.axisa]
x2_value_size = x2_shape[self.axisa]
del x1_shape[self.axisa]
del x2_shape[self.axisb]
output_shape = broadcast_shapes(x1_shape, x2_shape)
if x1_value_size is not None and x1_value_size not in (2, 3):
raise ValueError(
"`x1`'s dim on `axis={axisa}` must be either 2 or 3, but "
f"received: {x1_value_size}"
)
if x2_value_size is not None and x2_value_size not in (2, 3):
raise ValueError(
"`x2`'s dim on `axis={axisb}` must be either 2 or 3, but "
f"received: {x2_value_size}"
)
if x1_value_size == 3 or x2_value_size == 3:
value_size = [3]
else:
value_size = []
output_shape = (
output_shape[: self.axisc] + value_size + output_shape[self.axisc :]
)
dtype = dtypes.result_type(x1.dtype, x2.dtype)
return KerasTensor(output_shape, dtype=dtype)
@keras_export(["keras.ops.cross", "keras.ops.numpy.cross"])
def cross(x1, x2, axisa=-1, axisb=-1, axisc=-1, axis=None):
"""Returns the cross product of two (arrays of) vectors.
The cross product of `x1` and `x2` in R^3 is a vector
perpendicular to both `x1` and `x2`. If `x1` and `x2` are arrays of
vectors, the vectors are defined by the last axis of `x1` and `x2`
by default, and these axes can have dimensions 2 or 3.
Where the dimension of either `x1` or `x2` is 2, the third component of
the input vector is assumed to be zero and the cross product calculated
accordingly.
In cases where both input vectors have dimension 2, the z-component of
the cross product is returned.
Args:
x1: Components of the first vector(s).
x2: Components of the second vector(s).
axisa: Axis of `x1` that defines the vector(s). Defaults to `-1`.
axisb: Axis of `x2` that defines the vector(s). Defaults to `-1`.
axisc: Axis of the result containing the cross product vector(s).
Ignored if both input vectors have dimension 2, as the return is
scalar. By default, the last axis.
axis: If defined, the axis of `x1`, `x2` and the result that
defines the vector(s) and cross product(s). Overrides `axisa`,
`axisb` and `axisc`.
Note:
Torch backend does not support two dimensional vectors, or the
arguments `axisa`, `axisb` and `axisc`. Use `axis` instead.
Returns:
Vector cross product(s).
"""
if any_symbolic_tensors((x1, x2)):
return Cross(
axisa=axisa, axisb=axisb, axisc=axisc, axis=axis
).symbolic_call(x1, x2)
return backend.numpy.cross(
x1,
x2,
axisa=axisa,
axisb=axisb,
axisc=axisc,
axis=axis,
)
| Cross |
python | pytorch__pytorch | torch/nested/_internal/nested_tensor.py | {
"start": 15353,
"end": 25114
} | class ____(torch.autograd.Function):
@staticmethod
def forward( # pyrefly: ignore # bad-override
ctx,
values: torch.Tensor,
offsets: torch.Tensor,
metadata_cache: Optional[Dict[str, Any]] = None,
): # type: ignore[override]
# maintain BC with this usages of this where the seqlens are stuffed
# directly into the metadata cache as non-Tensors / ints
if metadata_cache is not None:
min_seqlen = metadata_cache.get("min_seqlen", None)
max_seqlen = metadata_cache.get("max_seqlen", None)
if min_seqlen is not None and not isinstance(min_seqlen, torch.Tensor):
metadata_cache["min_seqlen"] = _store_val_in_tensor(min_seqlen)
if max_seqlen is not None and not isinstance(max_seqlen, torch.Tensor):
metadata_cache["max_seqlen"] = _store_val_in_tensor(max_seqlen)
return NestedTensor(
values.detach(),
offsets=offsets,
_metadata_cache=metadata_cache,
)
@staticmethod
def backward(ctx, gO: NestedTensor): # type: ignore[override]
return gO._values, None, None
def buffer_from_jagged(jagged):
return ViewBufferFromNested.apply(jagged)
# Need to make it obvious that users should be passing in offsets
def jagged_from_list(
tensors: List[torch.Tensor],
offsets: Optional[torch.Tensor],
dtype=None,
device=None,
) -> tuple[NestedTensor, torch.Tensor]:
"""Constructs a NestedTensor backed by jagged layout from a list of tensors"""
if len(tensors) == 0:
raise RuntimeError("Cannot construct a nested tensor from an empty tensor list")
if not len(set(t.dtype for t in tensors)) == 1: # noqa: C401
raise RuntimeError(
"When constructing a nested tensor, all tensors in list must have the same dtype"
)
if not len(set(t.device for t in tensors)) == 1: # noqa: C401
raise RuntimeError(
"When constructing a nested tensor, all tensors in list must be on the same device"
)
if not len(set(t.dim() for t in tensors)) == 1: # noqa: C401
raise RuntimeError(
"When constructing a nested tensor, all tensors in list must have the same dim"
)
component_dim = tensors[0].dim()
if component_dim == 0:
raise RuntimeError(
"Cannot construct a nested tensor from a list of zero-dim tensors"
)
# Check that the NT is representable by the jagged layout, which
# allows for a single ragged dimension after the batch dim.
# e.g. (B, *, D_0, ..., D_N), (B, D_0, *, ..., D_N), etc.
sizes = [t.shape for t in tensors]
ragged_idx = None
for d in range(component_dim):
dim_is_ragged = any(size[d] != sizes[0][d] for size in sizes)
if dim_is_ragged:
if ragged_idx is None:
# add 1 to convert to outer NJT dim space
ragged_idx = d + 1
else:
raise RuntimeError(
"Cannot represent given tensor list as a nested tensor with the jagged layout. "
"Note that the jagged layout only allows for a single ragged dimension. "
"For example: (B, *, D_0, D_1, ..., D_N), with ragged * dim."
)
# allow for a rectangular NJT and default the ragged dim next to the batch dim
if ragged_idx is None:
ragged_idx = 1
# Set properties appropriately.
values = torch.cat(tensors, dim=(ragged_idx - 1))
to_kwargs = {}
if device is not None:
to_kwargs["device"] = device
if dtype is not None:
to_kwargs["dtype"] = dtype
values = values.to(**to_kwargs)
# Calculate jagged offsets if not provided.
if offsets is None:
# Jagged layout specifies that offsets are stored as int64 on the same device as values.
# TODO: An alternative way to construct offsets is to use F.pad. This avoids creating
# an extra leaf tensor during the forward, potentially resolving compatibility issues.
offsets = torch.cat(
[
torch.zeros(1, dtype=torch.int64, device=values.device),
torch.tensor(
[s[ragged_idx - 1] for s in sizes], device=values.device
).cumsum(dim=0),
]
)
# compute this now since it's easy
min_seqlen = min(t.shape[ragged_idx - 1] for t in tensors)
max_seqlen = max(t.shape[ragged_idx - 1] for t in tensors)
ret_nt = nested_view_from_values_offsets(
values,
offsets,
min_seqlen=min_seqlen,
max_seqlen=max_seqlen,
ragged_idx=ragged_idx,
)
return (ret_nt, offsets) # type: ignore[return-value]
def jagged_from_tensor_and_lengths(
tensor: torch.Tensor, starts: torch.Tensor, lengths: torch.Tensor
) -> tuple[NestedTensor, torch.Tensor, Optional[torch.Tensor]]:
"""Constructs a NestedTensor backed by jagged layout from a tensor, starts of sequences, and sequence lengths"""
batch_size = tensor.shape[0]
if is_expandable_to(starts.shape, (batch_size,)) and is_expandable_to(
lengths.shape, (batch_size,)
):
start_list = starts.expand(batch_size)
length_list = lengths.expand(batch_size)
else:
raise RuntimeError(
"When constructing a jagged nested tensor using narrow(), "
"your start and length must be Tensors that broadcast to input.shape[0]"
)
# Calculate jagged offsets
assert len(tensor.shape) >= 2, (
"tensor must at least be 2D for the nested narrow op to work"
)
max_seq_len = tensor.shape[1]
offset_lengths = max_seq_len * torch.arange(
0, batch_size, dtype=torch.int64, device=tensor.device
)
# Jagged layout specifies that offsets are stored as int64 on the same device as values.
offsets = torch.cat(
[
start_list + offset_lengths,
(start_list[-1] + offset_lengths[-1] + length_list[-1]).unsqueeze(0),
]
)
# Reshape buffer to flatten the 1st and 2nd dimension (view used to enforce non-copy)
if len(tensor.shape) > 2:
values = tensor.view(-1, *tensor.shape[2:])
else:
values = tensor.view(-1)
# Check if offsets and lengths make it possibly contiguous and return a regular NT
is_contiguous = True
orig_dim = tensor.shape[1]
if torch.any(length_list[1:-1].ne(orig_dim)):
is_contiguous = False
if torch.any(offsets[1:-2].diff().ne(orig_dim)):
is_contiguous = False
if offsets[0] + length_list[0] != orig_dim:
is_contiguous = False
actual_max_seqlen = int(torch.max(lengths).item())
min_seqlen = int(torch.min(lengths).item())
if is_contiguous:
ret_nt = nested_view_from_values_offsets(
values[offsets[0] : offsets[-1]],
offsets - offsets[0],
min_seqlen=min_seqlen,
max_seqlen=actual_max_seqlen,
)
else:
ret_nt = nested_view_from_values_offsets_lengths(
values,
offsets,
length_list,
min_seqlen=min_seqlen,
max_seqlen=actual_max_seqlen,
)
return (ret_nt, offsets, None if is_contiguous else length_list)
# NB: A dummy arg is required so that NestedTensor.__torch_dispatch__() is invoked
# for _nested_view_from_values_offsets(). Sizes don't matter much, but they shouldn't be
# 0/1 because the dummy can be fake-ified and we want to avoid specializing.
# This arg is otherwise unused.
_dummy_instance: Optional[torch.Tensor] = None
def _nt_view_dummy() -> torch.Tensor:
global _dummy_instance
if _dummy_instance is None:
_dummy_instance = NestedTensor(
values=torch.zeros(3, 3, device="meta"),
offsets=torch.zeros(3, device="meta", dtype=torch.int64),
).detach()
return _dummy_instance
def nested_view_from_values_offsets(
values, offsets, ragged_idx=1, min_seqlen=None, max_seqlen=None
):
min_seqlen_tensor = None
if min_seqlen is not None:
min_seqlen_tensor = _store_val_in_tensor(min_seqlen)
max_seqlen_tensor = None
if max_seqlen is not None:
max_seqlen_tensor = _store_val_in_tensor(max_seqlen)
return torch._nested_view_from_jagged( # type: ignore[attr-defined]
values,
offsets,
_nt_view_dummy(),
None,
ragged_idx,
min_seqlen_tensor,
max_seqlen_tensor,
) # type: ignore[return-value]
def nested_view_from_values_offsets_lengths(
values, offsets, lengths, ragged_idx=1, min_seqlen=None, max_seqlen=None
):
min_seqlen_tensor = None
if min_seqlen is not None:
min_seqlen_tensor = _store_val_in_tensor(min_seqlen)
max_seqlen_tensor = None
if max_seqlen is not None:
max_seqlen_tensor = _store_val_in_tensor(max_seqlen)
return torch._nested_view_from_jagged( # type: ignore[attr-defined]
values,
offsets,
_nt_view_dummy(),
lengths,
ragged_idx,
min_seqlen_tensor,
max_seqlen_tensor,
) # type: ignore[return-value]
def nested_from_padded(
padded, offsets, ragged_idx=1, min_seqlen=None, max_seqlen=None, sum_S=None
):
min_seqlen_tensor = None
if min_seqlen is not None:
min_seqlen_tensor = _store_val_in_tensor(min_seqlen)
max_seqlen_tensor = None
if max_seqlen is not None:
max_seqlen_tensor = _store_val_in_tensor(max_seqlen)
return torch._nested_from_padded_tensor(
padded,
offsets,
_nt_view_dummy(),
ragged_idx,
min_seqlen_tensor,
max_seqlen_tensor,
sum_S,
)
| ViewNestedFromBuffer |
python | fluentpython__example-code | 20-descriptor/bulkfood/bulkfood_v4b.py | {
"start": 1897,
"end": 2173
} | class ____:
weight = Quantity()
price = Quantity()
def __init__(self, description, weight, price):
self.description = description
self.weight = weight
self.price = price
def subtotal(self):
return self.weight * self.price
| LineItem |
python | hynek__structlog | tests/processors/test_processors.py | {
"start": 7656,
"end": 20947
} | class ____:
parameter_strings = {
"pathname",
"filename",
"module",
"func_name",
"lineno",
"thread",
"thread_name",
"process",
"process_name",
}
# Exclude QUAL_NAME from the general set to keep parity with stdlib
# LogRecord-derived parameters. QUAL_NAME is tested separately.
_all_parameters = {
p
for p in set(CallsiteParameter)
if p is not CallsiteParameter.QUAL_NAME
}
def test_all_parameters(self) -> None:
"""
All callsite parameters are included in ``self.parameter_strings`` and
the dictionary returned by ``self.get_callsite_parameters`` contains
keys for all callsite parameters.
"""
assert self.parameter_strings == {
member.value for member in self._all_parameters
}
assert self.parameter_strings == self.get_callsite_parameters().keys()
@pytest.mark.skipif(
sys.version_info < (3, 11), reason="QUAL_NAME requires Python 3.11+"
)
def test_qual_name_structlog(self) -> None:
"""
QUAL_NAME is added for structlog-originated events on Python 3.11+.
"""
processor = CallsiteParameterAdder(
parameters={CallsiteParameter.QUAL_NAME}
)
event_dict: EventDict = {"event": "msg"}
actual = processor(None, None, event_dict)
assert actual["qual_name"].endswith(
f"{self.__class__.__name__}.test_qual_name_structlog"
)
def test_qual_name_logging_origin_absent(self) -> None:
"""
QUAL_NAME is not sourced from stdlib LogRecord and remains absent
(because it doesn't exist).
"""
processor = CallsiteParameterAdder(
parameters={CallsiteParameter.QUAL_NAME}
)
record = logging.LogRecord(
"name",
logging.INFO,
__file__,
0,
"message",
None,
None,
"func",
)
event_dict: EventDict = {
"event": "message",
"_record": record,
"_from_structlog": False,
}
actual = processor(None, None, event_dict)
assert "qual_name" not in actual
@pytest.mark.asyncio
@pytest.mark.parametrize(
("wrapper_class", "method_name"),
[
(structlog.stdlib.BoundLogger, "ainfo"),
(structlog.stdlib.AsyncBoundLogger, "info"),
],
)
async def test_async(self, wrapper_class, method_name) -> None:
"""
Callsite information for async invocations are correct.
"""
string_io = StringIO()
class StringIOLogger(structlog.PrintLogger):
def __init__(self):
super().__init__(file=string_io)
processor = self.make_processor(None, ["concurrent", "threading"])
structlog.configure(
processors=[processor, JSONRenderer()],
logger_factory=StringIOLogger,
wrapper_class=wrapper_class,
cache_logger_on_first_use=True,
)
logger = structlog.stdlib.get_logger()
callsite_params = self.get_callsite_parameters()
await getattr(logger, method_name)("baz")
logger_params = json.loads(string_io.getvalue())
# These are different when running under async
for key in ["thread", "thread_name"]:
callsite_params.pop(key)
logger_params.pop(key)
assert {"event": "baz", **callsite_params} == logger_params
def test_additional_ignores(self, monkeypatch: pytest.MonkeyPatch) -> None:
"""
Stack frames from modules with names that start with values in
`additional_ignores` are ignored when determining the callsite.
"""
test_message = "test message"
additional_ignores = ["tests.additional_frame"]
processor = self.make_processor(None, additional_ignores)
event_dict: EventDict = {"event": test_message}
# Warning: the next two lines must appear exactly like this to make
# line numbers match.
callsite_params = self.get_callsite_parameters(1)
actual = processor(None, None, event_dict)
expected = {"event": test_message, **callsite_params}
assert expected == actual
@pytest.mark.parametrize(
("origin", "parameter_strings"),
itertools.product(
["logging", "structlog"],
[
None,
*[{parameter} for parameter in parameter_strings],
set(),
parameter_strings,
{"pathname", "filename"},
{"module", "func_name"},
],
),
)
def test_processor(
self,
origin: str,
parameter_strings: set[str] | None,
):
"""
The correct callsite parameters are added to event dictionaries.
"""
test_message = "test message"
processor = self.make_processor(parameter_strings)
if origin == "structlog":
event_dict: EventDict = {"event": test_message}
callsite_params = self.get_callsite_parameters()
actual = processor(None, None, event_dict)
elif origin == "logging":
callsite_params = self.get_callsite_parameters()
record = logging.LogRecord(
"name",
logging.INFO,
callsite_params["pathname"],
callsite_params["lineno"],
test_message,
None,
None,
callsite_params["func_name"],
)
event_dict: EventDict = {
"event": test_message,
"_record": record,
"_from_structlog": False,
}
actual = processor(None, None, event_dict)
else:
pytest.fail(f"invalid origin {origin}")
actual = {
key: value
for key, value in actual.items()
if not key.startswith("_")
}
callsite_params = self.filter_parameter_dict(
callsite_params, parameter_strings
)
expected = {"event": test_message, **callsite_params}
assert expected == actual
@pytest.mark.parametrize(
("setup", "origin", "parameter_strings"),
itertools.product(
["common-without-pre", "common-with-pre", "shared", "everywhere"],
["logging", "structlog"],
[
None,
*[{parameter} for parameter in parameter_strings],
set(),
parameter_strings,
{"pathname", "filename"},
{"module", "func_name"},
],
),
)
def test_e2e(
self,
setup: str,
origin: str,
parameter_strings: set[str] | None,
) -> None:
"""
Logging output contains the correct callsite parameters.
"""
logger = logging.Logger(sys._getframe().f_code.co_name)
string_io = StringIO()
handler = logging.StreamHandler(string_io)
processors = [self.make_processor(parameter_strings)]
if setup == "common-without-pre":
common_processors = processors
formatter = ProcessorFormatter(
processors=[*processors, JSONRenderer()]
)
elif setup == "common-with-pre":
common_processors = processors
formatter = ProcessorFormatter(
foreign_pre_chain=processors,
processors=[JSONRenderer()],
)
elif setup == "shared":
common_processors = []
formatter = ProcessorFormatter(
processors=[*processors, JSONRenderer()],
)
elif setup == "everywhere":
common_processors = processors
formatter = ProcessorFormatter(
foreign_pre_chain=processors,
processors=[*processors, JSONRenderer()],
)
else:
pytest.fail(f"invalid setup {setup}")
handler.setFormatter(formatter)
handler.setLevel(0)
logger.addHandler(handler)
logger.setLevel(0)
test_message = "test message"
if origin == "logging":
callsite_params = self.get_callsite_parameters()
logger.info(test_message)
elif origin == "structlog":
ctx = {}
bound_logger = BoundLogger(
logger,
[*common_processors, ProcessorFormatter.wrap_for_formatter],
ctx,
)
callsite_params = self.get_callsite_parameters()
bound_logger.info(test_message)
else:
pytest.fail(f"invalid origin {origin}")
callsite_params = self.filter_parameter_dict(
callsite_params, parameter_strings
)
actual = {
key: value
for key, value in json.loads(string_io.getvalue()).items()
if not key.startswith("_")
}
expected = {"event": test_message, **callsite_params}
assert expected == actual
def test_pickeable_callsite_parameter_adder(self) -> None:
"""
An instance of ``CallsiteParameterAdder`` can be pickled. This
functionality may be used to propagate structlog configurations to
subprocesses.
"""
pickle.dumps(CallsiteParameterAdder())
@classmethod
def make_processor(
cls,
parameter_strings: set[str] | None,
additional_ignores: list[str] | None = None,
) -> CallsiteParameterAdder:
"""
Creates a ``CallsiteParameterAdder`` with parameters matching the
supplied *parameter_strings* values and with the supplied
*additional_ignores* values.
Args:
parameter_strings:
Strings for which corresponding ``CallsiteParameters`` should
be included in the resulting ``CallsiteParameterAdded``.
additional_ignores:
Used as *additional_ignores* for the resulting
``CallsiteParameterAdded``.
"""
if parameter_strings is None:
return CallsiteParameterAdder(
parameters=cls._all_parameters,
additional_ignores=additional_ignores,
)
parameters = cls.filter_parameters(parameter_strings)
return CallsiteParameterAdder(
parameters=parameters,
additional_ignores=additional_ignores,
)
@classmethod
def filter_parameters(
cls, parameter_strings: set[str] | None
) -> set[CallsiteParameter]:
"""
Returns a set containing all ``CallsiteParameter`` members with values
that are in ``parameter_strings``.
Args:
parameter_strings:
The parameters strings for which corresponding
``CallsiteParameter`` members should be returned. If this value
is `None` then all ``CallsiteParameter`` will be returned.
"""
if parameter_strings is None:
return cls._all_parameters
return {
parameter
for parameter in cls._all_parameters
if parameter.value in parameter_strings
}
@classmethod
def filter_parameter_dict(
cls, input: dict[str, object], parameter_strings: set[str] | None
) -> dict[str, object]:
"""
Returns a dictionary that is equivalent to *input* but with all keys
not in *parameter_strings* removed.
Args:
parameter_strings:
The keys to keep in the dictionary, if this value is ``None``
then all keys matching ``cls.parameter_strings`` are kept.
"""
if parameter_strings is None:
parameter_strings = cls.parameter_strings
return {
key: value
for key, value in input.items()
if key in parameter_strings
}
@classmethod
def get_callsite_parameters(cls, offset: int = 1) -> dict[str, object]:
"""
This function creates dictionary of callsite parameters for the line
that is ``offset`` lines after the invocation of this function.
Args:
offset:
The amount of lines after the invocation of this function that
callsite parameters should be generated for.
"""
frame_info = inspect.stack()[1]
frame_traceback = inspect.getframeinfo(frame_info[0])
return {
"pathname": frame_traceback.filename,
"filename": os.path.basename(frame_traceback.filename),
"module": os.path.splitext(
os.path.basename(frame_traceback.filename)
)[0],
"func_name": frame_info.function,
"lineno": frame_info.lineno + offset,
"thread": threading.get_ident(),
"thread_name": threading.current_thread().name,
"process": os.getpid(),
"process_name": get_processname(),
}
| TestCallsiteParameterAdder |
python | dagster-io__dagster | python_modules/libraries/dagster-fivetran/dagster_fivetran_tests/deprecated/test_asset_defs.py | {
"start": 6723,
"end": 7094
} | class ____(DagsterFivetranTranslator):
def get_asset_spec(self, props: FivetranConnectorTableProps) -> AssetSpec:
default_spec = super().get_asset_spec(props)
return default_spec.replace_attributes(
key=default_spec.key.with_prefix("prefix"),
metadata={**default_spec.metadata, "custom": "metadata"},
)
| MyCustomTranslator |
python | astropy__astropy | astropy/nddata/mixins/tests/test_ndarithmetic.py | {
"start": 789,
"end": 59884
} | class ____(StdDevUncertainty):
@property
def supports_correlated(self):
return False
# Correspondence between NDArithmetic & Python method/function names:
STR_TO_OPERATOR = {
"add": operator.add,
"subtract": operator.sub,
"multiply": operator.mul,
"divide": operator.truediv,
}
# Test with Data covers:
# scalars, 1D, 2D and 3D
# broadcasting between them
@pytest.mark.filterwarnings("ignore:divide by zero encountered.*")
@pytest.mark.parametrize(
("data1", "data2"),
[
(np.array(5), np.array(10)),
(np.array(5), np.arange(10)),
(np.array(5), np.arange(10).reshape(2, 5)),
(np.arange(10), np.ones(10) * 2),
(np.arange(10), np.ones((10, 10)) * 2),
(np.arange(10).reshape(2, 5), np.ones((2, 5)) * 3),
(np.arange(1000).reshape(20, 5, 10), np.ones((20, 5, 10)) * 3),
],
)
@pytest.mark.parametrize("meth", STR_TO_OPERATOR)
def test_arithmetics_data(data1, data2, meth):
nd1 = NDDataArithmetic(data1)
nd2 = NDDataArithmetic(data2)
nd = getattr(nd1, meth)(nd2)
assert_array_equal(STR_TO_OPERATOR[meth](data1, data2), nd.data)
# Check that broadcasting worked as expected
if data1.ndim > data2.ndim:
assert data1.shape == nd.data.shape
else:
assert data2.shape == nd.data.shape
# Check all other attributes are not set
assert nd.unit is None
assert nd.uncertainty is None
assert nd.mask is None
assert len(nd.meta) == 0
assert nd.wcs is None
# Test numpy functions that use astropy functions first
def test_arithmetics_ccddata():
ccd1 = CCDData([1, 2, 3], unit="adu")
ccd2 = CCDData([1.1, 2.2, 3.3], unit="adu")
ccd3 = CCDData([1.1, 2.2, 3.3, 4.4], unit="adu")
nd1 = NDDataArithmetic(ccd1)
nd3 = NDDataArithmetic(ccd3)
assert np.min(ccd1).data == ccd1.min().data
assert np.min(ccd2).data == ccd2.min().data
assert np.max(ccd1).data == ccd1.max().data
assert np.max(ccd2).data == ccd2.max().data
assert np.sum(ccd1).data == ccd1.sum().data
assert np.sum(ccd2).data == ccd2.sum().data
assert np.mean(ccd1).data == ccd1.mean().data
assert np.mean(ccd2).data == ccd2.mean().data
# Ensure exceptions are raised for numpy keys that are not None
def test_arithmetics_ccddata_errors():
ccd1 = CCDData([1, 2, 3], unit="adu")
ccd2 = CCDData([1.1, 2.2, 3.3, 4.4], unit="adu")
nd1 = NDDataArithmetic(ccd1)
nd2 = NDDataArithmetic(ccd2)
with pytest.raises(ValueError):
np.mean(ccd1, out=nd1)
with pytest.raises(ValueError):
np.mean(ccd2, out=nd2)
with pytest.raises(ValueError):
np.mean(ccd1, out=nd1, dtype=int)
with pytest.raises(ValueError):
np.mean(ccd2, dtype=int)
# Invalid arithmetic operations for data covering:
# not broadcastable data
def test_arithmetics_data_invalid():
nd1 = NDDataArithmetic([1, 2, 3])
nd2 = NDDataArithmetic([1, 2])
with pytest.raises(ValueError):
nd1.add(nd2)
# Test with Data and unit and covers:
# identical units (even dimensionless unscaled vs. no unit),
# equivalent units (such as meter and kilometer)
# equivalent composite units (such as m/s and km/h)
@pytest.mark.filterwarnings("ignore:divide by zero encountered.*")
@pytest.mark.parametrize(
("data1", "data2"),
[
(np.array(5) * u.s, np.array(10) * u.s),
(np.array(5) * u.s, np.arange(10) * u.h),
(np.array(5) * u.s, np.arange(10).reshape(2, 5) * u.min),
(np.arange(10) * u.m / u.s, np.ones(10) * 2 * u.km / u.s),
(np.arange(10) * u.m / u.s, np.ones((10, 10)) * 2 * u.m / u.h),
(np.arange(10).reshape(2, 5) * u.m / u.s, np.ones((2, 5)) * 3 * u.km / u.h),
(
np.arange(1000).reshape(20, 5, 10),
np.ones((20, 5, 10)) * 3 * u.dimensionless_unscaled,
),
(np.array(5), np.array(10) * u.s / u.h),
],
)
@pytest.mark.parametrize("meth", STR_TO_OPERATOR)
def test_arithmetics_data_unit_identical(data1, data2, meth):
nd1 = NDDataArithmetic(data1)
nd2 = NDDataArithmetic(data2)
nd = getattr(nd1, meth)(nd2)
ref = STR_TO_OPERATOR[meth](data1, data2)
ref_unit, ref_data = ref.unit, ref.value
assert_array_equal(ref_data, nd.data)
assert nd.unit == ref_unit
# Check that broadcasting worked as expected
if data1.ndim > data2.ndim:
assert data1.shape == nd.data.shape
else:
assert data2.shape == nd.data.shape
# Check all other attributes are not set
assert nd.uncertainty is None
assert nd.mask is None
assert len(nd.meta) == 0
assert nd.wcs is None
# Test with Data and unit and covers:
# not identical not convertible units
# one with unit (which is not dimensionless) and one without
@pytest.mark.parametrize(
("data1", "data2"),
[
(np.array(5) * u.s, np.array(10) * u.m),
(np.array(5) * u.Mpc, np.array(10) * u.km / u.s),
(np.array(5) * u.Mpc, np.array(10)),
(np.array(5), np.array(10) * u.s),
],
)
@pytest.mark.parametrize("meth", STR_TO_OPERATOR)
def test_arithmetics_data_unit_not_identical(data1, data2, meth):
nd1 = NDDataArithmetic(data1)
nd2 = NDDataArithmetic(data2)
if meth in ("add", "subtract"):
# Addition/subtraction should not be possible
with pytest.raises(UnitsError):
getattr(nd1, meth)(nd2)
else:
# Multiplication/division is possible
nd = getattr(nd1, meth)(nd2)
ref = STR_TO_OPERATOR[meth](data1, data2)
ref_unit, ref_data = ref.unit, ref.value
assert_array_equal(ref_data, nd.data)
assert nd.unit == ref_unit
# Check all other attributes are not set
assert nd.uncertainty is None
assert nd.mask is None
assert len(nd.meta) == 0
assert nd.wcs is None
# Tests with wcs (not very sensible because there is no operation between them
# covering:
# both set and identical/not identical
# one set
# None set
@pytest.mark.parametrize(
("wcs1", "wcs2"),
[
(None, None),
(None, WCS(naxis=2)),
(WCS(naxis=2), None),
nd_testing.create_two_equal_wcs(naxis=2),
nd_testing.create_two_unequal_wcs(naxis=2),
],
)
@pytest.mark.parametrize("meth", STR_TO_OPERATOR)
def test_arithmetics_data_wcs(wcs1, wcs2, meth):
nd1 = NDDataArithmetic(1, wcs=wcs1)
nd2 = NDDataArithmetic(1, wcs=wcs2)
if wcs1 is None and wcs2 is None:
ref_wcs = None
elif wcs1 is None:
ref_wcs = wcs2
elif wcs2 is None:
ref_wcs = wcs1
else:
ref_wcs = wcs1
nd = getattr(nd1, meth)(nd2)
nd_testing.assert_wcs_seem_equal(ref_wcs, nd.wcs)
# Check all other attributes are not set
assert nd.unit is None
assert nd.uncertainty is None
assert len(nd.meta) == 0
assert nd.mask is None
# Masks are completely separated in the NDArithmetics from the data so we need
# no correlated tests but covering:
# masks 1D, 2D and mixed cases with broadcasting
@pytest.mark.parametrize(
("mask1", "mask2"),
[
(None, None),
(None, False),
(True, None),
(False, False),
(True, False),
(False, True),
(True, True),
(np.array(False), np.array(True)),
(np.array(False), np.array([0, 1, 0, 1, 1], dtype=np.bool_)),
(np.array(True), np.array([[0, 1, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=np.bool_)),
(
np.array([0, 1, 0, 1, 1], dtype=np.bool_),
np.array([1, 1, 0, 0, 1], dtype=np.bool_),
),
(
np.array([0, 1, 0, 1, 1], dtype=np.bool_),
np.array([[0, 1, 0, 1, 1], [1, 0, 0, 1, 1]], dtype=np.bool_),
),
(
np.array([[0, 1, 0, 1, 1], [1, 0, 0, 1, 1]], dtype=np.bool_),
np.array([[0, 1, 0, 1, 1], [1, 1, 0, 1, 1]], dtype=np.bool_),
),
],
)
@pytest.mark.parametrize("meth", STR_TO_OPERATOR)
def test_arithmetics_data_masks(mask1, mask2, meth):
nd1 = NDDataArithmetic(1, mask=mask1)
nd2 = NDDataArithmetic(1, mask=mask2)
if mask1 is None and mask2 is None:
ref_mask = None
elif mask1 is None:
ref_mask = mask2
elif mask2 is None:
ref_mask = mask1
else:
ref_mask = mask1 | mask2
nd = getattr(nd1, meth)(nd2)
assert_array_equal(ref_mask, nd.mask)
# Check all other attributes are not set
assert nd.unit is None
assert nd.uncertainty is None
assert len(nd.meta) == 0
assert nd.wcs is None
# Check that masks are preserved+propagated in NDData collapse operations
@pytest.mark.parametrize(
("collapse_axis", "mask_sum", "unit"),
[(0, [3, 0, 3, 0], "Jy"), (1, [2, 0, 2, 0], None), (2, [2, 2, 2], "Jy")],
)
def test_collapse_masks(collapse_axis, mask_sum, unit):
shape = (2, 3, 4)
data = np.arange(np.prod(shape)).reshape(shape)
mask = data % 2 == 0
nd_masked = NDDataArithmetic(data=data, mask=mask, unit=unit)
nd_nomask = NDDataArithmetic(data=data, unit=unit)
assert_array_equal(nd_masked.sum(axis=collapse_axis).mask.sum(axis=0), mask_sum)
# if no mask is given, the collapse result should have no mask:
assert nd_nomask.sum(axis=collapse_axis).mask is None
# One additional case which can not be easily incorporated in the test above
# what happens if the masks are numpy ndarrays are not broadcastable
def test_arithmetics_data_masks_invalid():
nd1 = NDDataArithmetic(1, mask=np.array([1, 0], dtype=np.bool_))
nd2 = NDDataArithmetic(1, mask=np.array([1, 0, 1], dtype=np.bool_))
with pytest.raises(ValueError):
nd1.add(nd2)
with pytest.raises(ValueError):
nd1.multiply(nd2)
with pytest.raises(ValueError):
nd1.subtract(nd2)
with pytest.raises(ValueError):
nd1.divide(nd2)
# Covering:
# both have uncertainties (data and uncertainty without unit)
# tested against manually determined resulting uncertainties to verify the
# implemented formulas
# this test only works as long as data1 and data2 do not contain any 0
def test_arithmetics_stddevuncertainty_basic():
nd1 = NDDataArithmetic([1, 2, 3], uncertainty=StdDevUncertainty([1, 1, 3]))
nd2 = NDDataArithmetic([2, 2, 2], uncertainty=StdDevUncertainty([2, 2, 2]))
nd3 = nd1.add(nd2)
nd4 = nd2.add(nd1)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = np.sqrt(np.array([1, 1, 3]) ** 2 + np.array([2, 2, 2]) ** 2)
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.subtract(nd2)
nd4 = nd2.subtract(nd1)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty (same as for add)
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
# Multiplication and Division only work with almost equal array comparisons
# since the formula implemented and the formula used as reference are
# slightly different.
nd3 = nd1.multiply(nd2)
nd4 = nd2.multiply(nd1)
# Inverse operation should result in the same uncertainty
assert_array_almost_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = np.abs(np.array([2, 4, 6])) * np.sqrt(
(np.array([1, 1, 3]) / np.array([1, 2, 3])) ** 2
+ (np.array([2, 2, 2]) / np.array([2, 2, 2])) ** 2
)
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.divide(nd2)
nd4 = nd2.divide(nd1)
# Inverse operation gives a different uncertainty!
# Compare it to the theoretical uncertainty
ref_uncertainty_1 = np.abs(np.array([1 / 2, 2 / 2, 3 / 2])) * np.sqrt(
(np.array([1, 1, 3]) / np.array([1, 2, 3])) ** 2
+ (np.array([2, 2, 2]) / np.array([2, 2, 2])) ** 2
)
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty_1)
ref_uncertainty_2 = np.abs(np.array([2, 1, 2 / 3])) * np.sqrt(
(np.array([1, 1, 3]) / np.array([1, 2, 3])) ** 2
+ (np.array([2, 2, 2]) / np.array([2, 2, 2])) ** 2
)
assert_array_almost_equal(nd4.uncertainty.array, ref_uncertainty_2)
# Tests for correlation, covering
# correlation between -1 and 1 with correlation term being positive / negative
# also with one data being once positive and once completely negative
# The point of this test is to compare the used formula to the theoretical one.
# TODO: Maybe covering units too but I think that should work because of
# the next tests. Also this may be reduced somehow.
@pytest.mark.parametrize(
("cor", "uncert1", "data2"),
[
(-1, [1, 1, 3], [2, 2, 7]),
(-0.5, [1, 1, 3], [2, 2, 7]),
(-0.25, [1, 1, 3], [2, 2, 7]),
(0, [1, 1, 3], [2, 2, 7]),
(0.25, [1, 1, 3], [2, 2, 7]),
(0.5, [1, 1, 3], [2, 2, 7]),
(1, [1, 1, 3], [2, 2, 7]),
(-1, [-1, -1, -3], [2, 2, 7]),
(-0.5, [-1, -1, -3], [2, 2, 7]),
(-0.25, [-1, -1, -3], [2, 2, 7]),
(0, [-1, -1, -3], [2, 2, 7]),
(0.25, [-1, -1, -3], [2, 2, 7]),
(0.5, [-1, -1, -3], [2, 2, 7]),
(1, [-1, -1, -3], [2, 2, 7]),
(-1, [1, 1, 3], [-2, -3, -2]),
(-0.5, [1, 1, 3], [-2, -3, -2]),
(-0.25, [1, 1, 3], [-2, -3, -2]),
(0, [1, 1, 3], [-2, -3, -2]),
(0.25, [1, 1, 3], [-2, -3, -2]),
(0.5, [1, 1, 3], [-2, -3, -2]),
(1, [1, 1, 3], [-2, -3, -2]),
(-1, [-1, -1, -3], [-2, -3, -2]),
(-0.5, [-1, -1, -3], [-2, -3, -2]),
(-0.25, [-1, -1, -3], [-2, -3, -2]),
(0, [-1, -1, -3], [-2, -3, -2]),
(0.25, [-1, -1, -3], [-2, -3, -2]),
(0.5, [-1, -1, -3], [-2, -3, -2]),
(1, [-1, -1, -3], [-2, -3, -2]),
],
)
def test_arithmetics_stddevuncertainty_basic_with_correlation(cor, uncert1, data2):
data1 = np.array([1, 2, 3])
data2 = np.array(data2)
uncert1 = np.array(uncert1)
uncert2 = np.array([2, 2, 2])
nd1 = NDDataArithmetic(data1, uncertainty=StdDevUncertainty(uncert1))
nd2 = NDDataArithmetic(data2, uncertainty=StdDevUncertainty(uncert2))
nd3 = nd1.add(nd2, uncertainty_correlation=cor)
nd4 = nd2.add(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = np.sqrt(
uncert1**2 + uncert2**2 + 2 * cor * np.abs(uncert1 * uncert2)
)
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.subtract(nd2, uncertainty_correlation=cor)
nd4 = nd2.subtract(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = np.sqrt(
uncert1**2 + uncert2**2 - 2 * cor * np.abs(uncert1 * uncert2)
)
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
# Multiplication and Division only work with almost equal array comparisons
# since the formula implemented and the formula used as reference are
# slightly different.
nd3 = nd1.multiply(nd2, uncertainty_correlation=cor)
nd4 = nd2.multiply(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_almost_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = (np.abs(data1 * data2)) * np.sqrt(
(uncert1 / data1) ** 2
+ (uncert2 / data2) ** 2
+ (2 * cor * np.abs(uncert1 * uncert2) / (data1 * data2))
)
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.divide(nd2, uncertainty_correlation=cor)
nd4 = nd2.divide(nd1, uncertainty_correlation=cor)
# Inverse operation gives a different uncertainty!
# Compare it to the theoretical uncertainty
ref_uncertainty_1 = (np.abs(data1 / data2)) * np.sqrt(
(uncert1 / data1) ** 2
+ (uncert2 / data2) ** 2
- (2 * cor * np.abs(uncert1 * uncert2) / (data1 * data2))
)
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty_1)
ref_uncertainty_2 = (np.abs(data2 / data1)) * np.sqrt(
(uncert1 / data1) ** 2
+ (uncert2 / data2) ** 2
- (2 * cor * np.abs(uncert1 * uncert2) / (data1 * data2))
)
assert_array_almost_equal(nd4.uncertainty.array, ref_uncertainty_2)
# Tests for correlation, covering
# correlation between -1 and 1 with correlation term being positive / negative
# also with one data being once positive and once completely negative
# The point of this test is to compare the used formula to the theoretical one.
# TODO: Maybe covering units too but I think that should work because of
# the next tests. Also this may be reduced somehow.
@pytest.mark.parametrize(
("cor", "uncert1", "data2"),
[
(-1, [1, 1, 3], [2, 2, 7]),
(-0.5, [1, 1, 3], [2, 2, 7]),
(-0.25, [1, 1, 3], [2, 2, 7]),
(0, [1, 1, 3], [2, 2, 7]),
(0.25, [1, 1, 3], [2, 2, 7]),
(0.5, [1, 1, 3], [2, 2, 7]),
(1, [1, 1, 3], [2, 2, 7]),
(-1, [-1, -1, -3], [2, 2, 7]),
(-0.5, [-1, -1, -3], [2, 2, 7]),
(-0.25, [-1, -1, -3], [2, 2, 7]),
(0, [-1, -1, -3], [2, 2, 7]),
(0.25, [-1, -1, -3], [2, 2, 7]),
(0.5, [-1, -1, -3], [2, 2, 7]),
(1, [-1, -1, -3], [2, 2, 7]),
(-1, [1, 1, 3], [-2, -3, -2]),
(-0.5, [1, 1, 3], [-2, -3, -2]),
(-0.25, [1, 1, 3], [-2, -3, -2]),
(0, [1, 1, 3], [-2, -3, -2]),
(0.25, [1, 1, 3], [-2, -3, -2]),
(0.5, [1, 1, 3], [-2, -3, -2]),
(1, [1, 1, 3], [-2, -3, -2]),
(-1, [-1, -1, -3], [-2, -3, -2]),
(-0.5, [-1, -1, -3], [-2, -3, -2]),
(-0.25, [-1, -1, -3], [-2, -3, -2]),
(0, [-1, -1, -3], [-2, -3, -2]),
(0.25, [-1, -1, -3], [-2, -3, -2]),
(0.5, [-1, -1, -3], [-2, -3, -2]),
(1, [-1, -1, -3], [-2, -3, -2]),
],
)
def test_arithmetics_varianceuncertainty_basic_with_correlation(cor, uncert1, data2):
data1 = np.array([1, 2, 3])
data2 = np.array(data2)
uncert1 = np.array(uncert1) ** 2
uncert2 = np.array([2, 2, 2]) ** 2
nd1 = NDDataArithmetic(data1, uncertainty=VarianceUncertainty(uncert1))
nd2 = NDDataArithmetic(data2, uncertainty=VarianceUncertainty(uncert2))
nd3 = nd1.add(nd2, uncertainty_correlation=cor)
nd4 = nd2.add(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = uncert1 + uncert2 + 2 * cor * np.sqrt(uncert1 * uncert2)
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.subtract(nd2, uncertainty_correlation=cor)
nd4 = nd2.subtract(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = uncert1 + uncert2 - 2 * cor * np.sqrt(uncert1 * uncert2)
assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
# Multiplication and Division only work with almost equal array comparisons
# since the formula implemented and the formula used as reference are
# slightly different.
nd3 = nd1.multiply(nd2, uncertainty_correlation=cor)
nd4 = nd2.multiply(nd1, uncertainty_correlation=cor)
# Inverse operation should result in the same uncertainty
assert_array_almost_equal(nd3.uncertainty.array, nd4.uncertainty.array)
# Compare it to the theoretical uncertainty
ref_uncertainty = (data1 * data2) ** 2 * (
uncert1 / data1**2
+ uncert2 / data2**2
+ (2 * cor * np.sqrt(uncert1 * uncert2) / (data1 * data2))
)
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty)
nd3 = nd1.divide(nd2, uncertainty_correlation=cor)
nd4 = nd2.divide(nd1, uncertainty_correlation=cor)
# Inverse operation gives a different uncertainty because of the
# prefactor nd1/nd2 vs nd2/nd1. Howeveare, a large chunk is the same.
ref_common = (
uncert1 / data1**2
+ uncert2 / data2**2
- (2 * cor * np.sqrt(uncert1 * uncert2) / (data1 * data2))
)
# Compare it to the theoretical uncertainty
ref_uncertainty_1 = (data1 / data2) ** 2 * ref_common
assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty_1)
ref_uncertainty_2 = (data2 / data1) ** 2 * ref_common
assert_array_almost_equal(nd4.uncertainty.array, ref_uncertainty_2)
# Tests for correlation, covering
# correlation between -1 and 1 with correlation term being positive / negative
# also with one data being once positive and once completely negative
# The point of this test is to compare the used formula to the theoretical one.
# TODO: Maybe covering units too but I think that should work because of
# the next tests. Also this may be reduced somehow.
@pytest.mark.filterwarnings("ignore:divide by zero encountered.*")
@pytest.mark.parametrize(
    ("cor", "uncert1", "data2"),
    [
        (-1, [1, 1, 3], [2, 2, 7]),
        (-0.5, [1, 1, 3], [2, 2, 7]),
        (-0.25, [1, 1, 3], [2, 2, 7]),
        (0, [1, 1, 3], [2, 2, 7]),
        (0.25, [1, 1, 3], [2, 2, 7]),
        (0.5, [1, 1, 3], [2, 2, 7]),
        (1, [1, 1, 3], [2, 2, 7]),
        (-1, [-1, -1, -3], [2, 2, 7]),
        (-0.5, [-1, -1, -3], [2, 2, 7]),
        (-0.25, [-1, -1, -3], [2, 2, 7]),
        (0, [-1, -1, -3], [2, 2, 7]),
        (0.25, [-1, -1, -3], [2, 2, 7]),
        (0.5, [-1, -1, -3], [2, 2, 7]),
        (1, [-1, -1, -3], [2, 2, 7]),
        (-1, [1, 1, 3], [-2, -3, -2]),
        (-0.5, [1, 1, 3], [-2, -3, -2]),
        (-0.25, [1, 1, 3], [-2, -3, -2]),
        (0, [1, 1, 3], [-2, -3, -2]),
        (0.25, [1, 1, 3], [-2, -3, -2]),
        (0.5, [1, 1, 3], [-2, -3, -2]),
        (1, [1, 1, 3], [-2, -3, -2]),
        (-1, [-1, -1, -3], [-2, -3, -2]),
        (-0.5, [-1, -1, -3], [-2, -3, -2]),
        (-0.25, [-1, -1, -3], [-2, -3, -2]),
        (0, [-1, -1, -3], [-2, -3, -2]),
        (0.25, [-1, -1, -3], [-2, -3, -2]),
        (0.5, [-1, -1, -3], [-2, -3, -2]),
        (1, [-1, -1, -3], [-2, -3, -2]),
    ],
)
def test_arithmetics_inversevarianceuncertainty_basic_with_correlation(
    cor, uncert1, data2
):
    """Compare InverseVariance propagation against the textbook formulas.

    For every operation the implementation's propagated uncertainty is
    checked against the analytically derived reference (expressed in
    inverse-variance space, i.e. ``1 / variance``), for a range of scalar
    correlation coefficients and sign combinations of the operands.
    """
    data1 = np.array([1, 2, 3])
    data2 = np.array(data2)
    # The parametrized values are sigma-like; convert to inverse variance.
    uncert1 = 1 / np.array(uncert1) ** 2
    uncert2 = 1 / np.array([2, 2, 2]) ** 2
    nd1 = NDDataArithmetic(data1, uncertainty=InverseVariance(uncert1))
    nd2 = NDDataArithmetic(data2, uncertainty=InverseVariance(uncert2))
    nd3 = nd1.add(nd2, uncertainty_correlation=cor)
    nd4 = nd2.add(nd1, uncertainty_correlation=cor)
    # Inverse operation should result in the same uncertainty
    assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
    # Compare it to the theoretical uncertainty
    ref_uncertainty = 1 / (
        1 / uncert1 + 1 / uncert2 + 2 * cor / np.sqrt(uncert1 * uncert2)
    )
    assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
    nd3 = nd1.subtract(nd2, uncertainty_correlation=cor)
    nd4 = nd2.subtract(nd1, uncertainty_correlation=cor)
    # Inverse operation should result in the same uncertainty
    assert_array_equal(nd3.uncertainty.array, nd4.uncertainty.array)
    # Compare it to the theoretical uncertainty
    ref_uncertainty = 1 / (
        1 / uncert1 + 1 / uncert2 - 2 * cor / np.sqrt(uncert1 * uncert2)
    )
    assert_array_equal(nd3.uncertainty.array, ref_uncertainty)
    # Multiplication and Division only work with almost equal array comparisons
    # since the formula implemented and the formula used as reference are
    # slightly different.
    nd3 = nd1.multiply(nd2, uncertainty_correlation=cor)
    nd4 = nd2.multiply(nd1, uncertainty_correlation=cor)
    # Inverse operation should result in the same uncertainty
    assert_array_almost_equal(nd3.uncertainty.array, nd4.uncertainty.array)
    # Compare it to the theoretical uncertainty
    ref_uncertainty = 1 / (
        (data1 * data2) ** 2
        * (
            1 / uncert1 / data1**2
            + 1 / uncert2 / data2**2
            + (2 * cor / np.sqrt(uncert1 * uncert2) / (data1 * data2))
        )
    )
    assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty)
    nd3 = nd1.divide(nd2, uncertainty_correlation=cor)
    nd4 = nd2.divide(nd1, uncertainty_correlation=cor)
    # Inverse operation gives a different uncertainty because of the
    # prefactor nd1/nd2 vs nd2/nd1. However, a large chunk is the same.
    ref_common = (
        1 / uncert1 / data1**2
        + 1 / uncert2 / data2**2
        - (2 * cor / np.sqrt(uncert1 * uncert2) / (data1 * data2))
    )
    # Compare it to the theoretical uncertainty
    ref_uncertainty_1 = 1 / ((data1 / data2) ** 2 * ref_common)
    assert_array_almost_equal(nd3.uncertainty.array, ref_uncertainty_1)
    ref_uncertainty_2 = 1 / ((data2 / data1) ** 2 * ref_common)
    assert_array_almost_equal(nd4.uncertainty.array, ref_uncertainty_2)
# Covering:
# an np.ndarray also works as the correlation argument; numerical results
# were already validated in the scalar-correlation tests above.
def test_arithmetics_stddevuncertainty_basic_with_correlation_array():
    """An ndarray passed as ``uncertainty_correlation`` is accepted."""
    nd_a = NDDataArithmetic(
        np.array([1, 2, 3]), uncertainty=StdDevUncertainty(np.array([1, 1, 1]))
    )
    nd_b = NDDataArithmetic(
        np.array([1, 1, 1]), uncertainty=StdDevUncertainty(np.array([2, 2, 2]))
    )
    # Only checks that the call succeeds with an array-valued correlation.
    nd_a.add(nd_b, uncertainty_correlation=np.array([0, 0.25, 0]))
# Covering:
# propagate raises when a correlation is supplied but the uncertainty
# class does not support correlated propagation.
def test_arithmetics_with_correlation_unsupported():
    """A non-zero correlation with an uncorrelated uncertainty type raises."""
    nd_a = NDDataArithmetic(
        np.array([1, 2, 3]),
        uncertainty=StdDevUncertaintyUncorrelated(np.array([1, 1, 1])),
    )
    nd_b = NDDataArithmetic(
        np.array([1, 1, 1]),
        uncertainty=StdDevUncertaintyUncorrelated(np.array([2, 2, 2])),
    )
    with pytest.raises(ValueError):
        nd_a.add(nd_b, uncertainty_correlation=3)
# Covering:
# exactly one operand carries an uncertainty (data and uncertainty without
# unit).  The result is checked against the same operand carrying an explicit
# zero uncertainty (validated by the earlier tests), and the propagated
# uncertainty must stay non-negative even when the data result is negative.
def test_arithmetics_stddevuncertainty_one_missing():
    """Missing uncertainty must behave exactly like a zero uncertainty."""
    no_uncert = NDDataArithmetic([1, -2, 3])
    zero_uncert = NDDataArithmetic(
        [1, -2, 3], uncertainty=StdDevUncertainty([0, 0, 0])
    )
    with_uncert = NDDataArithmetic([2, 2, -2], uncertainty=StdDevUncertainty([2, 2, 2]))
    for meth in ("add", "subtract", "multiply", "divide"):
        # Operand without uncertainty on the left ...
        res = getattr(no_uncert, meth)(with_uncert)
        res_ref = getattr(zero_uncert, meth)(with_uncert)
        assert_array_equal(res.uncertainty.array, res_ref.uncertainty.array)
        assert_array_equal(np.abs(res.uncertainty.array), res.uncertainty.array)
        # ... and on the right.
        res = getattr(with_uncert, meth)(no_uncert)
        res_ref = getattr(with_uncert, meth)(zero_uncert)
        assert_array_equal(res.uncertainty.array, res_ref.uncertainty.array)
        assert_array_equal(np.abs(res.uncertainty.array), res.uncertainty.array)
# Covering:
# data with unit and uncertainty with unit (but equivalent units)
# compared against correctly scaled NDDatas
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide.*")
@pytest.mark.parametrize(
    ("uncert1", "uncert2"),
    [
        (np.array([1, 2, 3]) * u.m, None),
        (np.array([1, 2, 3]) * u.cm, None),
        (None, np.array([1, 2, 3]) * u.m),
        (None, np.array([1, 2, 3]) * u.cm),
        (np.array([1, 2, 3]), np.array([2, 3, 4])),
        (np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])),
        (np.array([1, 2, 3]), np.array([2, 3, 4])) * u.m,
        (np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])) * u.m,
        (np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])),
        (np.array([1, 2, 3]), np.array([2, 3, 4])) * u.cm,
        (np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])) * u.cm,
        (np.array([1, 2, 3]) * u.km, np.array([2, 3, 4])) * u.cm,
    ],
)
def test_arithmetics_stddevuncertainty_with_units(uncert1, uncert2):
    """StdDevUncertainty propagation with (equivalent) uncertainty units.

    Each unit-carrying uncertainty is compared against a reference NDData
    whose uncertainty was pre-converted to the data unit, so the in-operation
    unit handling is exercised for every arithmetic method in both operand
    orders.
    """
    # Data has same units
    data1 = np.array([1, 2, 3]) * u.m
    data2 = np.array([-4, 7, 0]) * u.m
    # Build the unit-stripped references *before* wrapping in
    # StdDevUncertainty: the original code wrapped first, so the
    # isinstance(..., Quantity) branch was unreachable and the reference
    # silently kept its unit.
    if uncert1 is not None:
        if isinstance(uncert1, Quantity):
            uncert1_ref = uncert1.to_value(data1.unit)
        else:
            uncert1_ref = uncert1
        uncert_ref1 = StdDevUncertainty(uncert1_ref, copy=True)
        uncert1 = StdDevUncertainty(uncert1)
    else:
        uncert_ref1 = None
    if uncert2 is not None:
        if isinstance(uncert2, Quantity):
            uncert2_ref = uncert2.to_value(data2.unit)
        else:
            uncert2_ref = uncert2
        uncert_ref2 = StdDevUncertainty(uncert2_ref, copy=True)
        uncert2 = StdDevUncertainty(uncert2)
    else:
        uncert_ref2 = None
    nd1 = NDDataArithmetic(data1, uncertainty=uncert1)
    nd2 = NDDataArithmetic(data2, uncertainty=uncert2)
    nd1_ref = NDDataArithmetic(data1, uncertainty=uncert_ref1)
    nd2_ref = NDDataArithmetic(data2, uncertainty=uncert_ref2)
    # Check every operation in both operand orders against the reference.
    for meth in ("add", "subtract", "multiply", "divide"):
        for (first, second), (first_ref, second_ref) in (
            ((nd1, nd2), (nd1_ref, nd2_ref)),
            ((nd2, nd1), (nd2_ref, nd1_ref)),
        ):
            nd3 = getattr(first, meth)(second)
            nd3_ref = getattr(first_ref, meth)(second_ref)
            assert nd3.unit == nd3_ref.unit
            assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
            # almost_equal: unit conversion (e.g. cm -> m) may introduce
            # float rounding.  The original compared nd3 against itself,
            # which could never fail.
            assert_array_almost_equal(
                nd3.uncertainty.array, nd3_ref.uncertainty.array
            )
# Covering:
# data with unit and uncertainty with unit (but equivalent units)
# compared against correctly scaled NDDatas
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide.*")
@pytest.mark.parametrize(
    ("uncert1", "uncert2"),
    [
        (np.array([1, 2, 3]) * u.m, None),
        (np.array([1, 2, 3]) * u.cm, None),
        (None, np.array([1, 2, 3]) * u.m),
        (None, np.array([1, 2, 3]) * u.cm),
        (np.array([1, 2, 3]), np.array([2, 3, 4])),
        (np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])),
        (np.array([1, 2, 3]), np.array([2, 3, 4])) * u.m,
        (np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])) * u.m,
        (np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])),
        (np.array([1, 2, 3]), np.array([2, 3, 4])) * u.cm,
        (np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])) * u.cm,
        (np.array([1, 2, 3]) * u.km, np.array([2, 3, 4])) * u.cm,
    ],
)
def test_arithmetics_varianceuncertainty_with_units(uncert1, uncert2):
    """VarianceUncertainty propagation with (equivalent) uncertainty units.

    The sigma-like parametrized inputs are squared into variances; each
    unit-carrying uncertainty is compared against a reference NDData whose
    variance was pre-converted to the squared data unit.
    """
    # Data has same units
    data1 = np.array([1, 2, 3]) * u.m
    data2 = np.array([-4, 7, 0]) * u.m
    # Square into variance first, then build the unit-stripped reference
    # *before* wrapping: the original code wrapped first, making the
    # isinstance(..., Quantity) branch unreachable.
    if uncert1 is not None:
        uncert1 = uncert1**2
        if isinstance(uncert1, Quantity):
            uncert1_ref = uncert1.to_value(data1.unit**2)
        else:
            uncert1_ref = uncert1
        uncert_ref1 = VarianceUncertainty(uncert1_ref, copy=True)
        uncert1 = VarianceUncertainty(uncert1)
    else:
        uncert_ref1 = None
    if uncert2 is not None:
        uncert2 = uncert2**2
        if isinstance(uncert2, Quantity):
            uncert2_ref = uncert2.to_value(data2.unit**2)
        else:
            uncert2_ref = uncert2
        uncert_ref2 = VarianceUncertainty(uncert2_ref, copy=True)
        uncert2 = VarianceUncertainty(uncert2)
    else:
        uncert_ref2 = None
    nd1 = NDDataArithmetic(data1, uncertainty=uncert1)
    nd2 = NDDataArithmetic(data2, uncertainty=uncert2)
    nd1_ref = NDDataArithmetic(data1, uncertainty=uncert_ref1)
    nd2_ref = NDDataArithmetic(data2, uncertainty=uncert_ref2)
    # Check every operation in both operand orders against the reference.
    for meth in ("add", "subtract", "multiply", "divide"):
        for (first, second), (first_ref, second_ref) in (
            ((nd1, nd2), (nd1_ref, nd2_ref)),
            ((nd2, nd1), (nd2_ref, nd1_ref)),
        ):
            nd3 = getattr(first, meth)(second)
            nd3_ref = getattr(first_ref, meth)(second_ref)
            assert nd3.unit == nd3_ref.unit
            assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
            # almost_equal: unit conversion may introduce float rounding.
            # The original compared nd3 against itself, which could never
            # fail.
            assert_array_almost_equal(
                nd3.uncertainty.array, nd3_ref.uncertainty.array
            )
# Covering:
# data with unit and uncertainty with unit (but equivalent units)
# compared against correctly scaled NDDatas
@pytest.mark.filterwarnings("ignore:.*encountered in.*divide.*")
@pytest.mark.parametrize(
    ("uncert1", "uncert2"),
    [
        (np.array([1, 2, 3]) * u.m, None),
        (np.array([1, 2, 3]) * u.cm, None),
        (None, np.array([1, 2, 3]) * u.m),
        (None, np.array([1, 2, 3]) * u.cm),
        (np.array([1, 2, 3]), np.array([2, 3, 4])),
        (np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])),
        (np.array([1, 2, 3]), np.array([2, 3, 4])) * u.m,
        (np.array([1, 2, 3]) * u.m, np.array([2, 3, 4])) * u.m,
        (np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])),
        (np.array([1, 2, 3]), np.array([2, 3, 4])) * u.cm,
        (np.array([1, 2, 3]) * u.cm, np.array([2, 3, 4])) * u.cm,
        (np.array([1, 2, 3]) * u.km, np.array([2, 3, 4])) * u.cm,
    ],
)
def test_arithmetics_inversevarianceuncertainty_with_units(uncert1, uncert2):
    """InverseVariance propagation with (equivalent) uncertainty units.

    The sigma-like parametrized inputs are converted to inverse variance;
    each unit-carrying uncertainty is compared against a reference NDData
    whose inverse variance was pre-converted to the inverse squared data
    unit.
    """
    # Data has same units
    data1 = np.array([1, 2, 3]) * u.m
    data2 = np.array([-4, 7, 0]) * u.m
    # Convert to inverse variance first, then build the unit-stripped
    # reference *before* wrapping: the original code wrapped first, making
    # the isinstance(..., Quantity) branch unreachable.
    if uncert1 is not None:
        uncert1 = 1 / uncert1**2
        if isinstance(uncert1, Quantity):
            uncert1_ref = uncert1.to_value(1 / data1.unit**2)
        else:
            uncert1_ref = uncert1
        uncert_ref1 = InverseVariance(uncert1_ref, copy=True)
        uncert1 = InverseVariance(uncert1)
    else:
        uncert_ref1 = None
    if uncert2 is not None:
        uncert2 = 1 / uncert2**2
        if isinstance(uncert2, Quantity):
            uncert2_ref = uncert2.to_value(1 / data2.unit**2)
        else:
            uncert2_ref = uncert2
        uncert_ref2 = InverseVariance(uncert2_ref, copy=True)
        uncert2 = InverseVariance(uncert2)
    else:
        uncert_ref2 = None
    nd1 = NDDataArithmetic(data1, uncertainty=uncert1)
    nd2 = NDDataArithmetic(data2, uncertainty=uncert2)
    nd1_ref = NDDataArithmetic(data1, uncertainty=uncert_ref1)
    nd2_ref = NDDataArithmetic(data2, uncertainty=uncert_ref2)
    # Check every operation in both operand orders against the reference.
    for meth in ("add", "subtract", "multiply", "divide"):
        for (first, second), (first_ref, second_ref) in (
            ((nd1, nd2), (nd1_ref, nd2_ref)),
            ((nd2, nd1), (nd2_ref, nd1_ref)),
        ):
            nd3 = getattr(first, meth)(second)
            nd3_ref = getattr(first_ref, meth)(second_ref)
            assert nd3.unit == nd3_ref.unit
            assert nd3.uncertainty.unit == nd3_ref.uncertainty.unit
            # almost_equal: unit conversion may introduce float rounding.
            # The original compared nd3 against itself, which could never
            # fail.
            assert_array_almost_equal(
                nd3.uncertainty.array, nd3_ref.uncertainty.array
            )
# Test abbreviation and long name for taking the first found meta, mask, wcs
@pytest.mark.parametrize("use_abbreviation", ["ff", "first_found"])
def test_arithmetics_handle_switches(use_abbreviation):
    """Check the handler switches (None / "ff" / "first_found").

    ``None`` for each handler option must drop the corresponding attribute
    from the result, while "first_found" (or its abbreviation "ff") must
    take the attribute from whichever operand has one.
    """
    meta1 = {"a": 1}
    meta2 = {"b": 2}
    mask1 = True
    mask2 = False
    uncertainty1 = StdDevUncertainty([1, 2, 3])
    uncertainty2 = StdDevUncertainty([1, 2, 3])
    wcs1, wcs2 = nd_testing.create_two_unequal_wcs(naxis=1)
    data1 = [1, 1, 1]
    data2 = [1, 1, 1]
    nd1 = NDDataArithmetic(
        data1, meta=meta1, mask=mask1, wcs=wcs1, uncertainty=uncertainty1
    )
    nd2 = NDDataArithmetic(
        data2, meta=meta2, mask=mask2, wcs=wcs2, uncertainty=uncertainty2
    )
    # nd3 has no optional attributes at all.
    nd3 = NDDataArithmetic(data1)
    # Both have the attributes but option None is chosen
    nd_ = nd1.add(
        nd2,
        propagate_uncertainties=None,
        handle_meta=None,
        handle_mask=None,
        compare_wcs=None,
    )
    assert nd_.wcs is None
    assert len(nd_.meta) == 0
    assert nd_.mask is None
    assert nd_.uncertainty is None
    # Only second has attributes and False is chosen
    nd_ = nd3.add(
        nd2,
        propagate_uncertainties=False,
        handle_meta=use_abbreviation,
        handle_mask=use_abbreviation,
        compare_wcs=use_abbreviation,
    )
    nd_testing.assert_wcs_seem_equal(nd_.wcs, wcs2)
    assert nd_.meta == meta2
    assert nd_.mask == mask2
    assert_array_equal(nd_.uncertainty.array, uncertainty2.array)
    # Only first has attributes and False is chosen
    nd_ = nd1.add(
        nd3,
        propagate_uncertainties=False,
        handle_meta=use_abbreviation,
        handle_mask=use_abbreviation,
        compare_wcs=use_abbreviation,
    )
    nd_testing.assert_wcs_seem_equal(nd_.wcs, wcs1)
    assert nd_.meta == meta1
    assert nd_.mask == mask1
    assert_array_equal(nd_.uncertainty.array, uncertainty1.array)
def test_arithmetics_meta_func():
    """A callable ``handle_meta`` is invoked with ``meta_``-prefixed kwargs."""

    def pick_meta(meta1, meta2, take="first"):
        # Select one of the two meta dicts depending on the keyword.
        return meta1 if take == "first" else meta2

    nd1 = NDDataArithmetic(
        [1, 1, 1],
        meta={"a": 1},
        mask=True,
        uncertainty=StdDevUncertainty([1, 2, 3]),
    )
    nd2 = NDDataArithmetic(
        [1, 1, 1],
        meta={"a": 3, "b": 2},
        mask=False,
        uncertainty=StdDevUncertainty([1, 2, 3]),
    )
    # Default keyword: first operand's meta wins.
    res = nd1.add(nd2, handle_meta=pick_meta)
    assert res.meta["a"] == 1
    assert "b" not in res.meta
    # "meta_take" is stripped of its prefix and forwarded as "take".
    res = nd1.add(nd2, handle_meta=pick_meta, meta_take="second")
    assert res.meta["a"] == 3
    assert res.meta["b"] == 2
    # Without the "meta_" prefix the keyword is not routed to the handler.
    with pytest.raises(KeyError):
        nd1.add(nd2, handle_meta=pick_meta, take="second")
def test_arithmetics_wcs_func():
    """A callable ``compare_wcs`` is invoked with ``wcs_``-prefixed kwargs."""

    def compare_loosely(wcs1, wcs2, tolerance=0.1):
        # Report a mismatch only for very small tolerances.
        return tolerance >= 0.01

    wcs1, wcs2 = nd_testing.create_two_equal_wcs(naxis=1)
    nd1 = NDDataArithmetic(
        [1, 1, 1],
        meta={"a": 1},
        mask=True,
        wcs=wcs1,
        uncertainty=StdDevUncertainty([1, 2, 3]),
    )
    nd2 = NDDataArithmetic(
        [1, 1, 1],
        meta={"a": 3, "b": 2},
        mask=False,
        wcs=wcs2,
        uncertainty=StdDevUncertainty([1, 2, 3]),
    )
    res = nd1.add(nd2, compare_wcs=compare_loosely)
    nd_testing.assert_wcs_seem_equal(res.wcs, wcs1)
    # Fails because the comparison function reports a mismatch.
    with pytest.raises(ValueError):
        nd1.add(nd2, compare_wcs=compare_loosely, wcs_tolerance=0.00001)
    # Fails because a parameter needs the "wcs_" prefix to be forwarded
    # to the comparison function.
    with pytest.raises(KeyError):
        nd1.add(nd2, compare_wcs=compare_loosely, tolerance=1)
def test_arithmetics_mask_func():
    """A callable ``handle_mask`` is invoked with ``mask_``-prefixed kwargs."""

    def pick_mask(mask1, mask2, fun=0):
        # Select one of the two masks depending on the keyword.
        return mask2 if fun > 0.5 else mask1

    nd1 = NDDataArithmetic(
        [1, 1, 1],
        meta={"a": 1},
        mask=[True, False, True],
        uncertainty=StdDevUncertainty([1, 2, 3]),
    )
    nd2 = NDDataArithmetic(
        [1, 1, 1],
        meta={"a": 3, "b": 2},
        mask=[True, False, False],
        uncertainty=StdDevUncertainty([1, 2, 3]),
    )
    # Default keyword: first operand's mask wins.
    assert_array_equal(nd1.add(nd2, handle_mask=pick_mask).mask, nd1.mask)
    # "mask_fun" is stripped of its prefix and forwarded as "fun".
    assert_array_equal(
        nd1.add(nd2, handle_mask=pick_mask, mask_fun=1).mask, nd2.mask
    )
    # Without the "mask_" prefix the keyword is not routed to the handler.
    with pytest.raises(KeyError):
        nd1.add(nd2, handle_mask=pick_mask, fun=1)
@pytest.mark.parametrize("meth", ["add", "subtract", "divide", "multiply"])
def test_two_argument_useage(meth):
    """Arithmetic methods accept two explicit operands."""
    op_a = NDDataArithmetic(np.ones((3, 3)))
    op_b = NDDataArithmetic(np.ones((3, 3)))
    # Reference: the already-tested one-operand instance call.
    ref = getattr(op_a, meth)(op_b)
    # Calling on the class itself must give the same data ...
    out = getattr(NDDataArithmetic, meth)(op_a, op_b)
    np.testing.assert_array_equal(out.data, ref.data)
    # ... as must calling on an entirely unrelated instance.
    out = getattr(NDDataArithmetic(-100), meth)(op_a, op_b)
    np.testing.assert_array_equal(out.data, ref.data)
@pytest.mark.parametrize("meth", ["add", "subtract", "divide", "multiply"])
def test_two_argument_useage_non_nddata_first_arg(meth):
    """Two-operand calls also accept plain (non-NDData) operands."""
    left, right = 50, 100
    # Reference: the instance call with both operands wrapped as NDData.
    ref = getattr(NDDataArithmetic(left), meth)(NDDataArithmetic(right))
    # Calling on the class with raw values must match ...
    out = getattr(NDDataArithmetic, meth)(left, right)
    np.testing.assert_array_equal(out.data, ref.data)
    # ... and so must calling on an unrelated instance.
    out = getattr(NDDataArithmetic(-100), meth)(left, right)
    np.testing.assert_array_equal(out.data, ref.data)
def test_arithmetics_unknown_uncertainties():
    """UnknownUncertainty cannot be propagated, only kept or dropped."""
    # Not giving any uncertainty class means it is saved as UnknownUncertainty
    ndd1 = NDDataArithmetic(
        np.ones((3, 3)), uncertainty=UnknownUncertainty(np.ones((3, 3)))
    )
    ndd2 = NDDataArithmetic(
        np.ones((3, 3)), uncertainty=UnknownUncertainty(np.ones((3, 3)) * 2)
    )
    # Propagation is impossible for unknown uncertainty types.
    with pytest.raises(IncompatibleUncertaintiesException):
        ndd1.add(ndd2)
    # propagate_uncertainties=False keeps the first operand's uncertainty ...
    kept = ndd1.add(ndd2, propagate_uncertainties=False)
    np.testing.assert_array_equal(ndd1.uncertainty.array, kept.uncertainty.array)
    # ... while None drops the uncertainty entirely.
    dropped = ndd1.add(ndd2, propagate_uncertainties=None)
    assert dropped.uncertainty is None
def test_psf_warning():
    """Arithmetic involving any operand with a psf set must warn."""
    with_psf = NDDataArithmetic(np.ones((3, 3)), psf=np.zeros(3))
    without_psf = NDDataArithmetic(np.ones((3, 3)), psf=None)
    # No warning when neither operand carries a psf.
    without_psf.add(without_psf)
    # Any combination involving a psf warns that it is not propagated.
    for left, right in (
        (with_psf, without_psf),
        (without_psf, with_psf),
        (with_psf, with_psf),
    ):
        with pytest.warns(
            AstropyUserWarning, match="Not setting psf attribute during add"
        ):
            left.add(right)
def test_raise_method_not_supported():
    """``propagate`` rejects operations it has no propagation rule for."""
    ndd1 = NDDataArithmetic(np.zeros(3), uncertainty=StdDevUncertainty(np.zeros(3)))
    ndd2 = NDDataArithmetic(np.ones(3), uncertainty=StdDevUncertainty(np.ones(3)))
    result = np.zeros(3)
    # A supported operation must not raise ...
    ndd1.uncertainty.propagate(np.add, ndd2, result, 0)
    # ... while an unsupported one must.
    with pytest.raises(ValueError):
        ndd1.uncertainty.propagate(np.mod, ndd2, result, 0)
def test_nddata_bitmask_arithmetic():
    """Integer bitmasks survive arithmetic with ``handle_mask=np.bitwise_or``.

    NDData.mask is usually assumed to be boolean, but may also be a bitmask.
    """
    array = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]])
    mask = np.array([[0, 1, 64], [8, 0, 1], [2, 1, 0]])
    nref_nomask = NDDataRef(array)
    nref_masked = NDDataRef(array, mask=mask)
    bitwise = {"handle_mask": np.bitwise_or}
    # Combinations where no operand has a mask stay maskless.
    assert nref_nomask.multiply(1.0, **bitwise).mask is None
    assert nref_nomask.multiply(nref_nomask, **bitwise).mask is None
    # Any combination involving the masked operand keeps its bitmask:
    # constant, itself, and an unmasked NDData.
    for other in (1.0, nref_masked, nref_nomask):
        np.testing.assert_equal(nref_masked.multiply(other, **bitwise).mask, mask)
    # Two different bitmasks are combined with a bitwise OR.
    other_mask = np.array([[64, 1, 0], [2, 1, 0], [8, 0, 2]])
    nref_mask_other = NDDataRef(array, mask=other_mask)
    np.testing.assert_equal(
        nref_mask_other.multiply(nref_masked, **bitwise).mask,
        np.bitwise_or(mask, other_mask),
    )
# Covers different dtypes with various types of scalars as the 2nd operand
# (issue #18384):
@pytest.mark.parametrize(
    "ndd_type",
    (
        pytest.param(np.uint16),
        pytest.param(np.float32),
        pytest.param(np.float64),
    ),
)
@pytest.mark.parametrize(
    "scalar_type",
    (
        pytest.param(int, id="int"),
        pytest.param(float, id="float"),
        pytest.param(np.uint8, id="uint8"),
        pytest.param(np.int16, id="int16"),
        pytest.param(np.float32, id="float32"),
        pytest.param(np.float64, id="float64"),
        pytest.param(lambda v: np.array(v, dtype=np.int16), id="int16_0D_array"),
        pytest.param(lambda v: np.array(v, dtype=np.float32), id="float32_0D_array"),
    ),
)
@pytest.mark.parametrize("meth", STR_TO_OPERATOR)
def test_arithmetics_dtypes_with_scalar(ndd_type, scalar_type, meth):
    """Scalar-operand results must follow NumPy's type-promotion rules."""
    scalar = scalar_type(2)
    nddata = NDDataRef(np.array([1, 2, 3, 4], dtype=ndd_type))
    # The reference is plain NumPy arithmetic on the same operands, so the
    # test enforces NumPy behaviour rather than a fixed dtype.
    expected = STR_TO_OPERATOR[meth](nddata.data, scalar)
    result = getattr(nddata, meth)(scalar)
    assert result.data.shape == expected.shape
    assert result.data.dtype == expected.dtype
    assert_array_equal(result.data, expected)
# Covers adding scalar quantity with matching, non-default dtypes:
@pytest.mark.parametrize("ndd_type", (np.uint16, np.float32, np.float64))
@pytest.mark.parametrize("meth", ("add", "subtract"))
def test_add_quantity_matching_dtype(ndd_type, meth):
    """A Quantity of the very same dtype must leave the dtype unchanged."""
    nddata = NDDataRef(np.array([1, 2, 3, 4], dtype=ndd_type), unit=u.adu)
    quantity = u.Quantity(2, dtype=ndd_type, unit=u.adu)
    expected = STR_TO_OPERATOR[meth](nddata.data, quantity.value)
    result = getattr(nddata, meth)(quantity)
    assert result.data.shape == nddata.data.shape == expected.shape
    assert result.data.dtype == nddata.data.dtype == expected.dtype
    assert_array_equal(result.data, expected)
# Covers scaling with units and non-default dtypes:
@pytest.mark.parametrize("ndd_type", (np.uint16, np.float32, np.float64))
@pytest.mark.parametrize("scalar_type", (int, float, np.uint16, np.float32, np.float64))
@pytest.mark.parametrize("meth", ("multiply", "divide"))
def test_scale_dtypes_with_units(ndd_type, scalar_type, meth):
    """Scaling unit-carrying data must follow NumPy type promotion."""
    nddata = NDDataRef(np.array([1, 2, 3, 4], dtype=ndd_type), unit=u.adu)
    factor = scalar_type(2)
    expected = STR_TO_OPERATOR[meth](nddata.data, factor)
    result = getattr(nddata, meth)(factor)
    assert result.data.shape == expected.shape
    assert result.data.dtype == expected.dtype
    assert_array_almost_equal(result.data, expected)
# Covers adding scalar Quantity to NDData with default float dtypes for both.
# While these two classes live in different modules with their own defaults,
# we'd probably like to know about the unlikely event of their becoming
# inconsistent, which could break downstream assumptions. This also checks
# Quantity constructed in the common way, rather than programmatically as above.
@pytest.mark.parametrize("meth", ("add", "subtract"))
def test_add_quantity_default_dtypes(meth):
    """Default NDData and Quantity float dtypes must stay mutually consistent."""
    nddata = NDDataRef(np.array([1.0, 2.0, 3.0, 4.0]), unit=u.adu)
    quantity = 2.0 * u.adu
    expected = STR_TO_OPERATOR[meth](nddata.data, quantity.value)
    result = getattr(nddata, meth)(quantity)
    assert result.data.shape == nddata.data.shape
    assert result.data.dtype == nddata.data.dtype
    assert_array_equal(result.data, expected)
# Provide input for the following test sets without lots of replication:
# three NDDataRef instances covering uint16/float32/float64 data, with
# matching-precision Variance/StdDev uncertainties and small-int masks.
# The fixtures below deepcopy these so tests may mutate them freely.
NDDATA_REF_PARAMS = [
    pytest.param(
        NDDataRef(
            np.array([1, 2, 3, 4], dtype="uint16"),
            uncertainty=VarianceUncertainty(np.array([1, 2, 3, 4], dtype="uint16")),
            mask=np.array([0, 1, 0, 0], dtype="uint8"),
        ),
        id="u16dat_u16var_u8msk",
    ),
    pytest.param(
        NDDataRef(
            np.array([1.0, 2.0, 3.0, 4.0], dtype="float32"),
            uncertainty=StdDevUncertainty(
                np.array([1.0, 1.41, 1.73, 2.0], dtype="float32")
            ),
            mask=np.array([0, 0, 1, 0], dtype="uint16"),
        ),
        id="f32dat_f32var_u16msk",
    ),
    pytest.param(
        NDDataRef(
            np.array([1.0, 2.0, 3.0, 4.0], dtype="float64"),
            uncertainty=VarianceUncertainty(
                np.array([1.0, 2.0, 3.0, 4.0], dtype="float64")
            ),
            mask=np.array([0, 1, 0, 0], dtype="uint16"),
        ),
        id="f64dat_f64var_u16msk",
    ),
]
@pytest.fixture(params=NDDATA_REF_PARAMS)
def nddata_ref1(request):
    """First NDDataRef operand; deepcopied so tests may mutate it safely."""
    return deepcopy(request.param)
@pytest.fixture(params=NDDATA_REF_PARAMS)
def nddata_ref2(request):
    """Second NDDataRef operand; deepcopied so tests may mutate it safely."""
    return deepcopy(request.param)
# Covers non-default dtypes + uncert + mask with various scalar types
@pytest.mark.parametrize(
    "scalar_type",
    (
        pytest.param(int, id="int"),
        pytest.param(float, id="float"),
        pytest.param(np.uint16, id="uint16"),
        pytest.param(np.float32, id="float32"),
        pytest.param(lambda v: np.array(v, dtype=np.uint16), id="uint16_0D_array"),
        pytest.param(lambda v: np.array(v, dtype=np.float32), id="float32_0D_array"),
    ),
)
@pytest.mark.parametrize("meth", STR_TO_OPERATOR)
def test_dtypes_uncert_mask_with_scalars(nddata_ref1, scalar_type, meth):
    """Scalar arithmetic must preserve NumPy dtype behaviour for data,
    uncertainty and mask together.

    The reference values are computed with plain NumPy on the same
    operands, so the test pins NumPy's promotion rules rather than any
    fixed dtype.
    """
    nddata = nddata_ref1
    scalar = scalar_type(2)
    out = getattr(nddata, meth)(scalar)
    ref_dat = STR_TO_OPERATOR[meth](nddata.data, scalar)
    if meth in ("multiply", "divide"):
        # Scaling also scales the uncertainty; variance scales with the
        # square of the factor, std-dev linearly.
        vscale = scalar
        if isinstance(nddata.uncertainty, VarianceUncertainty):
            vscale = vscale * scalar  # copy to avoid modifying scalar
        ref_unc = STR_TO_OPERATOR[meth](nddata.uncertainty.array, vscale)
    else:
        # Adding/subtracting a scalar leaves the uncertainty untouched.
        ref_unc = nddata.uncertainty.array
    ref_msk = nddata.mask
    # Enforce the same behaviour as NumPy, rather than fixed behaviour:
    assert out.data.shape == ref_dat.shape
    assert out.data.dtype == ref_dat.dtype
    if not (
        NUMPY_LT_2_0
        and nddata.uncertainty.array.dtype.kind in "biu"
        and isinstance(scalar, (np.float16, np.float32))
    ):
        # A quirk of NumPy 1 arithmetic causes int uncertainty (admittedly a corner
        # case) to get cast to float64 when float32 is expected (see #18392):
        assert out.uncertainty.array.dtype == ref_unc.dtype
    assert out.mask.dtype == ref_msk.dtype
    assert np.ma.allclose(out.data, ref_dat)
    assert np.ma.allclose(out.uncertainty.array, ref_unc)
    assert_array_equal(out.mask, ref_msk)
# Covers arithmetic with different dtype pairs + uncert + mask
@pytest.mark.parametrize("meth", STR_TO_OPERATOR)
def test_arithmetics_dtypes_uncert_mask(nddata_ref1, nddata_ref2, meth):
    """Array-array arithmetic must preserve NumPy dtype behaviour for the
    data and combine uncertainty and mask as documented.

    The uncertainty reference uses the standard Gaussian propagation
    formulas (in variance or std-dev space, matching the first operand's
    uncertainty class) with zero correlation.
    """
    nd1 = nddata_ref1
    nd2 = nddata_ref2
    ref_dat = STR_TO_OPERATOR[meth](nd1.data, nd2.data)
    # Deal with uncertainty, converting the nd2 uncertainty class to match
    # nd1, otherwise arithmetic fails. With both operands being arrays, we
    # cannot use NumPy as a reference for the "correct" output dtype for
    # uncertainty, since it doesn't natively propagate errors and the result
    # type depends on the exact calculation used, but we can check that the
    # values are close those expected, given the input dtypes. Establishing
    # the intended casting behaviour for uncertainty is left for other tests.
    if isinstance(nd1.uncertainty, VarianceUncertainty):
        if isinstance(nd2.uncertainty, StdDevUncertainty):
            # Square the std-dev to get a matching variance.
            nd2.uncertainty = VarianceUncertainty(
                np.multiply(nd2.uncertainty.array, nd2.uncertainty.array)
            )
        if meth in ("multiply", "divide"):
            # Variance propagation for products/quotients.
            ref_unc = ref_dat**2 * (
                nd1.uncertainty.array / nd1.data**2
                + nd2.uncertainty.array / nd2.data**2
            )
        else:
            # Variances add for sums/differences.
            ref_unc = nd1.uncertainty.array + nd2.uncertainty.array
    else:
        if isinstance(nd2.uncertainty, VarianceUncertainty):
            # Take the square root to get a matching std-dev.
            nd2.uncertainty = StdDevUncertainty(np.sqrt(nd2.uncertainty.array))
        if meth in ("multiply", "divide"):
            # Relative std-devs add in quadrature for products/quotients.
            ref_unc = ref_dat * np.sqrt(
                (nd1.uncertainty.array / nd1.data) ** 2
                + (nd2.uncertainty.array / nd2.data) ** 2
            )
        else:
            # Std-devs add in quadrature for sums/differences.
            ref_unc = np.sqrt(nd1.uncertainty.array**2 + nd2.uncertainty.array**2)
    ref_msk = np.logical_or(nd1.mask, nd2.mask)  # default op for arith mixin
    out = getattr(nd1, meth)(nd2)
    # Enforce the same behaviour as NumPy, rather than fixed behaviour:
    assert out.data.shape == ref_dat.shape
    assert out.data.dtype == ref_dat.dtype
    # see above comment regarding uncertainty dtype
    assert out.mask.dtype == ref_msk.dtype
    assert np.ma.allclose(out.data, ref_dat)
    assert np.ma.allclose(out.uncertainty.array, ref_unc)
    assert_array_equal(out.mask, ref_msk)
| StdDevUncertaintyUncorrelated |
python | openai__openai-python | src/openai/types/responses/response_format_text_json_schema_config.py | {
"start": 286,
"end": 1414
} | class ____(BaseModel):
name: str
"""The name of the response format.
Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
of 64.
"""
schema_: Dict[str, object] = FieldInfo(alias="schema")
"""
The schema for the response format, described as a JSON Schema object. Learn how
to build JSON schemas [here](https://json-schema.org/).
"""
type: Literal["json_schema"]
"""The type of response format being defined. Always `json_schema`."""
description: Optional[str] = None
"""
A description of what the response format is for, used by the model to determine
how to respond in the format.
"""
strict: Optional[bool] = None
"""
Whether to enable strict schema adherence when generating the output. If set to
true, the model will always follow the exact schema defined in the `schema`
field. Only a subset of JSON Schema is supported when `strict` is `true`. To
learn more, read the
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
"""
| ResponseFormatTextJSONSchemaConfig |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 880813,
"end": 881593
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for PullRequestTimelineItem."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("PullRequestTimelineItemEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("PullRequestTimelineItem"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| PullRequestTimelineConnection |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/cli/scaffold/branch/claude/sdk_client.py | {
"start": 734,
"end": 905
} | class ____:
"""Output channel that discards all output."""
def write(self, text: str) -> None:
"""Discard all text output."""
pass
| NullOutputChannel |
python | bokeh__bokeh | tests/unit/bokeh/test_transform.py | {
"start": 2246,
"end": 2881
} | class ____:
def test_basic(self) -> None:
t = bt.dodge("foo", 0.5)
assert isinstance(t, Field)
assert t.field == "foo"
assert isinstance(t.transform, Dodge)
assert t.transform.value == 0.5
assert t.transform.range is None
def test_with_range(self) -> None:
r = FactorRange("a")
t = bt.dodge("foo", 0.5, range=r)
assert isinstance(t, Field)
assert t.field == "foo"
assert isinstance(t.transform, Dodge)
assert t.transform.value == 0.5
assert t.transform.range is r
assert t.transform.range.factors == ["a"]
| Test_dodge |
python | kamyu104__LeetCode-Solutions | Python/sum-of-beauty-of-all-substrings.py | {
"start": 31,
"end": 407
} | class ____(object):
def beautySum(self, s):
"""
:type s: str
:rtype: int
"""
result = 0
for i in xrange(len(s)):
lookup = [0]*26
for j in xrange(i, len(s)):
lookup[ord(s[j])-ord('a')] += 1
result += max(lookup) - min(x for x in lookup if x)
return result
| Solution |
python | huggingface__transformers | src/transformers/models/qwen2_5_omni/configuration_qwen2_5_omni.py | {
"start": 1433,
"end": 4864
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Qwen2_5OmniThinkerVision`]. It is used to instantiate a
Qwen2.5-VL vision encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the audio encoder of the Qwen2.5-VL
architecture.
e.g. [Qwen/Qwen2.5-Omni-7B](https://huggingface.co/Qwen/Qwen2.5-Omni-7B)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
depth (`int`, *optional*, defaults to 32):
Number of layers (depth) in the model.
hidden_size (`int`, *optional*, defaults to 3584):
The size of the hidden layers.
hidden_act (`str`, *optional*, defaults to `"quick_gelu"`):
The non-linear activation function used in the model. Supported options include `"quick_gelu"` and others as applicable.
mlp_ratio (`float`, *optional*, defaults to 4):
The ratio used to determine the size of the MLP (Multi-Layer Perceptron) hidden layer.
num_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer.
in_channels (`int`, *optional*, defaults to 3):
Number of input channels.
patch_size (`int`, *optional*, defaults to 14):
The size of the patches extracted from the input.
spatial_merge_size (`int`, *optional*, defaults to 2):
The size used for merging spatial dimensions.
temporal_patch_size (`int`, *optional*, defaults to 2):
The size used for patches along the temporal dimension.
Example:
```python
>>> from transformers import Qwen2_5OmniVisionEncoderConfig, Qwen2_5OmniVisionEncoder
>>> # Initializing a Qwen2_5OmniVisionEncoderConfig
>>> configuration = Qwen2_5OmniVisionEncoderConfig()
>>> # Initializing a Qwen2_5OmniVisionEncoder (with random weights)
>>> model = Qwen2_5OmniVisionEncoder(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "qwen2_5_omni_vision_encoder"
base_config_key = "vision_config"
def __init__(
self,
depth=32,
hidden_size=3584,
hidden_act="silu",
intermediate_size=3420,
num_heads=16,
in_channels=3,
patch_size=14,
spatial_merge_size=2,
temporal_patch_size=2,
window_size=112,
out_hidden_size=3584,
fullatt_block_indexes=[7, 15, 23, 31],
initializer_range=0.02,
**kwargs,
):
super().__init__(**kwargs)
self.depth = depth
self.hidden_size = hidden_size
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.num_heads = num_heads
self.in_channels = in_channels
self.patch_size = patch_size
self.spatial_merge_size = spatial_merge_size
self.temporal_patch_size = temporal_patch_size
self.window_size = window_size
self.fullatt_block_indexes = fullatt_block_indexes
self.out_hidden_size = out_hidden_size
self.initializer_range = initializer_range
| Qwen2_5OmniVisionEncoderConfig |
python | ansible__ansible | lib/ansible/template/__init__.py | {
"start": 972,
"end": 17262
} | class ____:
"""Primary public API container for Ansible's template engine."""
def __init__(
self,
loader: _dataloader.DataLoader | None = None,
variables: _VariableContainer | None = None,
) -> None:
self._engine = _engine.TemplateEngine(loader=loader, variables=variables)
self._overrides = _jinja_bits.TemplateOverrides.DEFAULT
@classmethod
@_internal.experimental
def _from_template_engine(cls, engine: _engine.TemplateEngine) -> _t.Self:
"""
EXPERIMENTAL: For internal use within ansible-core only.
Create a `Templar` instance from the given `TemplateEngine` instance.
"""
templar = object.__new__(cls)
templar._engine = engine.copy()
templar._overrides = _jinja_bits.TemplateOverrides.DEFAULT
return templar
def resolve_variable_expression(
self,
expression: str,
*,
local_variables: dict[str, _t.Any] | None = None,
) -> _t.Any:
"""
Resolve a potentially untrusted string variable expression consisting only of valid identifiers, integers, dots, and indexing containing these.
Optional local variables may be provided, which can only be referenced directly by the given expression.
Valid: x, x.y, x[y].z, x[1], 1, x[y.z]
Error: 'x', x['y'], q('env')
"""
return self._engine.resolve_variable_expression(expression, local_variables=local_variables)
def evaluate_expression(
self,
expression: str,
*,
local_variables: dict[str, _t.Any] | None = None,
escape_backslashes: bool = True,
) -> _t.Any:
"""
Evaluate a trusted string expression and return its result.
Optional local variables may be provided, which can only be referenced directly by the given expression.
"""
return self._engine.evaluate_expression(expression, local_variables=local_variables, escape_backslashes=escape_backslashes)
def evaluate_conditional(self, conditional: str | bool) -> bool:
"""
Evaluate a trusted string expression or boolean and return its boolean result. A non-boolean result will raise `AnsibleBrokenConditionalError`.
The ALLOW_BROKEN_CONDITIONALS configuration option can temporarily relax this requirement, allowing truthy conditionals to succeed.
The ALLOW_EMBEDDED_TEMPLATES configuration option can temporarily enable inline Jinja template delimiter support (e.g., {{ }}, {% %}).
"""
return self._engine.evaluate_conditional(conditional)
@property
def basedir(self) -> str:
"""The basedir from DataLoader."""
# DTFIX-FUTURE: come up with a better way to handle this so it can be deprecated
return self._engine.basedir
@property
def available_variables(self) -> _VariableContainer:
"""Available variables this instance will use when templating."""
return self._engine.available_variables
@available_variables.setter
def available_variables(self, variables: _VariableContainer) -> None:
self._engine.available_variables = variables
@property
def _available_variables(self) -> _VariableContainer:
"""Deprecated. Use `available_variables` instead."""
# Commonly abused by numerous collection lookup plugins and the Ceph Ansible `config_template` action.
_display.deprecated(
msg='Direct access to the `_available_variables` internal attribute is deprecated.',
help_text='Use `available_variables` instead.',
version='2.23',
)
return self.available_variables
@property
def _loader(self) -> _dataloader.DataLoader:
"""Deprecated. Use `copy_with_new_env` to create a new instance."""
# Abused by cloud.common, community.general and felixfontein.tools collections to create a new Templar instance.
_display.deprecated(
msg='Direct access to the `_loader` internal attribute is deprecated.',
help_text='Use `copy_with_new_env` to create a new instance.',
version='2.23',
)
return self._engine._loader
@property
def environment(self) -> _environment.Environment:
"""Deprecated."""
_display.deprecated(
msg='Direct access to the `environment` attribute is deprecated.',
help_text='Consider using `copy_with_new_env` or passing `overrides` to `template`.',
version='2.23',
)
return self._engine.environment
def copy_with_new_env(
self,
*,
searchpath: str | _os.PathLike | _t.Sequence[str | _os.PathLike] | None = None,
available_variables: _VariableContainer | None = None,
**context_overrides: _t.Any,
) -> Templar:
"""Return a new templar based on the current one with customizations applied."""
if context_overrides.pop('environment_class', _UNSET) is not _UNSET:
_display.deprecated(
msg="The `environment_class` argument is ignored.",
version='2.23',
)
if context_overrides:
_display.deprecated(
msg='Passing Jinja environment overrides to `copy_with_new_env` is deprecated.',
help_text='Pass Jinja environment overrides to individual `template` calls.',
version='2.23',
)
templar = Templar(
loader=self._engine._loader,
variables=self._engine._variables if available_variables is None else available_variables,
)
# backward compatibility: filter out None values from overrides, even though it is a valid value for some of them
templar._overrides = self._overrides.merge({key: value for key, value in context_overrides.items() if value is not None})
if searchpath is not None:
templar._engine.environment.loader.searchpath = searchpath
return templar
@_contextlib.contextmanager
def set_temporary_context(
self,
*,
searchpath: str | _os.PathLike | _t.Sequence[str | _os.PathLike] | None = None,
available_variables: _VariableContainer | None = None,
**context_overrides: _t.Any,
) -> _t.Generator[None, None, None]:
"""Context manager used to set temporary templating context, without having to worry about resetting original values afterward."""
_display.deprecated(
msg='The `set_temporary_context` method on `Templar` is deprecated.',
help_text='Use the `copy_with_new_env` method on `Templar` instead.',
version='2.23',
)
targets = dict(
searchpath=self._engine.environment.loader,
available_variables=self._engine,
)
target_args = dict(
searchpath=searchpath,
available_variables=available_variables,
)
original: dict[str, _t.Any] = {}
previous_overrides = self._overrides
try:
for key, value in target_args.items():
if value is not None:
target = targets[key]
original[key] = getattr(target, key)
setattr(target, key, value)
# backward compatibility: filter out None values from overrides, even though it is a valid value for some of them
self._overrides = self._overrides.merge({key: value for key, value in context_overrides.items() if value is not None})
yield
finally:
for key, value in original.items():
setattr(targets[key], key, value)
self._overrides = previous_overrides
# noinspection PyUnusedLocal
def template(
self,
variable: _t.Any,
convert_bare: bool = _UNSET,
preserve_trailing_newlines: bool = True,
escape_backslashes: bool = True,
fail_on_undefined: bool = True,
overrides: dict[str, _t.Any] | None = None,
convert_data: bool = _UNSET,
disable_lookups: bool = _UNSET,
) -> _t.Any:
"""Templates (possibly recursively) any given data as input."""
# DTFIX-FUTURE: offer a public version of TemplateOverrides to support an optional strongly typed `overrides` argument
if convert_bare is not _UNSET:
# Skipping a deferred deprecation due to minimal usage outside ansible-core.
# Use `hasattr(templar, 'evaluate_expression')` to determine if `template` or `evaluate_expression` should be used.
_display.deprecated(
msg="Passing `convert_bare` to `template` is deprecated.",
help_text="Use `evaluate_expression` instead.",
version="2.23",
)
if convert_bare and isinstance(variable, str):
contains_filters = "|" in variable
first_part = variable.split("|")[0].split(".")[0].split("[")[0]
convert_bare = (contains_filters or first_part in self.available_variables) and not self.is_possibly_template(variable, overrides)
else:
convert_bare = False
else:
convert_bare = False
if fail_on_undefined is None:
# The pre-2.19 config fallback is ignored for content portability.
_display.deprecated(
msg="Falling back to `True` for `fail_on_undefined`.",
help_text="Use either `True` or `False` for `fail_on_undefined` when calling `template`.",
version="2.23",
)
fail_on_undefined = True
if convert_data is not _UNSET:
# Skipping a deferred deprecation due to minimal usage outside ansible-core.
# Use `hasattr(templar, 'evaluate_expression')` as a surrogate check to determine if `convert_data` is accepted.
_display.deprecated(
msg="Passing `convert_data` to `template` is deprecated.",
version="2.23",
)
if disable_lookups is not _UNSET:
# Skipping a deferred deprecation due to no known usage outside ansible-core.
# Use `hasattr(templar, 'evaluate_expression')` as a surrogate check to determine if `disable_lookups` is accepted.
_display.deprecated(
msg="Passing `disable_lookups` to `template` is deprecated.",
version="2.23",
)
try:
if convert_bare: # pre-2.19 compat
return self.evaluate_expression(variable, escape_backslashes=escape_backslashes)
return self._engine.template(
variable=variable,
options=_engine.TemplateOptions(
preserve_trailing_newlines=preserve_trailing_newlines,
escape_backslashes=escape_backslashes,
overrides=self._overrides.merge(overrides),
),
mode=_engine.TemplateMode.ALWAYS_FINALIZE,
)
except _errors.AnsibleUndefinedVariable:
if not fail_on_undefined:
return variable
raise
def is_template(self, data: _t.Any) -> bool:
"""
Evaluate the input data to determine if it contains a template, even if that template is invalid. Containers will be recursively searched.
Objects subject to template-time transforms that do not yield a template are not considered templates by this method.
Gating a conditional call to `template` with this method is redundant and inefficient -- request templating unconditionally instead.
"""
return self._engine.is_template(data, self._overrides)
def is_possibly_template(
self,
data: _t.Any,
overrides: dict[str, _t.Any] | None = None,
) -> bool:
"""
A lightweight check to determine if the given value is a string that looks like it contains a template, even if that template is invalid.
Returns `True` if the given value is a string that starts with a Jinja overrides header or if it contains template start strings.
Gating a conditional call to `template` with this method is redundant and inefficient -- request templating unconditionally instead.
"""
return isinstance(data, str) and _jinja_bits.is_possibly_template(data, self._overrides.merge(overrides))
def do_template(
self,
data: _t.Any,
preserve_trailing_newlines: bool = True,
escape_backslashes: bool = True,
fail_on_undefined: bool = True,
overrides: dict[str, _t.Any] | None = None,
disable_lookups: bool = _UNSET,
convert_data: bool = _UNSET,
) -> _t.Any:
"""Deprecated. Use `template` instead."""
_display.deprecated(
msg='The `do_template` method on `Templar` is deprecated.',
help_text='Use the `template` method on `Templar` instead.',
version='2.23',
)
if not isinstance(data, str):
return data
return self.template(
variable=data,
preserve_trailing_newlines=preserve_trailing_newlines,
escape_backslashes=escape_backslashes,
fail_on_undefined=fail_on_undefined,
overrides=overrides,
disable_lookups=disable_lookups,
convert_data=convert_data,
)
def generate_ansible_template_vars(
path: str,
fullpath: str | None = None,
dest_path: str | None = None,
) -> dict[str, object]:
"""
Generate and return a dictionary with variable metadata about the template specified by `fullpath`.
If `fullpath` is `None`, `path` will be used instead.
"""
# deprecated: description="deprecate `generate_ansible_template_vars`, collections should inline the necessary variables" core_version="2.23"
return _template_vars.generate_ansible_template_vars(path=path, fullpath=fullpath, dest_path=dest_path, include_ansible_managed=True)
def trust_as_template[T: str | _io.IOBase | _t.TextIO | _t.BinaryIO](value: T) -> T:
"""
Returns `value` tagged as trusted for templating.
Raises a `TypeError` if `value` is not a supported type.
"""
if isinstance(value, str):
return _tags.TrustedAsTemplate().tag(value) # type: ignore[return-value]
if isinstance(value, _io.IOBase): # covers TextIO and BinaryIO at runtime, but type checking disagrees
return _wrappers.TaggedStreamWrapper(value, _tags.TrustedAsTemplate())
raise TypeError(f"Trust cannot be applied to {_module_utils_datatag.native_type_name(value)}, only to 'str' or 'IOBase'.")
def is_trusted_as_template(value: object) -> bool:
"""
Returns `True` if `value` is a `str` or `IOBase` marked as trusted for templating, otherwise returns `False`.
Returns `False` for types which cannot be trusted for templating.
Containers are not recursed and will always return `False`.
This function should not be needed for production code, but may be useful in unit tests.
"""
return isinstance(value, _TRUSTABLE_TYPES) and _tags.TrustedAsTemplate.is_tagged_on(value)
def accept_args_markers[T: _t.Callable](plugin: T) -> T:
"""
A decorator to mark a Jinja plugin as capable of handling `Marker` values for its top-level arguments.
Non-decorated plugin invocation is skipped when a top-level argument is a `Marker`, with the first such value substituted as the plugin result.
This ensures that only plugins which understand `Marker` instances for top-level arguments will encounter them.
"""
plugin.accept_args_markers = True
return plugin
def accept_lazy_markers[T: _t.Callable](plugin: T) -> T:
"""
A decorator to mark a Jinja plugin as capable of handling `Marker` values retrieved from lazy containers.
Non-decorated plugins will trigger a `MarkerError` exception when attempting to retrieve a `Marker` from a lazy container.
This ensures that only plugins which understand lazy retrieval of `Marker` instances will encounter them.
"""
plugin.accept_lazy_markers = True
return plugin
get_first_marker_arg = _jinja_common.get_first_marker_arg
| Templar |
python | huggingface__transformers | src/transformers/models/qwen3_omni_moe/configuration_qwen3_omni_moe.py | {
"start": 6545,
"end": 7775
} | class ____(PreTrainedConfig):
model_type = "qwen3_omni_moe_vision_encoder"
base_config_key = "vision_config"
def __init__(
self,
depth=27,
hidden_size=1152,
hidden_act="gelu_pytorch_tanh",
intermediate_size=4304,
num_heads=16,
in_channels=3,
patch_size=16,
spatial_merge_size=2,
temporal_patch_size=2,
out_hidden_size=3584,
num_position_embeddings=2304,
deepstack_visual_indexes=[8, 16, 24],
initializer_range=0.02,
**kwargs,
):
super().__init__(**kwargs)
self.depth = depth
self.hidden_size = hidden_size
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.num_heads = num_heads
self.in_channels = in_channels
self.patch_size = patch_size
self.spatial_merge_size = spatial_merge_size
self.temporal_patch_size = temporal_patch_size
self.out_hidden_size = out_hidden_size
self.num_position_embeddings = num_position_embeddings
self.initializer_range = initializer_range
self.deepstack_visual_indexes = deepstack_visual_indexes
| Qwen3OmniMoeVisionEncoderConfig |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/core_tests/resource_tests/pythonic_resources/test_type_signatures.py | {
"start": 1028,
"end": 2272
} | class ____(ConfigurableResource):
inner: InnerResource
a_bool: bool
reveal_type(InnerResource.__init__)
reveal_type(OuterResource.__init__)
my_outer = OuterResource(inner=InnerResource(a_string="foo"), a_bool=True)
reveal_type(my_outer.inner)
"""
)
pyright_out = get_pyright_reveal_type_output(filename)
mypy_out = get_mypy_type_output(filename)
# Ensure constructor signature is correct (mypy doesn't yet support Pydantic model constructor type hints)
assert pyright_out[0] == "(self: InnerResource, *, a_string: str) -> None"
assert (
pyright_out[1] == "(self: OuterResource, *, inner: Any | PartialResource[Any],"
" a_bool: bool) -> None"
)
# Ensure that the retrieved type is the same as the type of the resource (no partial)
assert pyright_out[2] == "InnerResource"
assert mypy_out[2] == "test.InnerResource"
@pytest.mark.typesignature
def test_type_signatures_config_at_launch():
with tempfile.TemporaryDirectory() as tempdir:
filename = os.path.join(tempdir, "test.py")
with open(filename, "w") as f:
f.write(
"""
from dagster import ConfigurableResource
| OuterResource |
python | python-openxml__python-docx | src/docx/oxml/text/font.py | {
"start": 1007,
"end": 1269
} | class ____(BaseOxmlElement):
"""`<w:rFonts>` element.
Specifies typeface name for the various language types.
"""
ascii: str | None = OptionalAttribute("w:ascii", ST_String)
hAnsi: str | None = OptionalAttribute("w:hAnsi", ST_String)
| CT_Fonts |
python | huggingface__transformers | src/transformers/models/jamba/modeling_jamba.py | {
"start": 35311,
"end": 36606
} | class ____(PreTrainedModel):
config: JambaConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["JambaAttentionDecoderLayer", "JambaMambaDecoderLayer"]
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
_is_stateful = True
_can_record_outputs = {
"hidden_states": [JambaAttentionDecoderLayer, JambaMambaDecoderLayer],
"attentions": JambaAttention,
"router_logits": OutputRecorder(nn.Linear, layer_name="router"),
}
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, JambaMambaMixer):
A = torch.arange(1, module.ssm_state_size + 1)[None, :]
A = A.expand(module.intermediate_size, -1).contiguous()
init.copy_(module.A_log, torch.log(A))
init.ones_(module.D)
elif isinstance(module, JambaExperts):
init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
ALL_DECODER_LAYER_TYPES = {"attention": JambaAttentionDecoderLayer, "mamba": JambaMambaDecoderLayer}
@auto_docstring
| JambaPreTrainedModel |
python | pennersr__django-allauth | allauth/socialaccount/providers/microsoft/provider.py | {
"start": 372,
"end": 1441
} | class ____(OAuth2Provider):
id = "microsoft"
name = "Microsoft"
account_class = MicrosoftGraphAccount
oauth2_adapter_class = MicrosoftGraphOAuth2Adapter
def get_default_scope(self):
"""
Docs on Scopes and Permissions:
https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-permissions-and-consent#scopes-and-permissions
"""
return ["User.Read"]
def get_auth_params_from_request(self, request, action):
ret = super().get_auth_params_from_request(request, action)
if action == AuthAction.REAUTHENTICATE:
ret["prompt"] = "select_account"
return ret
def extract_uid(self, data):
return str(data["id"])
def extract_common_fields(self, data):
return dict(
email=data.get("mail") or data.get("userPrincipalName"),
username=data.get("mailNickname"),
last_name=data.get("surname"),
first_name=data.get("givenName"),
)
provider_classes = [MicrosoftGraphProvider]
| MicrosoftGraphProvider |
python | pytest-dev__pytest-django | tests/test_unittest.py | {
"start": 1671,
"end": 2356
} | class ____(SimpleTestCase):
"""Django test tags are converted to Pytest markers, at the class & method
levels."""
@pytest.fixture(autouse=True)
def gimme_my_markers(self, request: pytest.FixtureRequest) -> None:
self.markers = {m.name for m in request.node.iter_markers()}
@tag("tag3", "tag4") # type: ignore[misc]
def test_1(self) -> None:
assert self.markers == {"tag1", "tag2", "tag3", "tag4"}
def test_2(self) -> None:
assert self.markers == {"tag1", "tag2"}
@tag("tag5") # type: ignore[misc]
def test_3(self) -> None:
assert self.markers == {"tag1", "tag2", "tag5"}
@tag("tag1")
| TestDjangoTagsToPytestMarkers |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 479029,
"end": 479447
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field(BranchProtectionRuleConflict, graphql_name="node")
"""The item at the end of the edge."""
| BranchProtectionRuleConflictEdge |
python | getsentry__sentry | src/sentry/analytics/events/cron_monitor_created.py | {
"start": 242,
"end": 349
} | class ____(CronMonitorEvent):
pass
@analytics.eventclass("first_cron_monitor.created")
| CronMonitorCreated |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 145322,
"end": 148018
} | class ____(TestCase):
def test_even(self):
iterable = (str(x) for x in range(10))
actual = [''.join(c) for c in mi.ichunked(iterable, 5)]
expected = ['01234', '56789']
self.assertEqual(actual, expected)
def test_odd(self):
iterable = (str(x) for x in range(10))
actual = [''.join(c) for c in mi.ichunked(iterable, 4)]
expected = ['0123', '4567', '89']
self.assertEqual(actual, expected)
def test_zero(self):
iterable = []
actual = [list(c) for c in mi.ichunked(iterable, 0)]
expected = []
self.assertEqual(actual, expected)
def test_negative(self):
iterable = count()
with self.assertRaises(ValueError):
[list(c) for c in mi.ichunked(iterable, -1)]
def test_out_of_order(self):
iterable = map(str, count())
it = mi.ichunked(iterable, 4)
chunk_1 = next(it)
chunk_2 = next(it)
self.assertEqual(''.join(chunk_2), '4567')
self.assertEqual(''.join(chunk_1), '0123')
def test_laziness(self):
def gen():
yield 0
raise RuntimeError
yield from count(1)
it = mi.ichunked(gen(), 4)
chunk = next(it)
self.assertEqual(next(chunk), 0)
self.assertRaises(RuntimeError, next, it)
def test_memory_in_order(self):
gen_numbers = []
def gen():
for gen_number in count():
gen_numbers.append(gen_number)
yield gen_number
# No items should be kept in memory when a ichunked is first called
all_chunks = mi.ichunked(gen(), 4)
self.assertEqual(gen_numbers, [])
# The first item of each chunk should be generated on chunk generation
first_chunk = next(all_chunks)
self.assertEqual(gen_numbers, [0])
# If we don't read a chunk before getting its successor, its contents
# will be cached
second_chunk = next(all_chunks)
self.assertEqual(gen_numbers, [0, 1, 2, 3, 4])
# Check if we can read in cached values
self.assertEqual(list(first_chunk), [0, 1, 2, 3])
self.assertEqual(list(second_chunk), [4, 5, 6, 7])
# Again only the most recent chunk should have an item cached
third_chunk = next(all_chunks)
self.assertEqual(len(gen_numbers), 9)
# No new item should be cached when reading past the first number
next(third_chunk)
self.assertEqual(len(gen_numbers), 9)
# we should not be able to read spent chunks
self.assertEqual(list(first_chunk), [])
self.assertEqual(list(second_chunk), [])
| IchunkedTests |
python | getsentry__sentry | tests/sentry/incidents/endpoints/test_organization_alert_rule_index.py | {
"start": 5903,
"end": 6658
} | class ____(AlertRuleBase):
__test__ = Abstract(__module__, __qualname__)
endpoint = "sentry-api-0-organization-alert-rules"
def setUp(self) -> None:
super().setUp()
self.integration, _ = self.create_provider_integration_for(
self.organization,
self.user,
provider="slack",
name="Team A",
external_id="TXXXXXXX1",
metadata={"access_token": "xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx"},
)
self.slack_action = [
{
"type": "slack",
"targetIdentifier": "my-channel",
"targetType": "specific",
"integration": self.integration.id,
}
]
| AlertRuleIndexBase |
python | bokeh__bokeh | src/bokeh/models/widgets/inputs.py | {
"start": 10038,
"end": 10252
} | class ____(ToggleInput):
""" A checkbox widget. """
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| Checkbox |
python | huggingface__transformers | src/transformers/models/helium/modeling_helium.py | {
"start": 21546,
"end": 21809
} | class ____(GenericForTokenClassification, HeliumPreTrainedModel):
pass
__all__ = [
"HeliumPreTrainedModel",
"HeliumModel",
"HeliumForCausalLM",
"HeliumForSequenceClassification",
"HeliumForTokenClassification",
]
| HeliumForTokenClassification |
python | pytorch__pytorch | torch/nn/utils/parametrizations.py | {
"start": 1031,
"end": 13254
} | class ____(Module):
base: Tensor
def __init__(
self, weight, orthogonal_map: _OrthMaps, *, use_trivialization=True
) -> None:
super().__init__()
# Note [Householder complex]
# For complex tensors, it is not possible to compute the tensor `tau` necessary for
# linalg.householder_product from the reflectors.
# To see this, note that the reflectors have a shape like:
# 0 0 0
# * 0 0
# * * 0
# which, for complex matrices, give n(n-1) (real) parameters. Now, you need n^2 parameters
# to parametrize the unitary matrices. Saving tau on its own does not work either, because
# not every combination of `(A, tau)` gives a unitary matrix, meaning that if we optimise
# them as independent tensors we would not maintain the constraint
# An equivalent reasoning holds for rectangular matrices
if weight.is_complex() and orthogonal_map == _OrthMaps.householder:
raise ValueError(
"The householder parametrization does not support complex tensors."
)
self.shape = weight.shape
self.orthogonal_map = orthogonal_map
if use_trivialization:
self.register_buffer("base", None)
def forward(self, X: torch.Tensor) -> torch.Tensor:
n, k = X.size(-2), X.size(-1)
transposed = n < k
if transposed:
X = X.mT
n, k = k, n
# Here n > k and X is a tall matrix
if (
self.orthogonal_map == _OrthMaps.matrix_exp
or self.orthogonal_map == _OrthMaps.cayley
):
# We just need n x k - k(k-1)/2 parameters
X = X.tril()
if n != k:
# Embed into a square matrix
X = torch.cat(
[X, X.new_zeros(n, n - k).expand(*X.shape[:-2], -1, -1)], dim=-1
)
A = X - X.mH
# A is skew-symmetric (or skew-hermitian)
if self.orthogonal_map == _OrthMaps.matrix_exp:
Q = torch.matrix_exp(A)
elif self.orthogonal_map == _OrthMaps.cayley:
# Computes the Cayley retraction (I+A/2)(I-A/2)^{-1}
Id = torch.eye(n, dtype=A.dtype, device=A.device)
Q = torch.linalg.solve(
torch.add(Id, A, alpha=-0.5), torch.add(Id, A, alpha=0.5)
)
# Q is now orthogonal (or unitary) of size (..., n, n)
if n != k:
# pyrefly: ignore [unbound-name]
Q = Q[..., :k]
# Q is now the size of the X (albeit perhaps transposed)
else:
# X is real here, as we do not support householder with complex numbers
A = X.tril(diagonal=-1)
tau = 2.0 / (1.0 + (A * A).sum(dim=-2))
Q = torch.linalg.householder_product(A, tau)
# The diagonal of X is 1's and -1's
# We do not want to differentiate through this or update the diagonal of X hence the casting
Q = Q * X.diagonal(dim1=-2, dim2=-1).int().unsqueeze(-2)
if hasattr(self, "base"):
# pyrefly: ignore [unbound-name]
Q = self.base @ Q
if transposed:
# pyrefly: ignore [unbound-name]
Q = Q.mT
return Q # type: ignore[possibly-undefined]
@torch.autograd.no_grad()
def right_inverse(self, Q: torch.Tensor) -> torch.Tensor:
if Q.shape != self.shape:
raise ValueError(
f"Expected a matrix or batch of matrices of shape {self.shape}. "
f"Got a tensor of shape {Q.shape}."
)
Q_init = Q
n, k = Q.size(-2), Q.size(-1)
transpose = n < k
if transpose:
Q = Q.mT
n, k = k, n
# We always make sure to always copy Q in every path
if not hasattr(self, "base"):
# Note [right_inverse expm cayley]
# If we do not have use_trivialization=True, we just implement the inverse of the forward
# map for the Householder. To see why, think that for the Cayley map,
# we would need to find the matrix X \in R^{n x k} such that:
# Y = torch.cat([X.tril(), X.new_zeros(n, n - k).expand(*X.shape[:-2], -1, -1)], dim=-1)
# A = Y - Y.mH
# cayley(A)[:, :k]
# gives the original tensor. It is not clear how to do this.
# Perhaps via some algebraic manipulation involving the QR like that of
# Corollary 2.2 in Edelman, Arias and Smith?
if (
self.orthogonal_map == _OrthMaps.cayley
or self.orthogonal_map == _OrthMaps.matrix_exp
):
raise NotImplementedError(
"It is not possible to assign to the matrix exponential "
"or the Cayley parametrizations when use_trivialization=False."
)
# If parametrization == _OrthMaps.householder, make Q orthogonal via the QR decomposition.
# Here Q is always real because we do not support householder and complex matrices.
# See note [Householder complex]
A, tau = torch.geqrf(Q)
# We want to have a decomposition X = QR with diag(R) > 0, as otherwise we could
# decompose an orthogonal matrix Q as Q = (-Q)@(-Id), which is a valid QR decomposition
# The diagonal of Q is the diagonal of R from the qr decomposition
A.diagonal(dim1=-2, dim2=-1).sign_()
# Equality with zero is ok because LAPACK returns exactly zero when it does not want
# to use a particular reflection
A.diagonal(dim1=-2, dim2=-1)[tau == 0.0] *= -1
return A.mT if transpose else A
else:
if n == k:
# We check whether Q is orthogonal
if not _is_orthogonal(Q):
Q = _make_orthogonal(Q)
else: # Is orthogonal
Q = Q.clone()
else:
# Complete Q into a full n x n orthogonal matrix
N = torch.randn(
*(Q.size()[:-2] + (n, n - k)), dtype=Q.dtype, device=Q.device
)
Q = torch.cat([Q, N], dim=-1)
Q = _make_orthogonal(Q)
self.base = Q
# It is necessary to return the -Id, as we use the diagonal for the
# Householder parametrization. Using -Id makes:
# householder(torch.zeros(m,n)) == torch.eye(m,n)
# Poor man's version of eye_like
neg_Id = torch.zeros_like(Q_init)
neg_Id.diagonal(dim1=-2, dim2=-1).fill_(-1.0)
return neg_Id
def orthogonal(
module: Module,
name: str = "weight",
orthogonal_map: str | None = None,
*,
use_trivialization: bool = True,
) -> Module:
r"""Apply an orthogonal or unitary parametrization to a matrix or a batch of matrices.
Letting :math:`\mathbb{K}` be :math:`\mathbb{R}` or :math:`\mathbb{C}`, the parametrized
matrix :math:`Q \in \mathbb{K}^{m \times n}` is **orthogonal** as
.. math::
\begin{align*}
Q^{\text{H}}Q &= \mathrm{I}_n \mathrlap{\qquad \text{if }m \geq n}\\
QQ^{\text{H}} &= \mathrm{I}_m \mathrlap{\qquad \text{if }m < n}
\end{align*}
where :math:`Q^{\text{H}}` is the conjugate transpose when :math:`Q` is complex
and the transpose when :math:`Q` is real-valued, and
:math:`\mathrm{I}_n` is the `n`-dimensional identity matrix.
In plain words, :math:`Q` will have orthonormal columns whenever :math:`m \geq n`
and orthonormal rows otherwise.
If the tensor has more than two dimensions, we consider it as a batch of matrices of shape `(..., m, n)`.
The matrix :math:`Q` may be parametrized via three different ``orthogonal_map`` in terms of the original tensor:
- ``"matrix_exp"``/``"cayley"``:
the :func:`~torch.matrix_exp` :math:`Q = \exp(A)` and the `Cayley map`_
:math:`Q = (\mathrm{I}_n + A/2)(\mathrm{I}_n - A/2)^{-1}` are applied to a skew-symmetric
:math:`A` to give an orthogonal matrix.
- ``"householder"``: computes a product of Householder reflectors
(:func:`~torch.linalg.householder_product`).
``"matrix_exp"``/``"cayley"`` often make the parametrized weight converge faster than
``"householder"``, but they are slower to compute for very thin or very wide matrices.
If ``use_trivialization=True`` (default), the parametrization implements the "Dynamic Trivialization Framework",
where an extra matrix :math:`B \in \mathbb{K}^{n \times n}` is stored under
``module.parametrizations.weight[0].base``. This helps the
convergence of the parametrized layer at the expense of some extra memory use.
See `Trivializations for Gradient-Based Optimization on Manifolds`_ .
Initial value of :math:`Q`:
If the original tensor is not parametrized and ``use_trivialization=True`` (default), the initial value
of :math:`Q` is that of the original tensor if it is orthogonal (or unitary in the complex case)
and it is orthogonalized via the QR decomposition otherwise (see :func:`torch.linalg.qr`).
Same happens when it is not parametrized and ``orthogonal_map="householder"`` even when ``use_trivialization=False``.
Otherwise, the initial value is the result of the composition of all the registered
parametrizations applied to the original tensor.
.. note::
This function is implemented using the parametrization functionality
in :func:`~torch.nn.utils.parametrize.register_parametrization`.
.. _`Cayley map`: https://en.wikipedia.org/wiki/Cayley_transform#Matrix_map
.. _`Trivializations for Gradient-Based Optimization on Manifolds`: https://arxiv.org/abs/1909.09501
Args:
module (nn.Module): module on which to register the parametrization.
name (str, optional): name of the tensor to make orthogonal. Default: ``"weight"``.
orthogonal_map (str, optional): One of the following: ``"matrix_exp"``, ``"cayley"``, ``"householder"``.
Default: ``"matrix_exp"`` if the matrix is square or complex, ``"householder"`` otherwise.
use_trivialization (bool, optional): whether to use the dynamic trivialization framework.
Default: ``True``.
Returns:
The original module with an orthogonal parametrization registered to the specified
weight
Example::
>>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK)
>>> orth_linear = orthogonal(nn.Linear(20, 40))
>>> orth_linear
ParametrizedLinear(
in_features=20, out_features=40, bias=True
(parametrizations): ModuleDict(
(weight): ParametrizationList(
(0): _Orthogonal()
)
)
)
>>> # xdoctest: +IGNORE_WANT
>>> Q = orth_linear.weight
>>> torch.dist(Q.T @ Q, torch.eye(20))
tensor(4.9332e-07)
"""
weight = getattr(module, name, None)
if not isinstance(weight, Tensor):
raise ValueError(
f"Module '{module}' has no parameter or buffer with name '{name}'"
)
# We could implement this for 1-dim tensors as the maps on the sphere
# but I believe it'd bite more people than it'd help
if weight.ndim < 2:
raise ValueError(
"Expected a matrix or batch of matrices. "
f"Got a tensor of {weight.ndim} dimensions."
)
if orthogonal_map is None:
orthogonal_map = (
"matrix_exp"
if weight.size(-2) == weight.size(-1) or weight.is_complex()
else "householder"
)
orth_enum = getattr(_OrthMaps, orthogonal_map, None)
if orth_enum is None:
raise ValueError(
'orthogonal_map has to be one of "matrix_exp", "cayley", "householder". '
f"Got: {orthogonal_map}"
)
orth = _Orthogonal(weight, orth_enum, use_trivialization=use_trivialization)
parametrize.register_parametrization(module, name, orth, unsafe=True)
return module
| _Orthogonal |
python | readthedocs__readthedocs.org | readthedocs/core/migrations/0014_optout_email_build_image_deprecation.py | {
"start": 149,
"end": 1039
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("core", "0013_add_optout_email_config_file_deprecation"),
]
operations = [
migrations.AddField(
model_name="historicaluserprofile",
name="optout_email_build_image_deprecation",
field=models.BooleanField(
default=False,
null=True,
verbose_name="Opt-out from email about '\"build.image\" config key deprecation'",
),
),
migrations.AddField(
model_name="userprofile",
name="optout_email_build_image_deprecation",
field=models.BooleanField(
default=False,
null=True,
verbose_name="Opt-out from email about '\"build.image\" config key deprecation'",
),
),
]
| Migration |
python | crytic__slither | slither/detectors/variables/function_init_state_variables.py | {
"start": 1881,
"end": 4975
} | class ____(AbstractDetector):
"""
State variables initializing from an immediate function call (prior to constructor run).
"""
ARGUMENT = "function-init-state"
HELP = "Function initializing state variables"
IMPACT = DetectorClassification.INFORMATIONAL
CONFIDENCE = DetectorClassification.HIGH
WIKI = (
"https://github.com/crytic/slither/wiki/Detector-Documentation#function-initializing-state"
)
WIKI_TITLE = "Function Initializing State"
WIKI_DESCRIPTION = "Detects the immediate initialization of state variables through function calls that are not pure/constant, or that use non-constant state variable."
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
contract StateVarInitFromFunction {
uint public v = set(); // Initialize from function (sets to 77)
uint public w = 5;
uint public x = set(); // Initialize from function (sets to 88)
address public shouldntBeReported = address(8);
constructor(){
// The constructor is run after all state variables are initialized.
}
function set() public returns(uint) {
// If this function is being used to initialize a state variable declared
// before w, w will be zero. If it is declared after w, w will be set.
if(w == 0) {
return 77;
}
return 88;
}
}
```
In this case, users might intend a function to return a value a state variable can initialize with, without realizing the context for the contract is not fully initialized.
In the example above, the same function sets two different values for state variables because it checks a state variable that is not yet initialized in one case, and is initialized in the other.
Special care must be taken when initializing state variables from an immediate function call so as not to incorrectly assume the state is initialized.
"""
# endregion wiki_exploit_scenario
WIKI_RECOMMENDATION = "Remove any initialization of state variables via non-constant state variables or function calls. If variables must be set upon contract deployment, locate initialization in the constructor instead."
def _detect(self) -> List[Output]:
"""
Detect state variables defined from an immediate function call (pre-contract deployment).
Recursively visit the calls
Returns:
list: {'vuln', 'filename,'contract','func', 'shadow'}
"""
results = []
for contract in self.contracts:
state_variables = detect_function_init_state_vars(contract)
if state_variables:
for state_variable in state_variables:
info: DETECTOR_INFO = [
state_variable,
" is set pre-construction with a non-constant function or state variable:\n",
]
info += [f"\t- {state_variable.expression}\n"]
json = self.generate_result(info)
results.append(json)
return results
| FunctionInitializedState |
python | ray-project__ray | python/ray/air/util/object_extensions/arrow.py | {
"start": 864,
"end": 2829
} | class ____(pa.ExtensionType):
"""Defines a new Arrow extension type for Python objects.
We do not require a parametrized type, so the constructor does not
take any arguments
"""
def __init__(self) -> None:
# Defines the underlying storage type as the PyArrow LargeBinary type
super().__init__(pa.large_binary(), "ray.data.arrow_pickled_object")
def __arrow_ext_serialize__(self) -> bytes:
# Since there are no type parameters, we are free to return empty
return b""
@classmethod
def __arrow_ext_deserialize__(
cls, storage_type: pa.DataType, serialized: bytes
) -> "ArrowPythonObjectType":
return ArrowPythonObjectType()
def __arrow_ext_scalar_class__(self) -> type:
"""Returns the scalar class of the extension type. Indexing out of the
PyArrow extension array will return instances of this type.
"""
return ArrowPythonObjectScalar
def __arrow_ext_class__(self) -> type:
"""Returns the array type of the extension type. Selecting one array
out of the ChunkedArray that makes up a column in a Table with
this custom type will return an instance of this type.
"""
return ArrowPythonObjectArray
def to_pandas_dtype(self):
"""Pandas interoperability type. This describes the Pandas counterpart
to the Arrow type. See https://pandas.pydata.org/docs/development/extending.html
for more information.
"""
return ray.air.util.object_extensions.pandas.PythonObjectDtype()
def __reduce__(self):
# Earlier PyArrow versions require custom pickling behavior.
return self.__arrow_ext_deserialize__, (
self.storage_type,
self.__arrow_ext_serialize__(),
)
def __hash__(self) -> int:
return hash((type(self), self.storage_type.id, self.extension_name))
@PublicAPI(stability="alpha")
| ArrowPythonObjectType |
python | django__django | tests/postgres_tests/test_array.py | {
"start": 1669,
"end": 3352
} | class ____(PostgreSQLSimpleTestCase):
def test_get_field_display(self):
class MyModel(PostgreSQLModel):
field = ArrayField(
models.CharField(max_length=16),
choices=[
["Media", [(["vinyl", "cd"], "Audio")]],
(("mp3", "mp4"), "Digital"),
],
)
tests = (
(["vinyl", "cd"], "Audio"),
(("mp3", "mp4"), "Digital"),
(("a", "b"), "('a', 'b')"),
(["c", "d"], "['c', 'd']"),
)
for value, display in tests:
with self.subTest(value=value, display=display):
instance = MyModel(field=value)
self.assertEqual(instance.get_field_display(), display)
def test_get_field_display_nested_array(self):
class MyModel(PostgreSQLModel):
field = ArrayField(
ArrayField(models.CharField(max_length=16)),
choices=[
[
"Media",
[([["vinyl", "cd"], ("x",)], "Audio")],
],
((["mp3"], ("mp4",)), "Digital"),
],
)
tests = (
([["vinyl", "cd"], ("x",)], "Audio"),
((["mp3"], ("mp4",)), "Digital"),
((("a", "b"), ("c",)), "(('a', 'b'), ('c',))"),
([["a", "b"], ["c"]], "[['a', 'b'], ['c']]"),
)
for value, display in tests:
with self.subTest(value=value, display=display):
instance = MyModel(field=value)
self.assertEqual(instance.get_field_display(), display)
| BasicTests |
python | walkccc__LeetCode | solutions/386. Lexicographical Numbers/386.py | {
"start": 0,
"end": 292
} | class ____:
def lexicalOrder(self, n: int) -> list[int]:
ans = []
curr = 1
while len(ans) < n:
ans.append(curr)
if curr * 10 <= n:
curr *= 10
else:
while curr % 10 == 9 or curr == n:
curr //= 10
curr += 1
return ans
| Solution |
python | neetcode-gh__leetcode | python/0086-partition-list.py | {
"start": 0,
"end": 601
} | class ____:
def partition(self, head: Optional[ListNode], x: int) -> Optional[ListNode]:
less_head, bigger_head = ListNode(-1), ListNode(-1)
less_prev, bigger_prev = less_head, bigger_head
while head:
if head.val < x:
less_prev.next = head
less_prev = less_prev.next
else:
bigger_prev.next = head
bigger_prev = bigger_prev.next
head = head.next
less_prev.next = bigger_prev.next = None
less_prev.next = bigger_head.next
return less_head.next
| Solution |
python | huggingface__transformers | src/transformers/models/janus/modeling_janus.py | {
"start": 11298,
"end": 14674
} | class ____(nn.Module):
"""Attention Class for Janus Vision Encoder"""
def __init__(self, config: JanusVisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
proj_dropout = config.projection_dropout
qk_norm = config.use_qk_norm
self.is_causal = False
# Janus has no MHA, hence for `eager_attention_forward` call setting `num_key_value_groups` to 1.
self.num_key_value_groups = 1
self.q_proj = nn.Linear(self.embed_dim, self.num_heads * self.head_dim, bias=config.attention_bias)
self.k_proj = nn.Linear(self.embed_dim, self.num_heads * self.head_dim, bias=config.attention_bias)
self.v_proj = nn.Linear(self.embed_dim, self.num_heads * self.head_dim, bias=config.attention_bias)
self.projection_layer = nn.Linear(self.embed_dim, self.embed_dim)
self.projection_dropout = nn.Dropout(proj_dropout) if proj_dropout > 0 else nn.Identity()
self.q_norm = nn.LayerNorm(self.embed_dim) if qk_norm else nn.Identity()
self.k_norm = nn.LayerNorm(self.embed_dim) if qk_norm else nn.Identity()
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
):
batch_size, seq_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.reshape(-1, self.num_heads, self.head_dim)
query_states = self.q_norm(query_states)
key_states = key_states.reshape(-1, self.num_heads, self.head_dim)
key_states = self.k_norm(key_states)
query_states = query_states.reshape(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.reshape(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scale,
is_causal=self.is_causal,
**kwargs,
)
attn_output = attn_output.reshape(batch_size, seq_len, self.embed_dim)
output = self.projection_layer(attn_output)
output = self.projection_dropout(output)
return output, attn_weights
| JanusVisionAttention |
python | tox-dev__tox | src/tox/tox_env/python/pip/req_file.py | {
"start": 330,
"end": 5687
} | class ____(RequirementsFile):
# these options are valid in requirements.txt, but not via pip cli and
# thus cannot be used in the testenv `deps` list
_illegal_options: Final[list[str]] = ["hash"]
def __init__(self, raw: str | list[str] | list[Requirement], root: Path) -> None:
super().__init__(root / "tox.ini", constraint=False)
got = raw if isinstance(raw, str) else "\n".join(str(i) for i in raw)
self._raw = self._normalize_raw(got)
self._unroll: tuple[list[str], list[str]] | None = None
self._req_parser_: RequirementsFile | None = None
def _extend_parser(self, parser: ArgumentParser) -> None: # noqa: PLR6301
parser.add_argument("--no-deps", action="store_true", dest="no_deps", default=False)
def _merge_option_line(self, base_opt: Namespace, opt: Namespace, filename: str) -> None:
super()._merge_option_line(base_opt, opt, filename)
if getattr(opt, "no_deps", False): # if the option comes from a requirements file this flag is missing there
base_opt.no_deps = True
def _option_to_args(self, opt: Namespace) -> list[str]:
result = super()._option_to_args(opt)
if getattr(opt, "no_deps", False):
result.append("--no-deps")
return result
@property
def _req_parser(self) -> RequirementsFile:
if self._req_parser_ is None:
self._req_parser_ = RequirementsFile(path=self._path, constraint=False)
return self._req_parser_
def _get_file_content(self, url: str) -> str:
if self._is_url_self(url):
return self._raw
return super()._get_file_content(url)
def _is_url_self(self, url: str) -> bool:
return url == str(self._path)
def _pre_process(self, content: str) -> ReqFileLines:
for at, line in super()._pre_process(content):
if line.startswith("-r") or (line.startswith("-c") and line[2].isalpha()):
found_line = f"{line[0:2]} {line[2:]}" # normalize
else:
found_line = line
yield at, found_line
def lines(self) -> list[str]:
return self._raw.splitlines()
@classmethod
def _normalize_raw(cls, raw: str) -> str:
# a line ending in an unescaped \ is treated as a line continuation and the newline following it is effectively
# ignored
raw = "".join(raw.replace("\r", "").split("\\\n"))
# for tox<4 supporting requirement/constraint files via -rreq.txt/-creq.txt
lines: list[str] = [cls._normalize_line(line) for line in raw.splitlines()]
adjusted = "\n".join(lines)
return f"{adjusted}\n" if raw.endswith("\\\n") else adjusted # preserve trailing newline if input has it
@classmethod
def _normalize_line(cls, line: str) -> str:
arg_match = next(
(
arg
for arg in ONE_ARG
if line.startswith(arg)
and len(line) > len(arg)
and not (line[len(arg)].isspace() or line[len(arg)] == "=")
),
None,
)
if arg_match is not None:
values = line[len(arg_match) :]
line = f"{arg_match} {values}"
# escape spaces
escape_match = next((e for e in ONE_ARG_ESCAPE if line.startswith(e) and line[len(e)].isspace()), None)
if escape_match is not None:
# escape not already escaped spaces
escaped = re.sub(r"(?<!\\)(\s)", r"\\\1", line[len(escape_match) + 1 :])
line = f"{line[: len(escape_match)]} {escaped}"
return line
def _parse_requirements(self, opt: Namespace, recurse: bool) -> list[ParsedRequirement]: # noqa: FBT001
# check for any invalid options in the deps list
# (requirements recursively included from other files are not checked)
requirements = super()._parse_requirements(opt, recurse)
for req in requirements:
if req.from_file != str(self.path):
continue
for illegal_option in self._illegal_options:
if req.options.get(illegal_option):
msg = f"Cannot use --{illegal_option} in deps list, it must be in requirements file. ({req})"
raise ValueError(msg)
return requirements
def unroll(self) -> tuple[list[str], list[str]]:
if self._unroll is None:
opts_dict = vars(self.options)
if not self.requirements and opts_dict:
msg = "no dependencies"
raise ValueError(msg)
result_opts: list[str] = [f"{key}={value}" for key, value in opts_dict.items()]
result_req = [str(req) for req in self.requirements]
self._unroll = result_opts, result_req
return self._unroll
def __iadd__(self, other: PythonDeps) -> PythonDeps: # noqa: PYI034
self._raw += "\n" + other._raw
return self
@classmethod
def factory(cls, root: Path, raw: object) -> PythonDeps:
if not (
isinstance(raw, str)
or (
isinstance(raw, list)
and (all(isinstance(i, str) for i in raw) or all(isinstance(i, Requirement) for i in raw))
)
):
raise TypeError(raw)
return cls(raw, root)
| PythonDeps |
python | django__django | tests/template_tests/filter_tests/test_cut.py | {
"start": 2117,
"end": 2625
} | class ____(SimpleTestCase):
def test_character(self):
self.assertEqual(cut("a string to be mangled", "a"), " string to be mngled")
def test_characters(self):
self.assertEqual(cut("a string to be mangled", "ng"), "a stri to be maled")
def test_non_matching_string(self):
self.assertEqual(
cut("a string to be mangled", "strings"), "a string to be mangled"
)
def test_non_string_input(self):
self.assertEqual(cut(123, "2"), "13")
| FunctionTests |
python | PrefectHQ__prefect | src/integrations/prefect-kubernetes/tests/test_worker.py | {
"start": 63236,
"end": 118968
} | class ____:
@pytest.fixture
async def default_configuration(self):
return await KubernetesWorkerJobConfiguration.from_template_and_values(
KubernetesWorker.get_default_base_job_template(), {}
)
@pytest.fixture
def flow_run(self):
return FlowRun(flow_id=uuid.uuid4(), name="my-flow-run-name")
async def test_creates_job_by_building_a_manifest(
self,
default_configuration: KubernetesWorkerJobConfiguration,
flow_run,
mock_batch_client,
mock_core_client,
mock_watch,
mock_pods_stream_that_returns_completed_pod,
):
default_configuration.prepare_for_flow_run(flow_run)
expected_manifest = default_configuration.job_manifest
mock_watch.return_value.stream = mock.Mock(
side_effect=mock_pods_stream_that_returns_completed_pod
)
async with KubernetesWorker(work_pool_name="test") as k8s_worker:
await k8s_worker.run(flow_run=flow_run, configuration=default_configuration)
mock_batch_client.return_value.create_namespaced_job.assert_called_with(
"default",
expected_manifest,
)
async def test_initiate_run_does_not_wait_for_job_completion(
self,
default_configuration: KubernetesWorkerJobConfiguration,
flow_run,
mock_batch_client,
mock_core_client,
):
"""
This test excludes the watch mock to ensure that the job is not watched.
"""
default_configuration.prepare_for_flow_run(flow_run)
expected_manifest = default_configuration.job_manifest
async with KubernetesWorker(work_pool_name="test") as k8s_worker:
await k8s_worker._initiate_run(
flow_run=flow_run, configuration=default_configuration
)
mock_core_client.return_value.list_namespaced_pod.assert_not_called()
mock_batch_client.return_value.create_namespaced_job.assert_called_with(
"default",
expected_manifest,
)
async def test_task_status_receives_job_pid(
self,
default_configuration: KubernetesWorkerJobConfiguration,
flow_run,
mock_batch_client,
mock_core_client,
mock_watch,
mock_pods_stream_that_returns_completed_pod,
):
mock_watch.return_value.stream = mock.Mock(
side_effect=mock_pods_stream_that_returns_completed_pod
)
async with KubernetesWorker(work_pool_name="test") as k8s_worker:
fake_status = MagicMock(spec=anyio.abc.TaskStatus)
await k8s_worker.run(
flow_run=flow_run,
configuration=default_configuration,
task_status=fake_status,
)
expected_value = "mock-namespace:mock-job"
fake_status.started.assert_called_once_with(expected_value)
@pytest.mark.parametrize(
"job_name,clean_name",
[
("infra-run", "infra-run-"),
("infra-run-", "infra-run-"),
("_infra_run", "infra-run-"),
("...infra_run", "infra-run-"),
("._-infra_run", "infra-run-"),
("9infra-run", "9infra-run-"),
("-infra.run", "infra-run-"),
("infra*run", "infra-run-"),
("infra9.-foo_bar^x", "infra9-foo-bar-x-"),
],
)
async def test_job_name_creates_valid_name(
self,
default_configuration: KubernetesWorkerJobConfiguration,
flow_run,
mock_core_client,
mock_watch,
mock_pods_stream_that_returns_completed_pod,
mock_batch_client,
job_name,
clean_name,
):
default_configuration.name = job_name
default_configuration.prepare_for_flow_run(flow_run)
mock_watch.return_value.stream = mock.Mock(
side_effect=mock_pods_stream_that_returns_completed_pod
)
async with KubernetesWorker(work_pool_name="test") as k8s_worker:
await k8s_worker.run(flow_run=flow_run, configuration=default_configuration)
mock_batch_client.return_value.create_namespaced_job.assert_called_once()
call_name = mock_batch_client.return_value.create_namespaced_job.call_args[
0
][1]["metadata"]["generateName"]
assert call_name == clean_name
async def test_uses_image_variable(
self,
flow_run,
mock_core_client,
mock_watch,
mock_pods_stream_that_returns_completed_pod,
mock_batch_client,
):
mock_watch.return_value.stream = mock.Mock(
side_effect=mock_pods_stream_that_returns_completed_pod
)
configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
KubernetesWorker.get_default_base_job_template(), {"image": "foo"}
)
async with KubernetesWorker(work_pool_name="test") as k8s_worker:
await k8s_worker.run(flow_run, configuration)
mock_batch_client.return_value.create_namespaced_job.assert_called_once()
image = mock_batch_client.return_value.create_namespaced_job.call_args[0][
1
]["spec"]["template"]["spec"]["containers"][0]["image"]
assert image == "foo"
async def test_can_store_api_key_in_secret(
self,
flow_run,
mock_core_client,
mock_watch,
mock_pods_stream_that_returns_completed_pod,
mock_batch_client,
enable_store_api_key_in_secret,
):
mock_watch.return_value.stream = mock.Mock(
side_effect=mock_pods_stream_that_returns_completed_pod
)
mock_core_client.return_value.read_namespaced_secret.side_effect = ApiException(
status=404
)
configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
KubernetesWorker.get_default_base_job_template(), {"image": "foo"}
)
with temporary_settings(updates={PREFECT_API_KEY: "fake"}):
async with KubernetesWorker(work_pool_name="test") as k8s_worker:
configuration.prepare_for_flow_run(flow_run=flow_run)
await k8s_worker.run(flow_run, configuration)
mock_batch_client.return_value.create_namespaced_job.assert_called_once()
env = mock_batch_client.return_value.create_namespaced_job.call_args[0][
1
]["spec"]["template"]["spec"]["containers"][0]["env"]
assert {
"name": "PREFECT_API_KEY",
"valueFrom": {
"secretKeyRef": {
"name": f"prefect-{_slugify_name(k8s_worker.name)}-api-key",
"key": "value",
}
},
} in env
mock_core_client.return_value.create_namespaced_secret.assert_called_with(
namespace=configuration.namespace,
body=V1Secret(
api_version="v1",
kind="Secret",
metadata=V1ObjectMeta(
name=f"prefect-{_slugify_name(k8s_worker.name)}-api-key",
namespace=configuration.namespace,
),
data={
"value": base64.b64encode("fake".encode("utf-8")).decode(
"utf-8"
)
},
),
)
# Make sure secret gets deleted
assert await mock_core_client.return_value.delete_namespaced_secret(
name=f"prefect-{_slugify_name(k8s_worker.name)}-api-key",
namespace=configuration.namespace,
)
    async def test_store_api_key_in_existing_secret(
        self,
        flow_run,
        mock_core_client,
        mock_watch,
        mock_pods_stream_that_returns_running_pod,
        mock_batch_client,
        enable_store_api_key_in_secret,
    ):
        """When a worker-managed API-key secret already exists, the worker should
        update it via ``replace_namespaced_secret`` (not create a new one) and
        reference it from the job container's PREFECT_API_KEY env entry."""
        mock_watch.return_value.stream = mock_pods_stream_that_returns_running_pod
        configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
            KubernetesWorker.get_default_base_job_template(), {"image": "foo"}
        )
        with temporary_settings(updates={PREFECT_API_KEY: "fake"}):
            async with KubernetesWorker(work_pool_name="test") as k8s_worker:
                # Simulate the secret already existing in the cluster.
                mock_core_client.return_value.read_namespaced_secret.return_value = (
                    V1Secret(
                        api_version="v1",
                        kind="Secret",
                        metadata=V1ObjectMeta(
                            name=f"prefect-{_slugify_name(k8s_worker.name)}-api-key",
                            namespace=configuration.namespace,
                        ),
                        data={
                            "value": base64.b64encode("fake".encode("utf-8")).decode(
                                "utf-8"
                            )
                        },
                    )
                )
                configuration.prepare_for_flow_run(flow_run=flow_run)
                await k8s_worker.run(flow_run, configuration)
                mock_batch_client.return_value.create_namespaced_job.assert_called_once()
                env = mock_batch_client.return_value.create_namespaced_job.call_args[0][
                    1
                ]["spec"]["template"]["spec"]["containers"][0]["env"]
                # The key must be injected via secretKeyRef, never inline.
                assert {
                    "name": "PREFECT_API_KEY",
                    "valueFrom": {
                        "secretKeyRef": {
                            "name": f"prefect-{_slugify_name(k8s_worker.name)}-api-key",
                            "key": "value",
                        }
                    },
                } in env
                # Existing secret is replaced in place rather than re-created.
                mock_core_client.return_value.replace_namespaced_secret.assert_called_with(
                    name=f"prefect-{_slugify_name(k8s_worker.name)}-api-key",
                    namespace=configuration.namespace,
                    body=V1Secret(
                        api_version="v1",
                        kind="Secret",
                        metadata=V1ObjectMeta(
                            name=f"prefect-{_slugify_name(k8s_worker.name)}-api-key",
                            namespace=configuration.namespace,
                        ),
                        data={
                            "value": base64.b64encode("fake".encode("utf-8")).decode(
                                "utf-8"
                            )
                        },
                    ),
                )
    async def test_use_existing_secret_name(
        self,
        flow_run,
        mock_core_client,
        mock_watch,
        mock_pods_stream_that_returns_running_pod,
        mock_batch_client,
        mock_api_key_secret_name_and_key: tuple[str, str],
    ):
        """An operator-supplied API-key secret name/key (from the
        ``mock_api_key_secret_name_and_key`` fixture) should be referenced
        verbatim in the job's PREFECT_API_KEY env entry."""
        mock_api_key_secret_name, mock_api_key_secret_key = (
            mock_api_key_secret_name_and_key
        )
        mock_watch.return_value.stream = mock_pods_stream_that_returns_running_pod
        configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
            KubernetesWorker.get_default_base_job_template(), {"image": "foo"}
        )
        with temporary_settings(updates={PREFECT_API_KEY: "fake"}):
            async with KubernetesWorker(work_pool_name="test") as k8s_worker:
                mock_core_client.return_value.read_namespaced_secret.return_value = (
                    V1Secret(
                        api_version="v1",
                        kind="Secret",
                        metadata=V1ObjectMeta(
                            name=f"prefect-{_slugify_name(k8s_worker.name)}-api-key",
                            namespace=configuration.namespace,
                        ),
                        data={
                            "value": base64.b64encode("fake".encode("utf-8")).decode(
                                "utf-8"
                            )
                        },
                    )
                )
                configuration.prepare_for_flow_run(flow_run=flow_run)
                await k8s_worker.run(flow_run, configuration)
                mock_batch_client.return_value.create_namespaced_job.assert_called_once()
                env = mock_batch_client.return_value.create_namespaced_job.call_args[0][
                    1
                ]["spec"]["template"]["spec"]["containers"][0]["env"]
                # The configured secret name/key is used, not a worker-managed one.
                assert {
                    "name": "PREFECT_API_KEY",
                    "valueFrom": {
                        "secretKeyRef": {
                            "name": mock_api_key_secret_name,
                            "key": mock_api_key_secret_key,
                        }
                    },
                } in env
    async def test_existing_secret_name_takes_precedence(
        self,
        flow_run,
        mock_core_client,
        mock_watch,
        mock_pods_stream_that_returns_running_pod,
        mock_batch_client,
        mock_api_key_secret_name_and_key: tuple[str, str],
        enable_store_api_key_in_secret,
    ):
        """Even with secret storage enabled, an explicitly configured secret
        name/key wins: the worker must not replace its own managed secret."""
        mock_api_key_secret_name, mock_api_key_secret_key = (
            mock_api_key_secret_name_and_key
        )
        mock_watch.return_value.stream = mock_pods_stream_that_returns_running_pod
        configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
            KubernetesWorker.get_default_base_job_template(), {"image": "foo"}
        )
        with temporary_settings(updates={PREFECT_API_KEY: "fake"}):
            async with KubernetesWorker(work_pool_name="test") as k8s_worker:
                mock_core_client.return_value.read_namespaced_secret.return_value = (
                    V1Secret(
                        api_version="v1",
                        kind="Secret",
                        metadata=V1ObjectMeta(
                            name=f"prefect-{_slugify_name(k8s_worker.name)}-api-key",
                            namespace=configuration.namespace,
                        ),
                        data={
                            "value": base64.b64encode("fake".encode("utf-8")).decode(
                                "utf-8"
                            )
                        },
                    )
                )
                configuration.prepare_for_flow_run(flow_run=flow_run)
                await k8s_worker.run(flow_run, configuration)
                mock_batch_client.return_value.create_namespaced_job.assert_called_once()
                env = mock_batch_client.return_value.create_namespaced_job.call_args[0][
                    1
                ]["spec"]["template"]["spec"]["containers"][0]["env"]
                assert {
                    "name": "PREFECT_API_KEY",
                    "valueFrom": {
                        "secretKeyRef": {
                            "name": mock_api_key_secret_name,
                            "key": mock_api_key_secret_key,
                        }
                    },
                } in env
                # No write to the worker-managed secret should occur.
                mock_core_client.return_value.replace_namespaced_secret.assert_not_called()
    async def test_use_existing_auth_string_secret_name(
        self,
        flow_run,
        mock_core_client,
        mock_watch,
        mock_pods_stream_that_returns_running_pod,
        mock_batch_client,
        mock_api_auth_string_secret_name_and_key: tuple[str, str],
    ):
        """An operator-supplied auth-string secret name/key should be referenced
        verbatim in the job's PREFECT_API_AUTH_STRING env entry."""
        mock_api_auth_string_secret_name, mock_api_auth_string_secret_key = (
            mock_api_auth_string_secret_name_and_key
        )
        mock_watch.return_value.stream = mock_pods_stream_that_returns_running_pod
        configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
            KubernetesWorker.get_default_base_job_template(), {"image": "foo"}
        )
        with temporary_settings(updates={PREFECT_API_AUTH_STRING: "fake"}):
            async with KubernetesWorker(work_pool_name="test") as k8s_worker:
                mock_core_client.return_value.read_namespaced_secret.return_value = V1Secret(
                    api_version="v1",
                    kind="Secret",
                    metadata=V1ObjectMeta(
                        name=f"prefect-{_slugify_name(k8s_worker.name)}-api-auth-string",
                        namespace=configuration.namespace,
                    ),
                    data={
                        "value": base64.b64encode("fake".encode("utf-8")).decode(
                            "utf-8"
                        )
                    },
                )
                configuration.prepare_for_flow_run(flow_run=flow_run)
                await k8s_worker.run(flow_run, configuration)
                mock_batch_client.return_value.create_namespaced_job.assert_called_once()
                env = mock_batch_client.return_value.create_namespaced_job.call_args[0][
                    1
                ]["spec"]["template"]["spec"]["containers"][0]["env"]
                assert {
                    "name": "PREFECT_API_AUTH_STRING",
                    "valueFrom": {
                        "secretKeyRef": {
                            "name": mock_api_auth_string_secret_name,
                            "key": mock_api_auth_string_secret_key,
                        }
                    },
                } in env
    async def test_logs_a_warning_if_api_auth_string_is_set_but_no_secret_name_or_key_is_provided(
        self,
        flow_run,
        mock_core_client,
        caplog,
    ):
        """With PREFECT_API_AUTH_STRING set but no secret configured, the worker
        should emit a warning explaining the missing secret name/key."""
        with temporary_settings(updates={PREFECT_API_AUTH_STRING: "fake"}):
            configuration = (
                await KubernetesWorkerJobConfiguration.from_template_and_values(
                    KubernetesWorker.get_default_base_job_template(), {"image": "foo"}
                )
            )
            configuration.prepare_for_flow_run(flow_run=flow_run)
            async with KubernetesWorker(work_pool_name="test") as k8s_worker:
                await k8s_worker.run(flow_run, configuration)
                assert (
                    "PREFECT_API_AUTH_STRING is set, but no secret name or key is provided"
                    in caplog.text
                )
    async def test_create_job_failure(
        self,
        flow_run,
        mock_core_client,
        mock_watch,
        mock_batch_client,
    ):
        """A 403 ApiException from job creation should surface as an
        InfrastructureError combining the HTTP reason and the API message."""
        response = MagicMock()
        response.data = json.dumps(
            {
                "kind": "Status",
                "apiVersion": "v1",
                "metadata": {},
                "status": "Failure",
                "message": 'jobs.batch is forbidden: User "system:serviceaccount:helm-test:prefect-worker-dev" cannot create resource "jobs" in API group "batch" in the namespace "prefect"',
                "reason": "Forbidden",
                "details": {"group": "batch", "kind": "jobs"},
                "code": 403,
            }
        )
        response.status = 403
        response.reason = "Forbidden"
        mock_batch_client.return_value.create_namespaced_job.side_effect = ApiException(
            http_resp=response
        )
        configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
            KubernetesWorker.get_default_base_job_template(), {"image": "foo"}
        )
        async with KubernetesWorker(work_pool_name="test") as k8s_worker:
            with pytest.raises(
                InfrastructureError,
                match=re.escape(
                    "Unable to create Kubernetes job: Forbidden: jobs.batch is forbidden: User "
                    '"system:serviceaccount:helm-test:prefect-worker-dev" cannot '
                    'create resource "jobs" in API group "batch" in the namespace '
                    '"prefect"'
                ),
            ):
                await k8s_worker.run(flow_run, configuration)
    async def test_create_job_retries(
        self,
        flow_run,
        mock_core_client,
        mock_watch,
        mock_batch_client,
    ):
        """Job creation should be retried on ApiException before giving up."""
        # Expected number of attempts before the worker raises. TODO confirm this
        # matches the retry constant in the worker implementation.
        MAX_ATTEMPTS = 3
        response = MagicMock()
        response.data = json.dumps(
            {
                "kind": "Status",
                "apiVersion": "v1",
                "metadata": {},
                "status": "Failure",
                "message": 'jobs.batch is forbidden: User "system:serviceaccount:helm-test:prefect-worker-dev" cannot create resource "jobs" in API group "batch" in the namespace "prefect"',
                "reason": "Forbidden",
                "details": {"group": "batch", "kind": "jobs"},
                "code": 403,
            }
        )
        response.status = 403
        response.reason = "Forbidden"
        mock_batch_client.return_value.create_namespaced_job.side_effect = ApiException(
            http_resp=response
        )
        configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
            KubernetesWorker.get_default_base_job_template(), {"image": "foo"}
        )
        async with KubernetesWorker(work_pool_name="test") as k8s_worker:
            with pytest.raises(
                InfrastructureError,
                match=re.escape(
                    "Unable to create Kubernetes job: Forbidden: jobs.batch is forbidden: User "
                    '"system:serviceaccount:helm-test:prefect-worker-dev" cannot '
                    'create resource "jobs" in API group "batch" in the namespace '
                    '"prefect"'
                ),
            ):
                await k8s_worker.run(flow_run, configuration)
        # Every retry attempt should have hit the batch API.
        assert (
            mock_batch_client.return_value.create_namespaced_job.call_count
            == MAX_ATTEMPTS
        )
    async def test_create_job_failure_no_reason(
        self,
        flow_run,
        mock_core_client,
        mock_watch,
        mock_batch_client,
    ):
        """When the HTTP response carries no reason, the InfrastructureError
        message should fall back to the API body's message alone."""
        response = MagicMock()
        response.data = json.dumps(
            {
                "kind": "Status",
                "apiVersion": "v1",
                "metadata": {},
                "status": "Failure",
                "message": 'jobs.batch is forbidden: User "system:serviceaccount:helm-test:prefect-worker-dev" cannot create resource "jobs" in API group "batch" in the namespace "prefect"',
                "reason": "Forbidden",
                "details": {"group": "batch", "kind": "jobs"},
                "code": 403,
            }
        )
        response.status = 403
        response.reason = None
        mock_batch_client.return_value.create_namespaced_job.side_effect = ApiException(
            http_resp=response
        )
        configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
            KubernetesWorker.get_default_base_job_template(), {"image": "foo"}
        )
        async with KubernetesWorker(work_pool_name="test") as k8s_worker:
            with pytest.raises(
                InfrastructureError,
                match=re.escape(
                    "Unable to create Kubernetes job: jobs.batch is forbidden: User "
                    '"system:serviceaccount:helm-test:prefect-worker-dev" cannot '
                    'create resource "jobs" in API group "batch" in the namespace '
                    '"prefect"'
                ),
            ):
                await k8s_worker.run(flow_run, configuration)
    async def test_create_job_failure_no_message(
        self,
        flow_run,
        mock_core_client,
        mock_watch,
        mock_batch_client,
    ):
        """When the API body carries no message, the InfrastructureError should
        fall back to the HTTP reason alone."""
        response = MagicMock()
        response.data = json.dumps(
            {
                "kind": "Status",
                "apiVersion": "v1",
                "metadata": {},
                "status": "Failure",
                "reason": "Forbidden",
                "details": {"group": "batch", "kind": "jobs"},
                "code": 403,
            }
        )
        response.status = 403
        response.reason = "Test"
        mock_batch_client.return_value.create_namespaced_job.side_effect = ApiException(
            http_resp=response
        )
        configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
            KubernetesWorker.get_default_base_job_template(), {"image": "foo"}
        )
        async with KubernetesWorker(work_pool_name="test") as k8s_worker:
            with pytest.raises(
                InfrastructureError,
                match=re.escape("Unable to create Kubernetes job: Test"),
            ):
                await k8s_worker.run(flow_run, configuration)
async def test_create_job_failure_no_response_body(
self,
flow_run,
mock_core_client,
mock_watch,
mock_batch_client,
):
response = MagicMock()
response.data = None
response.status = 403
response.reason = "Test"
mock_batch_client.return_value.create_namespaced_job.side_effect = ApiException(
http_resp=response
)
configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
KubernetesWorker.get_default_base_job_template(), {"image": "foo"}
)
async with KubernetesWorker(work_pool_name="test") as k8s_worker:
with pytest.raises(
InfrastructureError,
match=re.escape("Unable to create Kubernetes job: Test"),
):
await k8s_worker.run(flow_run, configuration)
async def test_allows_image_setting_from_manifest(
self,
default_configuration: KubernetesWorkerJobConfiguration,
flow_run,
mock_core_client,
mock_watch,
mock_pods_stream_that_returns_running_pod,
mock_batch_client,
):
mock_watch.return_value.stream = mock_pods_stream_that_returns_running_pod
default_configuration.job_manifest["spec"]["template"]["spec"]["containers"][0][
"image"
] = "test"
default_configuration.prepare_for_flow_run(flow_run)
async with KubernetesWorker(work_pool_name="test") as k8s_worker:
await k8s_worker.run(flow_run, default_configuration)
mock_batch_client.return_value.create_namespaced_job.assert_called_once()
image = mock_batch_client.return_value.create_namespaced_job.call_args[0][
1
]["spec"]["template"]["spec"]["containers"][0]["image"]
assert image == "test"
async def test_uses_labels_setting(
self,
flow_run,
mock_core_client,
mock_watch,
mock_pods_stream_that_returns_running_pod,
mock_batch_client,
):
mock_watch.return_value.stream = mock_pods_stream_that_returns_running_pod
configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
KubernetesWorker.get_default_base_job_template(),
{"labels": {"foo": "foo", "bar": "bar"}},
)
async with KubernetesWorker(work_pool_name="test") as k8s_worker:
await k8s_worker.run(flow_run, configuration)
mock_batch_client.return_value.create_namespaced_job.assert_called_once()
labels = mock_batch_client.return_value.create_namespaced_job.call_args[0][
1
]["metadata"]["labels"]
assert labels["foo"] == "foo"
assert labels["bar"] == "bar"
    async def test_sets_environment_variables(
        self,
        flow_run,
        mock_core_client,
        mock_watch,
        mock_pods_stream_that_returns_running_pod,
        mock_batch_client,
    ):
        """User-supplied env vars should be merged with the worker's base env
        (base settings, flow-run env, and the sigterm-behavior default)."""
        mock_watch.return_value.stream = mock_pods_stream_that_returns_running_pod
        configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
            KubernetesWorker.get_default_base_job_template(),
            {"env": {"foo": "FOO", "bar": "BAR"}},
        )
        configuration.prepare_for_flow_run(flow_run)
        async with KubernetesWorker(work_pool_name="test") as k8s_worker:
            await k8s_worker.run(flow_run, configuration)
            mock_batch_client.return_value.create_namespaced_job.assert_called_once()
            manifest = mock_batch_client.return_value.create_namespaced_job.call_args[
                0
            ][1]
            pod = manifest["spec"]["template"]["spec"]
            env = pod["containers"][0]["env"]
            # Exact-list comparison: order and content must both match.
            assert env == [
                {"name": key, "value": value}
                for key, value in {
                    **configuration._base_environment(),
                    **configuration._base_flow_run_environment(flow_run),
                    "foo": "FOO",
                    "bar": "BAR",
                    "PREFECT_FLOW_RUN_EXECUTE_SIGTERM_BEHAVIOR": "reschedule",
                }.items()
            ]
    async def test_does_not_overwrite_sigterm_behavior_env(
        self,
        flow_run,
        mock_core_client,
        mock_watch,
        mock_pods_stream_that_returns_running_pod,
        mock_batch_client,
    ):
        """A user-provided PREFECT_FLOW_RUN_EXECUTE_SIGTERM_BEHAVIOR value must
        not be replaced by the worker's default ("reschedule")."""
        mock_watch.return_value.stream = mock_pods_stream_that_returns_running_pod
        configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
            KubernetesWorker.get_default_base_job_template(),
            {"env": {"PREFECT_FLOW_RUN_EXECUTE_SIGTERM_BEHAVIOR": "die"}},
        )
        configuration.prepare_for_flow_run(flow_run)
        async with KubernetesWorker(work_pool_name="test") as k8s_worker:
            await k8s_worker.run(flow_run, configuration)
            mock_batch_client.return_value.create_namespaced_job.assert_called_once()
            manifest = mock_batch_client.return_value.create_namespaced_job.call_args[
                0
            ][1]
            pod = manifest["spec"]["template"]["spec"]
            env = pod["containers"][0]["env"]
            assert env == [
                {"name": key, "value": value}
                for key, value in {
                    **configuration._base_environment(),
                    **configuration._base_flow_run_environment(flow_run),
                    "PREFECT_FLOW_RUN_EXECUTE_SIGTERM_BEHAVIOR": "die",
                }.items()
            ]
    async def test_uses_custom_env_list_from_base_template(
        self,
        flow_run,
        mock_core_client,
        mock_watch,
        mock_pods_stream_that_returns_running_pod,
        mock_batch_client,
    ):
        """Kubernetes list-style env entries (including valueFrom secretKeyRef)
        in a custom base template should survive into the submitted job
        alongside the standard Prefect env vars."""
        mock_watch.return_value.stream = mock_pods_stream_that_returns_running_pod
        # Create a custom base job template with list-style env
        custom_base_template = KubernetesWorker.get_default_base_job_template()
        custom_base_template["job_configuration"]["job_manifest"]["spec"]["template"][
            "spec"
        ]["containers"][0]["env"] = [
            {"name": "MYENV", "value": "foobarbaz"},
            {
                "name": "MYENVFROM",
                "valueFrom": {"secretKeyRef": {"name": "something", "key": "SECRET"}},
            },
        ]
        # Create a KubernetesWorkerJobConfiguration using the custom template
        configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
            custom_base_template,
            {},
        )
        configuration.prepare_for_flow_run(flow_run)
        # Run the worker with this configuration
        async with KubernetesWorker(work_pool_name="test") as k8s_worker:
            await k8s_worker.run(flow_run, configuration)
        mock_batch_client.return_value.create_namespaced_job.assert_called_once()
        created_job = mock_batch_client.return_value.create_namespaced_job.call_args[0][
            1
        ]
        created_env = created_job["spec"]["template"]["spec"]["containers"][0]["env"]
        # Check if the custom environment variables are present
        assert any(
            env
            for env in created_env
            if env["name"] == "MYENV" and env["value"] == "foobarbaz"
        )
        assert any(
            env
            for env in created_env
            if env["name"] == "MYENVFROM"
            and env["valueFrom"]["secretKeyRef"]["name"] == "something"
            and env["valueFrom"]["secretKeyRef"]["key"] == "SECRET"
        )
        # Standard Prefect env vars must still be injected.
        assert any(env for env in created_env if env["name"] == "PREFECT__FLOW_RUN_ID")
    async def test_merges_env_list_from_work_pool_and_deployment(
        self,
        flow_run,
        mock_core_client,
        mock_watch,
        mock_pods_stream_that_returns_running_pod,
        mock_batch_client,
    ):
        """Test that environment variables in list format from work pool and deployment are merged.

        Regression test for https://github.com/PrefectHQ/prefect/issues/17406
        """
        mock_watch.return_value.stream = mock_pods_stream_that_returns_running_pod
        # Work pool contributes list-style env (plain value and secretKeyRef).
        custom_base_template = KubernetesWorker.get_default_base_job_template()
        custom_base_template["job_configuration"]["env"] = [
            {"name": "WORK_POOL_ENV", "value": "work_pool_value"},
            {
                "name": "WORK_POOL_SECRET",
                "valueFrom": {
                    "secretKeyRef": {"name": "work-pool-secret", "key": "SECRET_KEY"}
                },
            },
        ]
        # Deployment contributes its own list-style env.
        deployment_env = [{"name": "DEPLOYMENT_ENV", "value": "deployment_value"}]
        configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
            custom_base_template,
            {"env": deployment_env},
        )
        configuration.prepare_for_flow_run(flow_run)
        async with KubernetesWorker(work_pool_name="test") as k8s_worker:
            await k8s_worker.run(flow_run, configuration)
        mock_batch_client.return_value.create_namespaced_job.assert_called_once()
        created_job = mock_batch_client.return_value.create_namespaced_job.call_args[0][
            1
        ]
        created_env = created_job["spec"]["template"]["spec"]["containers"][0]["env"]
        # make sure both work pool and deployment env vars are present
        assert any(
            env
            for env in created_env
            if env["name"] == "WORK_POOL_ENV" and env["value"] == "work_pool_value"
        )
        assert any(
            env
            for env in created_env
            if env["name"] == "WORK_POOL_SECRET"
            and env["valueFrom"]["secretKeyRef"]["name"] == "work-pool-secret"
            and env["valueFrom"]["secretKeyRef"]["key"] == "SECRET_KEY"
        )
        assert any(
            env
            for env in created_env
            if env["name"] == "DEPLOYMENT_ENV" and env["value"] == "deployment_value"
        )
        # Also check that standard Prefect env vars are present
        assert any(env for env in created_env if env["name"] == "PREFECT__FLOW_RUN_ID")
async def test_allows_unsetting_environment_variables(
self,
flow_run,
mock_core_client,
mock_watch,
mock_pods_stream_that_returns_running_pod,
mock_batch_client,
):
mock_watch.return_value.stream = mock_pods_stream_that_returns_running_pod
configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
KubernetesWorker.get_default_base_job_template(),
{"env": {"PREFECT_TEST_MODE": None}},
)
configuration.prepare_for_flow_run(flow_run)
async with KubernetesWorker(work_pool_name="test") as k8s_worker:
await k8s_worker.run(flow_run, configuration)
mock_batch_client.return_value.create_namespaced_job.assert_called_once()
manifest = mock_batch_client.return_value.create_namespaced_job.call_args[
0
][1]
pod = manifest["spec"]["template"]["spec"]
env = pod["containers"][0]["env"]
env_names = {variable["name"] for variable in env}
assert "PREFECT_TEST_MODE" not in env_names
    async def test_env_vars_from_work_pool_not_duplicated(
        self,
        flow_run,
        mock_core_client,
        mock_watch,
        mock_pods_stream_that_returns_running_pod,
        mock_batch_client,
    ):
        """Test that environment variables set in work pool are not duplicated.

        Regression test for https://github.com/PrefectHQ/prefect/issues/19167
        """
        mock_watch.return_value.stream = mock_pods_stream_that_returns_running_pod
        # Create configuration with env vars set as work pool variables
        # (this is what happens when you set env vars in the work pool UI)
        configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
            KubernetesWorker.get_default_base_job_template(),
            {"env": [{"name": "TEST_VALUE", "value": "1"}]},
        )
        configuration.prepare_for_flow_run(flow_run)
        async with KubernetesWorker(work_pool_name="test") as k8s_worker:
            await k8s_worker.run(flow_run, configuration)
            mock_batch_client.return_value.create_namespaced_job.assert_called_once()
            manifest = mock_batch_client.return_value.create_namespaced_job.call_args[
                0
            ][1]
            pod = manifest["spec"]["template"]["spec"]
            env = pod["containers"][0]["env"]
            # Count how many times TEST_VALUE appears
            test_value_entries = [e for e in env if e.get("name") == "TEST_VALUE"]
            assert len(test_value_entries) == 1, (
                f"Expected TEST_VALUE to appear once, but it appeared "
                f"{len(test_value_entries)} times: {test_value_entries}"
            )
            assert test_value_entries[0]["value"] == "1"
    @pytest.mark.parametrize(
        "given,expected",
        [
            ("a-valid-dns-subdomain1/and-a-name", "a-valid-dns-subdomain1/and-a-name"),
            (
                "a-prefix-with-invalid$@*^$@-characters/and-a-name",
                "a-prefix-with-invalid-characters/and-a-name",
            ),
            (
                "a-name-with-invalid$@*^$@-characters",
                "a-name-with-invalid-characters",
            ),
            ("/a-name-that-starts-with-slash", "a-name-that-starts-with-slash"),
            ("a-prefix/and-a-name/-with-a-slash", "a-prefix/and-a-name-with-a-slash"),
            (
                "_a-name-that-starts-with-underscore",
                "a-name-that-starts-with-underscore",
            ),
            ("-a-name-that-starts-with-dash", "a-name-that-starts-with-dash"),
            (".a-name-that-starts-with-period", "a-name-that-starts-with-period"),
            ("a-name-that-ends-with-underscore_", "a-name-that-ends-with-underscore"),
            ("a-name-that-ends-with-dash-", "a-name-that-ends-with-dash"),
            ("a-name-that-ends-with-period.", "a-name-that-ends-with-period"),
            (
                "._.-a-name-with-trailing-leading-chars-__-.",
                "a-name-with-trailing-leading-chars",
            ),
            ("a-prefix/and-a-name/-with-a-slash", "a-prefix/and-a-name-with-a-slash"),
            # Truncation of the prefix
            ("a" * 300 + "/and-a-name", "a" * 253 + "/and-a-name"),
            # Truncation of the name
            ("a" * 300, "a" * 63),
            # Truncation of the prefix and name together
            ("a" * 300 + "/" + "b" * 100, "a" * 253 + "/" + "b" * 63),
            # All invalid passes through
            ("$@*^$@", "$@*^$@"),
            # All invalid passes through for prefix
            ("$@*^$@/name", "$@*^$@/name"),
        ],
    )
    async def test_sanitizes_user_label_keys(
        self,
        flow_run,
        mock_core_client,
        mock_watch,
        mock_pods_stream_that_returns_running_pod,
        mock_batch_client,
        given,
        expected,
    ):
        """User label keys should be sanitized to valid Kubernetes label keys
        (optional DNS-subdomain prefix + name), per the table above."""
        mock_watch.return_value.stream = mock_pods_stream_that_returns_running_pod
        configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
            KubernetesWorker.get_default_base_job_template(),
            {
                "labels": {given: "foo"},
            },
        )
        configuration.prepare_for_flow_run(flow_run)
        async with KubernetesWorker(work_pool_name="test") as k8s_worker:
            await k8s_worker.run(flow_run, configuration)
            mock_batch_client.return_value.create_namespaced_job.assert_called_once()
            labels = mock_batch_client.return_value.create_namespaced_job.call_args[0][
                1
            ]["metadata"]["labels"]
            assert labels[expected] == "foo"
    @pytest.mark.parametrize(
        "given,expected",
        [
            ("valid-label-text", "valid-label-text"),
            (
                "text-with-invalid$@*^$@-characters",
                "text-with-invalid-characters",
            ),
            ("_value-that-starts-with-underscore", "value-that-starts-with-underscore"),
            ("-value-that-starts-with-dash", "value-that-starts-with-dash"),
            (".value-that-starts-with-period", "value-that-starts-with-period"),
            ("value-that-ends-with-underscore_", "value-that-ends-with-underscore"),
            ("value-that-ends-with-dash-", "value-that-ends-with-dash"),
            ("value-that-ends-with-period.", "value-that-ends-with-period"),
            (
                "._.-value-with-trailing-leading-chars-__-.",
                "value-with-trailing-leading-chars",
            ),
            # Truncation
            ("a" * 100, "a" * 63),
            # All invalid passes through
            ("$@*^$@", "$@*^$@"),
            # Uppercase
            ("VALUE-THAT-IS-UPPERCASE", "VALUE-THAT-IS-UPPERCASE"),
        ],
    )
    async def test_sanitizes_user_label_values(
        self,
        flow_run,
        mock_core_client,
        mock_watch,
        mock_pods_stream_that_returns_running_pod,
        mock_batch_client,
        given,
        expected,
    ):
        """User label values should be sanitized to valid Kubernetes label
        values (case preserved), per the table above."""
        mock_watch.return_value.stream = mock_pods_stream_that_returns_running_pod
        configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
            KubernetesWorker.get_default_base_job_template(),
            {"labels": {"foo": given}},
        )
        configuration.prepare_for_flow_run(flow_run)
        async with KubernetesWorker(work_pool_name="test") as k8s_worker:
            await k8s_worker.run(flow_run, configuration)
            mock_batch_client.return_value.create_namespaced_job.assert_called_once()
            labels = mock_batch_client.return_value.create_namespaced_job.call_args[0][
                1
            ]["metadata"]["labels"]
            assert labels["foo"] == expected
async def test_uses_namespace_setting(
self,
flow_run,
mock_core_client,
mock_watch,
mock_pods_stream_that_returns_running_pod,
mock_batch_client,
):
mock_watch.return_value.stream = mock_pods_stream_that_returns_running_pod
configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
KubernetesWorker.get_default_base_job_template(),
{"namespace": "foo"},
)
async with KubernetesWorker(work_pool_name="test") as k8s_worker:
await k8s_worker.run(flow_run, configuration)
mock_batch_client.return_value.create_namespaced_job.assert_called_once()
namespace = mock_batch_client.return_value.create_namespaced_job.call_args[
0
][1]["metadata"]["namespace"]
assert namespace == "foo"
async def test_allows_namespace_setting_from_manifest(
self,
flow_run,
default_configuration,
mock_core_client,
mock_watch,
mock_pods_stream_that_returns_running_pod,
mock_batch_client,
):
mock_watch.return_value.stream = mock_pods_stream_that_returns_running_pod
default_configuration.job_manifest["metadata"]["namespace"] = "test"
default_configuration.prepare_for_flow_run(flow_run)
async with KubernetesWorker(work_pool_name="test") as k8s_worker:
await k8s_worker.run(flow_run, default_configuration)
mock_batch_client.return_value.create_namespaced_job.assert_called_once()
namespace = mock_batch_client.return_value.create_namespaced_job.call_args[
0
][1]["metadata"]["namespace"]
assert namespace == "test"
async def test_uses_service_account_name_setting(
self,
flow_run,
mock_core_client,
mock_watch,
mock_pods_stream_that_returns_running_pod,
mock_batch_client,
):
mock_watch.return_value.stream = mock_pods_stream_that_returns_running_pod
configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
KubernetesWorker.get_default_base_job_template(),
{"service_account_name": "foo"},
)
async with KubernetesWorker(work_pool_name="test") as k8s_worker:
await k8s_worker.run(flow_run, configuration)
mock_batch_client.return_value.create_namespaced_job.assert_called_once()
service_account_name = (
mock_batch_client.return_value.create_namespaced_job.call_args[0][1][
"spec"
]["template"]["spec"]["serviceAccountName"]
)
assert service_account_name == "foo"
async def test_uses_finished_job_ttl_setting(
self,
flow_run,
mock_core_client,
mock_watch,
mock_pods_stream_that_returns_running_pod,
mock_batch_client,
):
mock_watch.return_value.stream = mock_pods_stream_that_returns_running_pod
configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
KubernetesWorker.get_default_base_job_template(),
{"finished_job_ttl": 123},
)
async with KubernetesWorker(work_pool_name="test") as k8s_worker:
await k8s_worker.run(flow_run, configuration)
mock_batch_client.return_value.create_namespaced_job.assert_called_once()
finished_job_ttl = (
mock_batch_client.return_value.create_namespaced_job.call_args[0][1][
"spec"
]["ttlSecondsAfterFinished"]
)
assert finished_job_ttl == 123
async def test_uses_specified_image_pull_policy(
self,
flow_run,
mock_core_client,
mock_watch,
mock_pods_stream_that_returns_running_pod,
mock_batch_client,
):
mock_watch.return_value.stream = mock_pods_stream_that_returns_running_pod
configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
KubernetesWorker.get_default_base_job_template(),
{"image_pull_policy": "IfNotPresent"},
)
async with KubernetesWorker(work_pool_name="test") as k8s_worker:
await k8s_worker.run(flow_run, configuration)
mock_batch_client.return_value.create_namespaced_job.assert_called_once()
call_image_pull_policy = (
mock_batch_client.return_value.create_namespaced_job.call_args[0][1][
"spec"
]["template"]["spec"]["containers"][0].get("imagePullPolicy")
)
assert call_image_pull_policy == "IfNotPresent"
    @pytest.mark.usefixtures("mock_core_client_lean", "mock_cluster_config")
    async def test_keepalive_enabled(
        self,
    ):
        """The configured Kubernetes client should use KeepAliveClientRequest
        so that connections are kept alive between API calls."""
        configuration = await KubernetesWorkerJobConfiguration.from_template_and_values(
            KubernetesWorker.get_default_base_job_template(),
            {"image": "foo"},
        )
        async with KubernetesWorker(work_pool_name="test") as k8s_worker:
            async with k8s_worker._get_configured_kubernetes_client(
                configuration
            ) as client:
                # Reaches into aiohttp internals to verify the request class.
                assert (
                    client.rest_client.pool_manager._request_class
                    is KeepAliveClientRequest
                )
    async def test_defaults_to_incluster_config(
        self,
        flow_run,
        default_configuration,
        mock_core_client_lean,
        mock_watch,
        mock_cluster_config,
        mock_batch_client,
        mock_job,
        mock_pod,
    ):
        """The worker should try in-cluster configuration first and not fall
        back to kubeconfig when it succeeds."""
        async def mock_stream(*args, **kwargs):
            # Emit a pod event, then a completed-job event, so run() finishes.
            if kwargs["func"] == mock_core_client_lean.return_value.list_namespaced_pod:
                yield {"object": mock_pod, "type": "MODIFIED"}
            if kwargs["func"] == mock_core_client_lean.return_value.list_namespaced_job:
                mock_job.status.completion_time = now("UTC").timestamp()
                yield {"object": mock_job, "type": "MODIFIED"}
        mock_watch.return_value.stream = mock_stream
        async with KubernetesWorker(work_pool_name="test") as k8s_worker:
            await k8s_worker.run(flow_run, default_configuration)
            mock_cluster_config.load_incluster_config.assert_called_once()
            assert not mock_cluster_config.load_kube_config_from_dict.called
    async def test_uses_cluster_config_if_not_in_cluster(
        self,
        flow_run,
        default_configuration,
        mock_watch,
        mock_cluster_config,
        mock_batch_client,
        mock_core_client_lean,
        mock_job,
        mock_pod,
    ):
        """When in-cluster configuration raises ConfigException, the worker
        should fall back to building a client from local kubeconfig."""
        async def mock_stream(*args, **kwargs):
            # Emit a pod event, then a completed-job event, so run() finishes.
            if kwargs["func"] == mock_core_client_lean.return_value.list_namespaced_pod:
                yield {"object": mock_pod, "type": "MODIFIED"}
            if kwargs["func"] == mock_core_client_lean.return_value.list_namespaced_job:
                mock_job.status.completion_time = now("UTC").timestamp()
                yield {"object": mock_job, "type": "MODIFIED"}
        mock_watch.return_value.stream = mock_stream
        mock_cluster_config.load_incluster_config.side_effect = ConfigException()
        async with KubernetesWorker(work_pool_name="test") as k8s_worker:
            await k8s_worker.run(flow_run, default_configuration)
            mock_cluster_config.new_client_from_config.assert_called_once()
    class TestSubmit:
        """Tests for ad-hoc flow submission via ``KubernetesWorker.submit``."""
        @pytest.fixture
        async def work_pool(self):
            """Create a throwaway Kubernetes work pool and delete it afterwards."""
            async with prefect.get_client() as client:
                work_pool = await client.create_work_pool(
                    WorkPoolCreate(
                        name=f"test-{uuid.uuid4()}",
                        base_job_template=KubernetesWorker.get_default_base_job_template(),
                    )
                )
                try:
                    yield work_pool
                finally:
                    await client.delete_work_pool(work_pool.name)
        @pytest.fixture(autouse=True)
        async def mock_steps(self, work_pool: WorkPool):
            """Configure the work pool with mock bundle upload/execute steps."""
            UPLOAD_STEP = {
                "prefect_mock.experimental.bundles.upload": {
                    "requires": "prefect-mock==0.5.5",
                    "bucket": "test-bucket",
                    "credentials_block_name": "my-creds",
                }
            }
            EXECUTE_STEP = {
                "prefect_mock.experimental.bundles.execute": {
                    "requires": "prefect-mock==0.5.5",
                    "bucket": "test-bucket",
                    "credentials_block_name": "my-creds",
                }
            }
            async with prefect.get_client() as client:
                await client.update_work_pool(
                    work_pool.name,
                    WorkPoolUpdate(
                        storage_configuration=WorkPoolStorageConfiguration(
                            bundle_execution_step=EXECUTE_STEP,
                            bundle_upload_step=UPLOAD_STEP,
                        ),
                    ),
                )
        @pytest.fixture
        def test_flow(self):
            """A trivial flow used as the submission target."""
            @prefect.flow
            def my_flow():
                return "Hello, world!"
            return my_flow
        async def test_submit_adhoc_run(
            self,
            mock_batch_client,
            mock_core_client,
            default_configuration,
            test_flow,
            mock_run_process: AsyncMock,
            work_pool: WorkPool,
            monkeypatch: pytest.MonkeyPatch,
        ):
            """submit() should upload the bundle via the configured upload step
            and create a flow run whose command invokes the execute step."""
            # Freeze uuid4 so the bundle key is deterministic for assertions.
            frozen_uuid = uuid.uuid4()
            monkeypatch.setattr(uuid, "uuid4", lambda: frozen_uuid)
            python_version_info = sys.version_info
            async with KubernetesWorker(work_pool_name=work_pool.name) as k8s_worker:
                future = await k8s_worker.submit(test_flow)
                assert isinstance(future, PrefectFlowRunFuture)
                expected_upload_command = [
                    "uv",
                    "run",
                    "--quiet",
                    "--with",
                    "prefect-mock==0.5.5",
                    "--python",
                    f"{python_version_info.major}.{python_version_info.minor}",
                    "-m",
                    "prefect_mock.experimental.bundles.upload",
                    "--bucket",
                    "test-bucket",
                    "--credentials-block-name",
                    "my-creds",
                    "--key",
                    str(frozen_uuid),
                    str(frozen_uuid),
                ]
                mock_run_process.assert_called_once_with(
                    expected_upload_command,
                    cwd=ANY,
                )
                expected_execute_command = [
                    "uv",
                    "run",
                    "--with",
                    "prefect-mock==0.5.5",
                    "--python",
                    f"{python_version_info.major}.{python_version_info.minor}",
                    "-m",
                    "prefect_mock.experimental.bundles.execute",
                    "--bucket",
                    "test-bucket",
                    "--credentials-block-name",
                    "my-creds",
                    "--key",
                    str(frozen_uuid),
                ]
                async with prefect.get_client() as client:
                    flow_run = await client.read_flow_run(future.flow_run_id)
                    assert flow_run.work_pool_name == work_pool.name
                    assert flow_run.work_queue_name == "default"
                    assert flow_run.job_variables == {
                        "command": " ".join(expected_execute_command)
                    }
        async def test_submit_adhoc_run_failed_submission(
            self,
            mock_batch_client,
            mock_core_client,
            default_configuration,
            test_flow,
            mock_run_process: AsyncMock,
            work_pool: WorkPool,
        ):
            """If job creation fails, the submitted flow run should end crashed."""
            response = MagicMock()
            response.data = None
            response.status = 403
            response.reason = "Test"
            mock_batch_client.return_value.create_namespaced_job.side_effect = (
                ApiException(http_resp=response)
            )
            async with KubernetesWorker(work_pool_name=work_pool.name) as k8s_worker:
                future = await k8s_worker.submit(test_flow)
                assert isinstance(future, PrefectFlowRunFuture)
                async with prefect.get_client() as client:
                    flow_run = await client.read_flow_run(future.flow_run_id)
                    assert flow_run.state.is_crashed()
| TestKubernetesWorker |
python | streamlit__streamlit | lib/streamlit/vendor/pympler/asizeof.py | {
"start": 47398,
"end": 49007
} | class ____(object):
"""Internal type profile class."""
high = 0 # largest size
number = 0 # number of (unique) objects
objref = None # largest obj (weakref)
total = 0 # total size
weak = False # objref is weakref(obj)
def __cmp__(self, other):
if self.total < other.total:
return -1
elif self.total > other.total:
return +1
elif self.number < other.number:
return -1
elif self.number > other.number:
return +1
return 0
def __lt__(self, other): # for Python 3+
return self.__cmp__(other) < 0
def format(self, clip=0, grand=None):
"""Return format dict."""
if self.number > 1: # avg., plural
a, p = int(self.total / self.number), "s"
else:
a, p = self.total, _NN
o = self.objref
if self.weak:
o = o()
t = _SI2(self.total)
if grand:
t += " (%s)" % _p100(self.total, grand, prec=0)
return dict(
avg=_SI2(a),
high=_SI2(self.high),
lengstr=_lengstr(o),
obj=_repr(o, clip=clip),
plural=p,
total=t,
)
def update(self, obj, size):
"""Update this profile."""
self.number += 1
self.total += size
if self.high < size: # largest
self.high = size
try: # prefer using weak ref
self.objref, self.weak = Weakref.ref(obj), True
except TypeError:
self.objref, self.weak = obj, False
| _Prof |
python | catalyst-team__catalyst | catalyst/core/logger.py | {
"start": 130,
"end": 2607
} | class ____:
"""An abstraction that syncs experiment run with monitoring tools.
Args:
log_batch_metrics: boolean flag to log batch metrics.
log_epoch_metrics: boolean flag to log epoch metrics.
Abstraction, please check out implementations for more details:
- :py:mod:`catalyst.loggers.console.ConsoleLogger`
- :py:mod:`catalyst.loggers.mlflow.MLflowLogger`
- :py:mod:`catalyst.loggers.neptune.NeptuneLogger`
- :py:mod:`catalyst.loggers.tensorboard.TensorboardLogger`
"""
def __init__(self, log_batch_metrics: bool, log_epoch_metrics: bool) -> None:
self._log_batch_metrics = log_batch_metrics
self._log_epoch_metrics = log_epoch_metrics
@property
def logger(self) -> Any:
"""Internal logger/experiment/etc. from the monitoring system. # noqa: DAR401
Returns: # noqa: DAR201, DAR202
Any: internal logger/experiment/etc. from the monitoring system.
"""
raise NotImplementedError()
@property
def log_batch_metrics(self) -> bool:
"""Boolean flag to log batch metrics.
Returns:
bool: boolean flag to log batch metrics.
"""
return self._log_batch_metrics
@property
def log_epoch_metrics(self) -> bool:
"""Boolean flag to log epoch metrics.
Returns:
bool: boolean flag to log epoch metrics.
"""
return self._log_epoch_metrics
def log_artifact(
self,
tag: str,
runner: "IRunner",
artifact: object = None,
path_to_artifact: str = None,
scope: str = None,
) -> None:
"""Logs artifact (arbitrary file like audio, video, etc) to the logger."""
pass
def log_image(
self,
tag: str,
image: np.ndarray,
runner: "IRunner",
scope: str = None,
) -> None:
"""Logs image to the logger."""
pass
def log_hparams(self, hparams: Dict, runner: "IRunner" = None) -> None:
"""Logs hyperparameters to the logger."""
pass
def log_metrics(
self,
metrics: Dict[str, float],
scope: str,
runner: "IRunner",
) -> None:
"""Logs metrics to the logger."""
pass
def flush_log(self) -> None:
"""Flushes the logger."""
pass
def close_log(self) -> None:
"""Closes the logger."""
pass
__all__ = ["ILogger"]
| ILogger |
python | django__django | tests/template_tests/filter_tests/test_stringformat.py | {
"start": 170,
"end": 1113
} | class ____(SimpleTestCase):
"""
Notice that escaping is applied *after* any filters, so the string
formatting here only needs to deal with pre-escaped characters.
"""
@setup(
{
"stringformat01": (
'{% autoescape off %}.{{ a|stringformat:"5s" }}. .'
'{{ b|stringformat:"5s" }}.{% endautoescape %}'
)
}
)
def test_stringformat01(self):
output = self.engine.render_to_string(
"stringformat01", {"a": "a<b", "b": mark_safe("a<b")}
)
self.assertEqual(output, ". a<b. . a<b.")
@setup(
{"stringformat02": '.{{ a|stringformat:"5s" }}. .{{ b|stringformat:"5s" }}.'}
)
def test_stringformat02(self):
output = self.engine.render_to_string(
"stringformat02", {"a": "a<b", "b": mark_safe("a<b")}
)
self.assertEqual(output, ". a<b. . a<b.")
| StringformatTests |
python | pydantic__pydantic | pydantic-core/tests/test_schema_functions.py | {
"start": 616,
"end": 15059
} | class ____(int, Enum):
a = 1
b = 2
def ids_function(val):
if callable(val):
return val.__name__
elif isinstance(val, tuple) and len(val) == 2:
return '({})'.format(', '.join([repr(a) for a in val[0]] + [f'{k}={v!r}' for k, v in val[1].items()]))
else:
return repr(val)
def args(*args, **kwargs):
return args, kwargs
all_schema_functions = [
(core_schema.any_schema, args(), {'type': 'any'}),
(core_schema.any_schema, args(metadata=['foot', 'spa']), {'type': 'any', 'metadata': ['foot', 'spa']}),
(core_schema.none_schema, args(), {'type': 'none'}),
(core_schema.bool_schema, args(), {'type': 'bool'}),
(core_schema.bool_schema, args(strict=True), {'type': 'bool', 'strict': True}),
(core_schema.int_schema, args(), {'type': 'int'}),
(core_schema.int_schema, args(metadata={'fred'}), {'type': 'int', 'metadata': {'fred'}}),
(core_schema.int_schema, args(multiple_of=5, gt=10, lt=20), {'type': 'int', 'multiple_of': 5, 'gt': 10, 'lt': 20}),
(core_schema.float_schema, args(), {'type': 'float'}),
(core_schema.float_schema, args(multiple_of=5, gt=1.2), {'type': 'float', 'multiple_of': 5, 'gt': 1.2}),
(core_schema.str_schema, args(), {'type': 'str'}),
(core_schema.str_schema, args(min_length=5, max_length=10), {'type': 'str', 'min_length': 5, 'max_length': 10}),
(core_schema.bytes_schema, args(), {'type': 'bytes'}),
(core_schema.bytes_schema, args(min_length=5, ref='xx'), {'type': 'bytes', 'min_length': 5, 'ref': 'xx'}),
(core_schema.date_schema, args(), {'type': 'date'}),
(core_schema.date_schema, args(gt=date(2020, 1, 1)), {'type': 'date', 'gt': date(2020, 1, 1)}),
(core_schema.time_schema, args(), {'type': 'time', 'microseconds_precision': 'truncate'}),
(core_schema.datetime_schema, args(), {'type': 'datetime', 'microseconds_precision': 'truncate'}),
(core_schema.timedelta_schema, args(), {'type': 'timedelta', 'microseconds_precision': 'truncate'}),
(
core_schema.time_schema,
args(microseconds_precision='error'),
{'type': 'time', 'microseconds_precision': 'error'},
),
(
core_schema.datetime_schema,
args(microseconds_precision='error'),
{'type': 'datetime', 'microseconds_precision': 'error'},
),
(
core_schema.timedelta_schema,
args(microseconds_precision='error'),
{'type': 'timedelta', 'microseconds_precision': 'error'},
),
(core_schema.literal_schema, args(['a', 'b']), {'type': 'literal', 'expected': ['a', 'b']}),
(core_schema.missing_sentinel_schema, args(), {'type': 'missing-sentinel'}),
(
core_schema.enum_schema,
args(MyEnum, list(MyEnum.__members__.values())),
{'type': 'enum', 'cls': MyEnum, 'members': [MyEnum.a, MyEnum.b]},
),
(core_schema.is_instance_schema, args(int), {'type': 'is-instance', 'cls': int}),
(core_schema.callable_schema, args(), {'type': 'callable'}),
(core_schema.list_schema, args(), {'type': 'list'}),
(core_schema.list_schema, args({'type': 'int'}), {'type': 'list', 'items_schema': {'type': 'int'}}),
(core_schema.tuple_schema, args([]), {'type': 'tuple', 'items_schema': []}),
(
core_schema.set_schema,
args({'type': 'int'}, min_length=4),
{'type': 'set', 'items_schema': {'type': 'int'}, 'min_length': 4},
),
(
core_schema.frozenset_schema,
args({'type': 'int'}, max_length=5),
{'type': 'frozenset', 'items_schema': {'type': 'int'}, 'max_length': 5},
),
(core_schema.generator_schema, args({'type': 'int'}), {'type': 'generator', 'items_schema': {'type': 'int'}}),
(core_schema.dict_schema, args(), {'type': 'dict'}),
(
core_schema.dict_schema,
args({'type': 'str'}, {'type': 'int'}),
{'type': 'dict', 'keys_schema': {'type': 'str'}, 'values_schema': {'type': 'int'}},
),
(
core_schema.with_info_before_validator_function,
args(val_function, {'type': 'int'}),
{
'type': 'function-before',
'function': {'type': 'with-info', 'function': val_function},
'schema': {'type': 'int'},
},
),
(
core_schema.with_info_after_validator_function,
args(val_function, {'type': 'int'}),
{
'type': 'function-after',
'function': {'type': 'with-info', 'function': val_function},
'schema': {'type': 'int'},
},
),
(
core_schema.with_info_wrap_validator_function,
args(val_function, {'type': 'int'}),
{
'type': 'function-wrap',
'function': {'type': 'with-info', 'function': val_function},
'schema': {'type': 'int'},
},
),
(
core_schema.with_info_plain_validator_function,
args(val_function),
core_schema.with_info_plain_validator_function(val_function),
),
(
core_schema.with_default_schema,
args({'type': 'int'}, default=5),
{'type': 'default', 'schema': {'type': 'int'}, 'default': 5},
),
(
core_schema.with_default_schema,
args({'type': 'int'}, default=None),
{'type': 'default', 'schema': {'type': 'int'}, 'default': None},
),
(
core_schema.with_default_schema,
args({'type': 'int'}, default_factory=make_5),
{'type': 'default', 'schema': {'type': 'int'}, 'default_factory': make_5},
),
(core_schema.nullable_schema, args({'type': 'int'}), {'type': 'nullable', 'schema': {'type': 'int'}}),
(
core_schema.union_schema,
args([{'type': 'int'}, {'type': 'str'}]),
{'type': 'union', 'choices': [{'type': 'int'}, {'type': 'str'}]},
),
(
core_schema.union_schema,
args([{'type': 'int'}, {'type': 'str'}], custom_error_type='foobar', custom_error_message='This is Foobar'),
{
'type': 'union',
'choices': [{'type': 'int'}, {'type': 'str'}],
'custom_error_type': 'foobar',
'custom_error_message': 'This is Foobar',
},
),
(
core_schema.tagged_union_schema,
args({'foo': {'type': 'int'}, 'bar': {'type': 'str'}}, 'foo'),
{'type': 'tagged-union', 'choices': {'foo': {'type': 'int'}, 'bar': {'type': 'str'}}, 'discriminator': 'foo'},
),
(
core_schema.chain_schema,
args([{'type': 'int'}, {'type': 'str'}]),
{'type': 'chain', 'steps': [{'type': 'int'}, {'type': 'str'}]},
),
(
core_schema.typed_dict_field,
args({'type': 'int'}, required=True),
{'type': 'typed-dict-field', 'schema': {'type': 'int'}, 'required': True},
),
(
core_schema.typed_dict_schema,
args({'foo': core_schema.typed_dict_field({'type': 'int'})}),
{'type': 'typed-dict', 'fields': {'foo': {'type': 'typed-dict-field', 'schema': {'type': 'int'}}}},
),
(
core_schema.model_field,
args({'type': 'int'}, validation_alias='foobar'),
{'type': 'model-field', 'schema': {'type': 'int'}, 'validation_alias': 'foobar'},
),
(
core_schema.model_fields_schema,
args({'foo': core_schema.model_field({'type': 'int'})}),
{'type': 'model-fields', 'fields': {'foo': {'type': 'model-field', 'schema': {'type': 'int'}}}},
),
(
core_schema.model_schema,
args(MyModel, {'type': 'int'}),
{'type': 'model', 'cls': MyModel, 'schema': {'type': 'int'}},
),
(core_schema.arguments_parameter, args('foo', {'type': 'int'}), {'name': 'foo', 'schema': {'type': 'int'}}),
(
core_schema.arguments_schema,
args(
[
core_schema.arguments_parameter('foo', {'type': 'int'}),
core_schema.arguments_parameter('bar', {'type': 'str'}),
],
serialization=core_schema.format_ser_schema('d'),
),
{
'type': 'arguments',
'arguments_schema': [
{'name': 'foo', 'schema': {'type': 'int'}},
{'name': 'bar', 'schema': {'type': 'str'}},
],
'serialization': {'type': 'format', 'formatting_string': 'd'},
},
),
(
core_schema.arguments_v3_schema,
args(
[
core_schema.arguments_v3_parameter('foo', core_schema.int_schema()),
core_schema.arguments_v3_parameter('bar', core_schema.str_schema()),
],
serialization=core_schema.format_ser_schema('d'),
),
{
'type': 'arguments-v3',
'arguments_schema': [
{'name': 'foo', 'schema': {'type': 'int'}},
{'name': 'bar', 'schema': {'type': 'str'}},
],
'serialization': {'type': 'format', 'formatting_string': 'd'},
},
),
(
core_schema.call_schema,
args(core_schema.arguments_schema([core_schema.arguments_parameter('foo', {'type': 'int'})]), val_function),
{
'type': 'call',
'function': val_function,
'arguments_schema': {'type': 'arguments', 'arguments_schema': [{'name': 'foo', 'schema': {'type': 'int'}}]},
},
),
(
core_schema.custom_error_schema,
args(core_schema.int_schema(), 'foobar', custom_error_message='Hello'),
{
'type': 'custom-error',
'schema': {'type': 'int'},
'custom_error_type': 'foobar',
'custom_error_message': 'Hello',
},
),
(core_schema.json_schema, args({'type': 'int'}), {'type': 'json', 'schema': {'type': 'int'}}),
(core_schema.url_schema, args(), {'type': 'url'}),
(core_schema.multi_host_url_schema, args(), {'type': 'multi-host-url'}),
(
core_schema.lax_or_strict_schema,
args({'type': 'int'}, {'type': 'int'}),
{'type': 'lax-or-strict', 'lax_schema': {'type': 'int'}, 'strict_schema': {'type': 'int'}},
),
(
core_schema.json_or_python_schema,
args({'type': 'int'}, {'type': 'str'}),
{'type': 'json-or-python', 'json_schema': {'type': 'int'}, 'python_schema': {'type': 'str'}},
),
(core_schema.is_subclass_schema, args(MyModel), {'type': 'is-subclass', 'cls': MyModel}),
(
core_schema.definitions_schema,
args({'type': 'definition-ref', 'schema_ref': 'an-int'}, [{'type': 'int', 'ref': 'an-int'}]),
{
'type': 'definitions',
'schema': {'type': 'definition-ref', 'schema_ref': 'an-int'},
'definitions': [{'type': 'int', 'ref': 'an-int'}],
},
),
(core_schema.definition_reference_schema, args('foo'), {'type': 'definition-ref', 'schema_ref': 'foo'}),
(
core_schema.dataclass_args_schema,
args('Foo', [{'name': 'foo', 'type': 'dataclass-field', 'schema': {'type': 'int'}}]),
{
'type': 'dataclass-args',
'dataclass_name': 'Foo',
'fields': [{'name': 'foo', 'type': 'dataclass-field', 'schema': {'type': 'int'}}],
},
),
(
core_schema.dataclass_schema,
args(MyDataclass, {'type': 'int'}, ['foobar']),
{'type': 'dataclass', 'schema': {'type': 'int'}, 'fields': ['foobar'], 'cls': MyDataclass},
),
(
core_schema.dataclass_schema,
args(MyDataclass, {'type': 'int'}, ['foobar'], slots=True),
{'type': 'dataclass', 'schema': {'type': 'int'}, 'fields': ['foobar'], 'cls': MyDataclass, 'slots': True},
),
(core_schema.uuid_schema, args(), {'type': 'uuid'}),
(core_schema.decimal_schema, args(), {'type': 'decimal'}),
(core_schema.decimal_schema, args(multiple_of=5, gt=1.2), {'type': 'decimal', 'multiple_of': 5, 'gt': 1.2}),
(core_schema.complex_schema, args(), {'type': 'complex'}),
(core_schema.invalid_schema, args(), {'type': 'invalid'}),
]
@pytest.mark.parametrize('function,args_kwargs,expected_schema', all_schema_functions, ids=ids_function)
def test_schema_functions(function, args_kwargs, expected_schema):
args, kwargs = args_kwargs
schema = function(*args, **kwargs)
assert schema == expected_schema
if schema.get('type') in {None, 'definition-ref', 'typed-dict-field', 'model-field', 'invalid'}:
return
v = SchemaValidator(schema)
try:
v.validate_python('foobar')
except ValidationError:
pass
# also build the serializer, just to check it doesn't raise an error
SchemaSerializer(schema)
def test_all_schema_functions_used():
all_types: set[str] = set()
for schema_typeddict in core_schema.CoreSchema.__args__:
annotation = get_type_hints(schema_typeddict, include_extras=True)['type']
inspected_ann = inspect_annotation(annotation, annotation_source=AnnotationSource.TYPED_DICT)
annotation = inspected_ann.type
assert annotation is not UNKNOWN
all_types.add(get_args(annotation)[0])
types_used = {args['type'] for _, _, args in all_schema_functions if 'type' in args}
# isn't a CoreSchema type
types_used.remove('typed-dict-field')
types_used.remove('model-field')
assert all_types == types_used
def test_invalid_custom_error():
s = core_schema.union_schema([{'type': 'int'}, {'type': 'str'}], custom_error_type='foobar')
with pytest.raises(SchemaError, match=r"KeyError: 'custom_error_message'"):
SchemaValidator(s)
def test_invalid_custom_error_type():
s = core_schema.union_schema(
[{'type': 'int'}, {'type': 'str'}], custom_error_type='finite_number', custom_error_message='x'
)
msg = "custom_error.message should not be provided if 'custom_error_type' matches a known error"
with pytest.raises(SchemaError, match=msg):
SchemaValidator(s)
def repr_function(value, _info):
return repr(value)
@pytest.mark.parametrize('return_schema', [core_schema.str_schema(), core_schema.int_schema()])
def test_expected_serialization_types(return_schema):
SchemaSerializer(
core_schema.any_schema(
serialization=core_schema.plain_serializer_function_ser_schema(
repr_function, info_arg=True, return_schema=return_schema
)
)
)
def test_err_on_invalid() -> None:
with pytest.raises(SchemaError, match='Cannot construct schema with `InvalidSchema` member.'):
SchemaValidator(core_schema.invalid_schema())
| MyEnum |
python | pytorch__pytorch | test/distributed/tensor/test_init.py | {
"start": 1307,
"end": 10273
} | class ____(DTensorTestBase):
@property
def world_size(self):
return 4
def _run_init_op(self, init_op, dist_init_op, eq_op, *args, **kwargs):
# 1d mesh test
device_mesh = self.build_device_mesh()
placements_list = [[Shard(0)], [Shard(1)], [Shard(2)], [Replicate()]]
# even sharding
tensor_size = [4, 8, 12]
for placements in placements_list:
local_tensor_size = tensor_size.copy()
if isinstance(placements[0], Shard):
shard_dim = placements[0].dim
local_tensor_size[shard_dim] //= self.world_size
dist_tensor = dist_init_op(
tensor_size,
*args,
**kwargs,
device_mesh=device_mesh,
placements=placements,
)
ones_expected = init_op(local_tensor_size, *args, **kwargs)
eq_op(ones_expected, dist_tensor.to_local())
# uneven sharding
tensor_size = [5, 10, 15]
for placements in placements_list:
dist_tensor = dist_init_op(
tensor_size,
*args,
**kwargs,
device_mesh=device_mesh,
placements=placements,
)
if isinstance(placements[0], Shard):
shard_dim = placements[0].dim
exp_tensor_list = list(
torch.chunk(
init_op(tensor_size, *args, **kwargs),
self.world_size,
dim=shard_dim,
)
)
@maybe_run_for_local_tensor
def check_per_rank_chunk(rank, local_tensor):
if rank < len(exp_tensor_list):
eq_op(exp_tensor_list[rank], local_tensor)
check_per_rank_chunk(self.rank, dist_tensor.to_local())
else:
exp_tensor = init_op(tensor_size, *args, **kwargs)
eq_op(exp_tensor, dist_tensor.to_local())
# empty shape
local_tensor = dist_init_op(
[], *args, **kwargs, device_mesh=device_mesh, placements=[Replicate()]
).to_local()
expected_tensor = init_op([], *args, **kwargs)
eq_op(expected_tensor, local_tensor)
@with_comms
def test_ones(self):
self._run_init_op(
torch.ones,
torch.distributed.tensor.ones,
self.assertEqual,
requires_grad=True,
)
@with_comms
def test_empty(self):
self._run_init_op(
torch.empty,
torch.distributed.tensor.empty,
lambda x, y: (x.shape == y.shape)
and (x.dtype == y.dtype)
and (x.layout == y.layout),
requires_grad=True,
)
@with_comms
def test_full(self):
self._run_init_op(
torch.full,
torch.distributed.tensor.full,
self.assertEqual,
123.4,
requires_grad=True,
)
@with_comms
def test_zeros(self):
self._run_init_op(
torch.zeros,
torch.distributed.tensor.zeros,
self.assertEqual,
requires_grad=True,
)
@with_comms
def test_zeros_full_mesh(self):
# construct a gpu device 1d mesh
mesh = self.build_device_mesh()
placements = [Shard(0)]
size = [32, 3]
dist_tensor = zeros(size, device_mesh=mesh, placements=placements)
self.assertEqual(dist_tensor.size(), torch.Size(size))
local_tensor = dist_tensor.to_local()
self.assertEqual(local_tensor.size(), torch.Size([8, 3]))
local_tensor = torch.zeros(8, 3)
self.assertEqual(dist_tensor.to_local(), local_tensor)
self.assertEqual(dist_tensor.device.type, self.device_type)
# 1d sharded unevenly
size = [31, 3]
dist_tensor = zeros(size, device_mesh=mesh, placements=placements)
self.assertEqual(dist_tensor.size(), torch.Size(size))
local_tensor = dist_tensor.to_local()
@maybe_run_for_local_tensor
def check_per_rank_tensors(rank, local_tensor):
if rank <= 2:
self.assertEqual(local_tensor.size(), torch.Size([8, 3]))
self.assertEqual(torch.zeros(8, 3), local_tensor)
else:
self.assertEqual(local_tensor.size(), torch.Size([7, 3]))
self.assertEqual(torch.zeros(7, 3), local_tensor)
check_per_rank_tensors(self.rank, local_tensor)
# construct a gpu device mesh with 2d: shard, replicate
mesh = DeviceMesh(self.device_type, torch.arange(self.world_size).reshape(2, 2))
placements = [Shard(0), Replicate()]
size = [32, 4]
dist_tensor = zeros(size, device_mesh=mesh, placements=placements)
self.assertEqual(dist_tensor.size(), torch.Size(size))
local_tensor = dist_tensor.to_local()
self.assertEqual(local_tensor.size(), torch.Size([16, 4]))
self.assertEqual(local_tensor, torch.zeros([16, 4]))
# construct a gpu device mesh with 2d: shard, shard
placements = [Shard(0), Shard(1)]
size = [32, 4]
dist_tensor = zeros(size, device_mesh=mesh, placements=placements)
self.assertEqual(dist_tensor.size(), torch.Size(size))
local_tensor = dist_tensor.to_local()
self.assertEqual(local_tensor.size(), torch.Size([16, 2]))
self.assertEqual(local_tensor, torch.zeros([16, 2]))
# 2d sharded unevenly
placements = [Shard(0), Shard(1)]
size = [31, 3]
dist_tensor = zeros(size, device_mesh=mesh, placements=placements)
self.assertEqual(dist_tensor.size(), torch.Size(size))
local_tensor = dist_tensor.to_local()
if self.rank == 0:
self.assertEqual(local_tensor, torch.zeros([16, 2]))
elif self.rank == 1:
self.assertEqual(local_tensor, torch.zeros([16, 1]))
elif self.rank == 2:
self.assertEqual(local_tensor, torch.zeros([15, 2]))
elif self.rank == 3:
self.assertEqual(local_tensor, torch.zeros([15, 1]))
@with_comms
def test_zeros_submesh(self):
# default world_size is 4
# construct a gpu device 1d mesh, with no sub pg initialized
sub_mesh_list = [0, 3]
mesh = DeviceMesh(self.device_type, sub_mesh_list)
placements = [Shard(0)]
size = [32, 3]
dist_tensor = zeros(size, device_mesh=mesh, placements=placements)
self.assertEqual(dist_tensor.size(), torch.Size(size))
local_tensor = dist_tensor.to_local()
if self.rank in sub_mesh_list:
self.assertEqual(local_tensor.size(), torch.Size([16, 3]))
self.assertEqual(local_tensor, torch.zeros([16, 3]))
else:
self.assertEqual(local_tensor.size(), torch.Size([0]))
self.assertEqual(local_tensor, torch.zeros(0))
# construct a gpu device 1d mesh: unevenly, with subpg initialized
sub_mesh_list = [0, 1, 3]
mesh = DeviceMesh(self.device_type, sub_mesh_list)
placements = [Shard(0)]
size = [32, 3]
dist_tensor = zeros(size, device_mesh=mesh, placements=placements)
self.assertEqual(dist_tensor.size(), torch.Size(size))
local_tensor = dist_tensor.to_local()
if self.rank in sub_mesh_list:
if self.rank != 3:
self.assertEqual(local_tensor.size(), torch.Size([11, 3]))
self.assertEqual(local_tensor, torch.zeros([11, 3]))
else:
self.assertEqual(local_tensor.size(), torch.Size([10, 3]))
self.assertEqual(local_tensor, torch.zeros([10, 3]))
else:
self.assertEqual(local_tensor.size(), torch.Size([0]))
self.assertEqual(local_tensor, torch.tensor([]))
# construct a gpu device 2d mesh, with no subpg initialized
sub_mesh_list = [[0], [3]]
mesh = DeviceMesh(self.device_type, sub_mesh_list)
placements = [Shard(0), Shard(1)]
size = [32, 3]
dist_tensor = zeros(size, device_mesh=mesh, placements=placements)
self.assertEqual(dist_tensor.size(), torch.Size(size))
local_tensor = dist_tensor.to_local()
if self.rank in [0, 3]:
self.assertEqual(local_tensor.size(), torch.Size([16, 3]))
self.assertEqual(local_tensor, torch.zeros([16, 3]))
else:
self.assertEqual(local_tensor.size(), torch.Size([0]))
self.assertEqual(local_tensor, torch.tensor([]))
DTensorConstructorTestWithLocalTensor = create_local_tensor_test_class(
DTensorConstructorTest,
skipped_tests=[
# Non-contigous sub-meshes are not supported
"test_zeros_submesh",
],
)
if __name__ == "__main__":
run_tests()
| DTensorConstructorTest |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/qembeddingbag_test.py | {
"start": 182,
"end": 1344
} | class ____(op_bench.TorchBenchmarkBase):
def init(
self,
embeddingbags,
dim,
mode,
input_size,
offset,
sparse,
include_last_offset,
device,
):
self.embedding = nnq.EmbeddingBag(
num_embeddings=embeddingbags,
embedding_dim=dim,
mode=mode,
include_last_offset=include_last_offset,
).to(device=device)
numpy.random.seed((1 << 32) - 1)
self.input = torch.tensor(
numpy.random.randint(0, embeddingbags, input_size), device=device
).long()
offset = torch.LongTensor([offset], device=device)
self.offset = torch.cat(
(offset, torch.tensor([self.input.size(0)], dtype=torch.long)), 0
)
self.inputs = {"input": self.input, "offset": self.offset}
self.set_module_name("qEmbeddingBag")
def forward(self, input, offset):
return self.embedding(input, offset)
op_bench.generate_pt_test(configs.embeddingbag_short_configs, QEmbeddingBagBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| QEmbeddingBagBenchmark |
python | getsentry__sentry | tests/sentry/demo_mode/test_tasks.py | {
"start": 642,
"end": 13033
} | class ____(TestCase):
def setUp(self) -> None:
self.source_org = self.create_organization(slug="source_org")
self.target_org = self.create_organization(slug="target_org")
self.unrelated_org = self.create_organization(slug="unrelated_org")
self.empty_org = self.create_organization(slug="empty_org")
self.source_proj_foo = self.create_project(organization=self.source_org, slug="foo")
self.target_proj_foo = self.create_project(organization=self.target_org, slug="foo")
self.unrelated_proj_foo = self.create_project(organization=self.unrelated_org, slug="foo")
self.source_proj_bar = self.create_project(organization=self.source_org, slug="bar")
self.target_proj_baz = self.create_project(organization=self.target_org, slug="baz")
def set_up_artifact_bundle(
self,
organization: Organization,
project: Project,
date_uploaded: datetime | None = None,
) -> tuple[ArtifactBundle, ProjectArtifactBundle, ReleaseArtifactBundle]:
date_uploaded = date_uploaded or timezone.now()
artifact_bundle = self.create_artifact_bundle(org=organization, date_uploaded=date_uploaded)
project_artifact_bundle = ProjectArtifactBundle.objects.create(
organization_id=organization.id,
project_id=project.id,
artifact_bundle_id=artifact_bundle.id,
)
release_artifact_bundle = ReleaseArtifactBundle.objects.create(
organization_id=organization.id,
artifact_bundle_id=artifact_bundle.id,
dist_name="dist",
release_name="release",
)
return artifact_bundle, project_artifact_bundle, release_artifact_bundle
def set_up_proguard_artifact_release(
self,
organization: Organization,
project: Project,
date_added: datetime | None = None,
) -> ProguardArtifactRelease:
date_added = date_added or timezone.now()
proguard_artifact_release = ProguardArtifactRelease.objects.create(
organization_id=organization.id,
project_id=project.id,
release_name="release",
proguard_uuid=uuid1(),
project_debug_file=self.create_dif_file(project),
date_added=date_added,
)
return proguard_artifact_release
def last_three_days(self) -> datetime:
return timezone.now() - timedelta(days=3)
def test_sync_artifact_bundles_no_bundles(self) -> None:
_sync_artifact_bundles(
source_org=self.source_org,
target_org=self.target_org,
cutoff_date=self.last_three_days(),
)
assert not ArtifactBundle.objects.all().exists()
def test_sync_artifact_bundles_with_differences(self) -> None:
(source_artifact_bundle, _, __) = self.set_up_artifact_bundle(
self.source_org, self.source_proj_foo
)
assert not ArtifactBundle.objects.filter(organization_id=self.target_org.id).exists()
_sync_artifact_bundles(
source_org=self.source_org,
target_org=self.target_org,
cutoff_date=self.last_three_days(),
)
target_artifact_bundles = ArtifactBundle.objects.get(organization_id=self.target_org.id)
assert target_artifact_bundles.bundle_id == source_artifact_bundle.bundle_id
def test_sync_artifact_bundles_does_not_touch_other_orgs(self) -> None:
self.set_up_artifact_bundle(self.source_org, self.source_proj_foo)
self.set_up_artifact_bundle(self.unrelated_org, self.unrelated_proj_foo)
_sync_artifact_bundles(
source_org=self.source_org,
target_org=self.target_org,
cutoff_date=self.last_three_days(),
)
unrelated_artifact_bundles = ArtifactBundle.objects.filter(
organization_id=self.unrelated_org.id
)
assert unrelated_artifact_bundles.count() == 1
def test_sync_artifact_bundles_with_old_uploads(self) -> None:
self.set_up_artifact_bundle(
self.source_org, self.source_proj_foo, date_uploaded=timezone.now() - timedelta(days=2)
)
assert not ArtifactBundle.objects.filter(organization_id=self.target_org.id).exists()
_sync_artifact_bundles(
source_org=self.source_org,
target_org=self.target_org,
cutoff_date=timezone.now() - timedelta(days=1),
)
assert not ArtifactBundle.objects.filter(organization_id=self.target_org.id).exists()
def test_sync_artifact_bundles_only_once(self) -> None:
(source_artifact_bundle, _, __) = self.set_up_artifact_bundle(
self.source_org, self.source_proj_foo
)
_sync_artifact_bundles(
source_org=self.source_org,
target_org=self.target_org,
cutoff_date=self.last_three_days(),
)
_sync_artifact_bundles(
source_org=self.source_org,
target_org=self.target_org,
cutoff_date=self.last_three_days(),
)
_sync_artifact_bundles(
source_org=self.source_org,
target_org=self.target_org,
cutoff_date=self.last_three_days(),
)
target_artifact_bundles = ArtifactBundle.objects.filter(organization_id=self.target_org.id)
assert target_artifact_bundles.count() == 1
assert target_artifact_bundles[0].bundle_id == source_artifact_bundle.bundle_id
def test_sync_artifact_bundles_with_empty_org_does_not_fail(self) -> None:
self.set_up_artifact_bundle(self.source_org, self.source_proj_foo)
_sync_artifact_bundles(
source_org=self.source_org,
target_org=self.empty_org,
cutoff_date=self.last_three_days(),
)
def test_sync_project_artifact_bundles(self) -> None:
self.set_up_artifact_bundle(self.source_org, self.source_proj_foo)
_sync_artifact_bundles(
source_org=self.source_org,
target_org=self.target_org,
cutoff_date=self.last_three_days(),
)
target_project_artifact_bundle = ProjectArtifactBundle.objects.get(
organization_id=self.target_org.id,
project_id=self.target_proj_foo.id,
)
assert target_project_artifact_bundle.project_id == self.target_proj_foo.id
assert target_project_artifact_bundle.organization_id == self.target_org.id
def test_sync_release_artifact_bundles(self) -> None:
(_, __, source_release_bundle) = self.set_up_artifact_bundle(
self.source_org, self.source_proj_foo
)
_sync_artifact_bundles(
source_org=self.source_org,
target_org=self.target_org,
cutoff_date=self.last_three_days(),
)
target_release_bundle = ReleaseArtifactBundle.objects.get(
organization_id=self.target_org.id,
)
assert target_release_bundle.dist_name == source_release_bundle.dist_name
assert target_release_bundle.release_name == source_release_bundle.release_name
assert target_release_bundle.organization_id == self.target_org.id
@mock.patch("sentry.demo_mode.tasks._sync_release_artifact_bundle", side_effect=IntegrityError)
def test_sync_artifact_bundles_rolls_back_on_error(self, _: mock.MagicMock) -> None:
self.set_up_artifact_bundle(self.source_org, self.source_proj_foo)
_sync_artifact_bundles(
source_org=self.source_org,
target_org=self.target_org,
cutoff_date=self.last_three_days(),
)
assert not ArtifactBundle.objects.filter(organization_id=self.target_org.id).exists()
assert not ProjectArtifactBundle.objects.filter(organization_id=self.target_org.id).exists()
assert not ReleaseArtifactBundle.objects.filter(organization_id=self.target_org.id).exists()
def test_sync_project_debug_files(self) -> None:
source_project_debug_file = self.create_dif_file(self.source_proj_foo)
assert not ProjectDebugFile.objects.filter(
project_id=self.target_proj_foo.id,
debug_id=source_project_debug_file.debug_id,
).exists()
_sync_project_debug_files(
source_org=self.source_org,
target_org=self.target_org,
cutoff_date=self.last_three_days(),
)
target_project_debug_file = ProjectDebugFile.objects.get(
project_id=self.target_proj_foo.id,
debug_id=source_project_debug_file.debug_id,
)
assert target_project_debug_file.debug_id == source_project_debug_file.debug_id
assert target_project_debug_file.code_id == source_project_debug_file.code_id
assert target_project_debug_file.cpu_name == source_project_debug_file.cpu_name
def test_sync_project_debug_files_with_old_uploads(self) -> None:
source_project_debug_file = self.create_dif_file(
self.source_proj_foo,
date_accessed=timezone.now() - timedelta(days=2),
)
assert not ProjectDebugFile.objects.filter(
project_id=self.target_proj_foo.id,
debug_id=source_project_debug_file.debug_id,
).exists()
_sync_project_debug_files(
source_org=self.source_org,
target_org=self.target_org,
cutoff_date=self.last_three_days(),
)
assert ProjectDebugFile.objects.filter(
project_id=self.target_proj_foo.id,
debug_id=source_project_debug_file.debug_id,
).exists()
def test_sync_project_debug_files_with_empty_org_does_not_fail(self) -> None:
self.create_dif_file(self.source_proj_foo)
_sync_project_debug_files(
source_org=self.source_org,
target_org=self.empty_org,
cutoff_date=self.last_three_days(),
)
def test_sync_proguard_artifact_releases(self) -> None:
source_proguard_artifact_release = self.set_up_proguard_artifact_release(
self.source_org,
self.source_proj_foo,
)
assert not ProguardArtifactRelease.objects.filter(
organization_id=self.target_org.id,
proguard_uuid=source_proguard_artifact_release.proguard_uuid,
).exists()
_sync_proguard_artifact_releases(
source_org=self.source_org,
target_org=self.target_org,
cutoff_date=self.last_three_days(),
)
target_proguard_artifact_release = ProguardArtifactRelease.objects.get(
organization_id=self.target_org.id,
proguard_uuid=source_proguard_artifact_release.proguard_uuid,
)
assert (
target_proguard_artifact_release.release_name
== source_proguard_artifact_release.release_name
)
assert (
target_proguard_artifact_release.proguard_uuid
== source_proguard_artifact_release.proguard_uuid
)
assert target_proguard_artifact_release.project_id == self.target_proj_foo.id
def test_sync_proguard_artifact_releases_with_old_uploads(self) -> None:
source_proguard_artifact_release = self.set_up_proguard_artifact_release(
self.source_org,
self.source_proj_foo,
date_added=timezone.now() - timedelta(days=2),
)
assert not ProguardArtifactRelease.objects.filter(
organization_id=self.target_org.id,
proguard_uuid=source_proguard_artifact_release.proguard_uuid,
).exists()
_sync_artifact_bundles(
source_org=self.source_org,
target_org=self.target_org,
cutoff_date=self.last_three_days(),
)
assert not ProguardArtifactRelease.objects.filter(
organization_id=self.target_org.id,
proguard_uuid=source_proguard_artifact_release.proguard_uuid,
).exists()
def test_sync_proguard_artifact_releases_with_empty_org_does_not_fail(self) -> None:
self.set_up_proguard_artifact_release(self.source_org, self.source_proj_foo)
_sync_proguard_artifact_releases(
source_org=self.source_org,
target_org=self.empty_org,
cutoff_date=self.last_three_days(),
)
| SyncArtifactBundlesTest |
python | getsentry__sentry | tests/sentry/workflow_engine/processors/test_data_condition_group.py | {
"start": 16484,
"end": 18442
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.dcg: DataConditionGroup = self.create_data_condition_group()
def create_slow_condition(self, condition_group: DataConditionGroup) -> DataCondition:
return self.create_data_condition(
condition_group=condition_group,
type=Condition.EVENT_FREQUENCY_COUNT,
comparison={
"interval": "1d",
"value": 7,
},
)
def test_get_slow_conditions_for_groups_basic(self) -> None:
condition = self.create_slow_condition(self.dcg)
assert get_slow_conditions_for_groups([self.dcg.id]) == {self.dcg.id: [condition]}
def test_get_slow_conditions_for_groups__no_slow_conditions(self) -> None:
self.create_data_condition(condition_group=self.dcg, type=Condition.EQUAL)
assert get_slow_conditions_for_groups([self.dcg.id]) == {self.dcg.id: []}
def test_multiple_dcgs(self) -> None:
dcg2 = self.create_data_condition_group()
condition1 = self.create_slow_condition(self.dcg)
condition2 = self.create_slow_condition(dcg2)
self.create_data_condition(condition_group=self.dcg, type=Condition.EQUAL)
condition4 = self.create_slow_condition(dcg2)
dcg3 = self.create_data_condition_group()
condition5 = self.create_slow_condition(dcg3)
assert get_slow_conditions_for_groups([self.dcg.id, dcg2.id]) == {
self.dcg.id: [condition1],
dcg2.id: [condition2, condition4],
}
assert get_slow_conditions_for_groups([self.dcg.id, dcg2.id, dcg3.id]) == {
self.dcg.id: [condition1],
dcg2.id: [condition2, condition4],
dcg3.id: [condition5],
}
# Constants to make TestTriggerResult easier to read
TRUE = TriggerResult.TRUE
FALSE = TriggerResult.FALSE
ERR = ConditionError(msg="test error")
| TestGetSlowConditionsForGroups |
python | joke2k__faker | tests/providers/test_company.py | {
"start": 14254,
"end": 14339
} | class ____(TestFilPh):
"""Test tl_PH company provider methods"""
pass
| TestTlPh |
python | coleifer__peewee | pwiz.py | {
"start": 483,
"end": 8215
} | class ____(object):
def __init__(self, *_, **__): pass
"""
DATABASE_ALIASES = {
CockroachDatabase: ['cockroach', 'cockroachdb', 'crdb'],
MySQLDatabase: ['mysql', 'mysqldb'],
PostgresqlDatabase: ['postgres', 'postgresql'],
SqliteDatabase: ['sqlite', 'sqlite3'],
}
DATABASE_MAP = dict((value, key)
for key in DATABASE_ALIASES
for value in DATABASE_ALIASES[key])
def make_introspector(database_type, database_name, **kwargs):
if database_type not in DATABASE_MAP:
err('Unrecognized database, must be one of: %s' %
', '.join(DATABASE_MAP.keys()))
sys.exit(1)
schema = kwargs.pop('schema', None)
DatabaseClass = DATABASE_MAP[database_type]
db = DatabaseClass(database_name, **kwargs)
return Introspector.from_database(db, schema=schema)
def print_models(introspector, tables=None, preserve_order=False,
include_views=False, ignore_unknown=False, snake_case=True):
database = introspector.introspect(table_names=tables,
include_views=include_views,
snake_case=snake_case)
db_kwargs = introspector.get_database_kwargs()
header = HEADER % (
introspector.get_additional_imports(),
introspector.get_database_class().__name__,
introspector.get_database_name().replace('\\', '\\\\'),
', **%s' % repr(db_kwargs) if db_kwargs else '')
print_(header)
if not ignore_unknown:
print_(UNKNOWN_FIELD)
print_(BASE_MODEL)
def _print_table(table, seen, accum=None):
accum = accum or []
foreign_keys = database.foreign_keys[table]
for foreign_key in foreign_keys:
dest = foreign_key.dest_table
# In the event the destination table has already been pushed
# for printing, then we have a reference cycle.
if dest in accum and table not in accum:
print_('# Possible reference cycle: %s' % dest)
# If this is not a self-referential foreign key, and we have
# not already processed the destination table, do so now.
if dest not in seen and dest not in accum:
seen.add(dest)
if dest != table:
_print_table(dest, seen, accum + [table])
print_('class %s(BaseModel):' % database.model_names[table])
columns = database.columns[table].items()
if not preserve_order:
columns = sorted(columns)
primary_keys = database.primary_keys[table]
for name, column in columns:
skip = all([
name in primary_keys,
name == 'id',
len(primary_keys) == 1,
column.field_class in introspector.pk_classes])
if skip:
continue
if column.primary_key and len(primary_keys) > 1:
# If we have a CompositeKey, then we do not want to explicitly
# mark the columns as being primary keys.
column.primary_key = False
is_unknown = column.field_class is UnknownField
if is_unknown and ignore_unknown:
disp = '%s - %s' % (column.name, column.raw_column_type or '?')
print_(' # %s' % disp)
else:
print_(' %s' % column.get_field())
print_('')
print_(' class Meta:')
print_(' table_name = \'%s\'' % table)
multi_column_indexes = database.multi_column_indexes(table)
if multi_column_indexes:
print_(' indexes = (')
for fields, unique in sorted(multi_column_indexes):
print_(' ((%s), %s),' % (
', '.join("'%s'" % field for field in fields),
unique,
))
print_(' )')
if introspector.schema:
print_(' schema = \'%s\'' % introspector.schema)
if len(primary_keys) > 1:
pk_field_names = sorted([
field.name for col, field in columns
if col in primary_keys])
pk_list = ', '.join("'%s'" % pk for pk in pk_field_names)
print_(' primary_key = CompositeKey(%s)' % pk_list)
elif not primary_keys:
print_(' primary_key = False')
print_('')
seen.add(table)
seen = set()
for table in sorted(database.model_names.keys()):
if table not in seen:
if not tables or table in tables:
_print_table(table, seen)
def print_header(cmd_line, introspector):
timestamp = datetime.datetime.now()
print_('# Code generated by:')
print_('# python -m pwiz %s' % cmd_line)
print_('# Date: %s' % timestamp.strftime('%B %d, %Y %I:%M%p'))
print_('# Database: %s' % introspector.get_database_name())
print_('# Peewee version: %s' % peewee_version)
print_('')
def err(msg):
sys.stderr.write('\033[91m%s\033[0m\n' % msg)
sys.stderr.flush()
def get_option_parser():
parser = OptionParser(usage='usage: %prog [options] database_name')
ao = parser.add_option
ao('-H', '--host', dest='host')
ao('-p', '--port', dest='port', type='int')
ao('-u', '--user', dest='user')
ao('-P', '--password', dest='password', action='store_true')
engines = sorted(DATABASE_MAP)
ao('-e', '--engine', dest='engine', choices=engines,
help=('Database type, e.g. sqlite, mysql, postgresql or cockroachdb. '
'Default is "postgresql".'))
ao('-s', '--schema', dest='schema')
ao('-t', '--tables', dest='tables',
help=('Only generate the specified tables. Multiple table names should '
'be separated by commas.'))
ao('-v', '--views', dest='views', action='store_true',
help='Generate model classes for VIEWs in addition to tables.')
ao('-i', '--info', dest='info', action='store_true',
help=('Add database information and other metadata to top of the '
'generated file.'))
ao('-o', '--preserve-order', action='store_true', dest='preserve_order',
help='Model definition column ordering matches source table.')
ao('-I', '--ignore-unknown', action='store_true', dest='ignore_unknown',
help='Ignore fields whose type cannot be determined.')
ao('-L', '--legacy-naming', action='store_true', dest='legacy_naming',
help='Use legacy table- and column-name generation.')
return parser
def get_connect_kwargs(options):
ops = ('host', 'port', 'user', 'schema')
kwargs = dict((o, getattr(options, o)) for o in ops if getattr(options, o))
if options.password:
kwargs['password'] = getpass()
return kwargs
if __name__ == '__main__':
raw_argv = sys.argv
parser = get_option_parser()
options, args = parser.parse_args()
if len(args) < 1:
err('Missing required parameter "database"')
parser.print_help()
sys.exit(1)
connect = get_connect_kwargs(options)
database = args[-1]
tables = None
if options.tables:
tables = [table.strip() for table in options.tables.split(',')
if table.strip()]
engine = options.engine
if engine is None:
engine = 'sqlite' if os.path.exists(database) else 'postgresql'
introspector = make_introspector(engine, database, **connect)
if options.info:
cmd_line = ' '.join(raw_argv[1:])
print_header(cmd_line, introspector)
print_models(introspector, tables, options.preserve_order, options.views,
options.ignore_unknown, not options.legacy_naming)
| UnknownField |
python | plotly__plotly.py | plotly/graph_objs/funnel/_outsidetextfont.py | {
"start": 233,
"end": 17188
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "funnel"
_path_str = "funnel.outsidetextfont"
_valid_props = {
"color",
"colorsrc",
"family",
"familysrc",
"lineposition",
"linepositionsrc",
"shadow",
"shadowsrc",
"size",
"sizesrc",
"style",
"stylesrc",
"textcase",
"textcasesrc",
"variant",
"variantsrc",
"weight",
"weightsrc",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def linepositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`lineposition`.
The 'linepositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["linepositionsrc"]
@linepositionsrc.setter
def linepositionsrc(self, val):
self["linepositionsrc"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def shadowsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `shadow`.
The 'shadowsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["shadowsrc"]
@shadowsrc.setter
def shadowsrc(self, val):
self["shadowsrc"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def stylesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `style`.
The 'stylesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["stylesrc"]
@stylesrc.setter
def stylesrc(self, val):
self["stylesrc"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def textcasesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `textcase`.
The 'textcasesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textcasesrc"]
@textcasesrc.setter
def textcasesrc(self, val):
self["textcasesrc"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def variantsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `variant`.
The 'variantsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["variantsrc"]
@variantsrc.setter
def variantsrc(self, val):
self["variantsrc"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def weightsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `weight`.
The 'weightsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["weightsrc"]
@weightsrc.setter
def weightsrc(self, val):
self["weightsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
lineposition=None,
linepositionsrc=None,
shadow=None,
shadowsrc=None,
size=None,
sizesrc=None,
style=None,
stylesrc=None,
textcase=None,
textcasesrc=None,
variant=None,
variantsrc=None,
weight=None,
weightsrc=None,
**kwargs,
):
"""
Construct a new Outsidetextfont object
Sets the font used for `text` lying outside the bar.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.funnel.Outsidetextfont`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
Returns
-------
Outsidetextfont
"""
super().__init__("outsidetextfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.funnel.Outsidetextfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.funnel.Outsidetextfont`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("family", arg, family)
self._set_property("familysrc", arg, familysrc)
self._set_property("lineposition", arg, lineposition)
self._set_property("linepositionsrc", arg, linepositionsrc)
self._set_property("shadow", arg, shadow)
self._set_property("shadowsrc", arg, shadowsrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("style", arg, style)
self._set_property("stylesrc", arg, stylesrc)
self._set_property("textcase", arg, textcase)
self._set_property("textcasesrc", arg, textcasesrc)
self._set_property("variant", arg, variant)
self._set_property("variantsrc", arg, variantsrc)
self._set_property("weight", arg, weight)
self._set_property("weightsrc", arg, weightsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Outsidetextfont |
python | aio-libs__aiohttp | aiohttp/client_reqrep.py | {
"start": 31433,
"end": 49169
} | class ____(ClientRequestBase):
_EMPTY_BODY = payload.PAYLOAD_REGISTRY.get(b"", disposition=None)
_body = _EMPTY_BODY
_continue = None # waiter future for '100 Continue' response
GET_METHODS = {
hdrs.METH_GET,
hdrs.METH_HEAD,
hdrs.METH_OPTIONS,
hdrs.METH_TRACE,
}
DEFAULT_HEADERS = {
hdrs.ACCEPT: "*/*",
hdrs.ACCEPT_ENCODING: _gen_default_accept_encoding(),
}
def __init__(
self,
method: str,
url: URL,
*,
params: Query,
headers: CIMultiDict[str],
skip_auto_headers: Iterable[str] | None,
data: Any,
cookies: BaseCookie[str],
auth: BasicAuth | None,
version: HttpVersion,
compress: str | bool,
chunked: bool | None,
expect100: bool,
loop: asyncio.AbstractEventLoop,
response_class: type[ClientResponse],
proxy: URL | None,
proxy_auth: BasicAuth | None,
timer: BaseTimerContext,
session: "ClientSession",
ssl: SSLContext | bool | Fingerprint,
proxy_headers: CIMultiDict[str] | None,
traces: list["Trace"],
trust_env: bool,
server_hostname: str | None,
**kwargs: object,
):
# kwargs exists so authors of subclasses should expect to pass through unknown
# arguments. This allows us to safely add new arguments in future releases.
# But, we should never receive unknown arguments here in the parent class, this
# would indicate an argument has been named wrong or similar in the subclass.
assert not kwargs, "Unexpected arguments to ClientRequest"
if params:
url = url.extend_query(params)
super().__init__(method, url, headers=headers, auth=auth, loop=loop, ssl=ssl)
if proxy is not None:
assert type(proxy) is URL, proxy
self._session = session
self.chunked = chunked
self.response_class = response_class
self._timer = timer
self.server_hostname = server_hostname
self.version = version
self._update_auto_headers(skip_auto_headers)
self._update_cookies(cookies)
self._update_content_encoding(data, compress)
self._update_proxy(proxy, proxy_auth, proxy_headers)
self._update_body_from_data(data)
if data is not None or self.method not in self.GET_METHODS:
self._update_transfer_encoding()
self._update_expect_continue(expect100)
self._traces = traces
@property
def body(self) -> payload.Payload:
return self._body
@property
def skip_auto_headers(self) -> CIMultiDict[None]:
return self._skip_auto_headers or CIMultiDict()
@property
def connection_key(self) -> ConnectionKey:
if proxy_headers := self.proxy_headers:
h: int | None = hash(tuple(proxy_headers.items()))
else:
h = None
url = self.url
return tuple.__new__(
ConnectionKey,
(
url.raw_host or "",
url.port,
url.scheme in _SSL_SCHEMES,
self._ssl,
self.proxy,
self.proxy_auth,
h,
),
)
@property
def session(self) -> "ClientSession":
"""Return the ClientSession instance.
This property provides access to the ClientSession that initiated
this request, allowing middleware to make additional requests
using the same session.
"""
return self._session
def _update_auto_headers(self, skip_auto_headers: Iterable[str] | None) -> None:
if skip_auto_headers is not None:
self._skip_auto_headers = CIMultiDict(
(hdr, None) for hdr in sorted(skip_auto_headers)
)
used_headers = self.headers.copy()
used_headers.extend(self._skip_auto_headers) # type: ignore[arg-type]
else:
# Fast path when there are no headers to skip
# which is the most common case.
used_headers = self.headers
for hdr, val in self.DEFAULT_HEADERS.items():
if hdr not in used_headers:
self.headers[hdr] = val
if hdrs.USER_AGENT not in used_headers:
self.headers[hdrs.USER_AGENT] = SERVER_SOFTWARE
def _update_cookies(self, cookies: BaseCookie[str]) -> None:
"""Update request cookies header."""
if not cookies:
return
c = SimpleCookie()
if hdrs.COOKIE in self.headers:
# parse_cookie_header for RFC 6265 compliant Cookie header parsing
c.update(parse_cookie_header(self.headers.get(hdrs.COOKIE, "")))
del self.headers[hdrs.COOKIE]
for name, value in cookies.items():
# Use helper to preserve coded_value exactly as sent by server
c[name] = preserve_morsel_with_coded_value(value)
self.headers[hdrs.COOKIE] = c.output(header="", sep=";").strip()
def _update_content_encoding(self, data: Any, compress: bool | str) -> None:
"""Set request content encoding."""
self.compress = None
if not data:
return
if self.headers.get(hdrs.CONTENT_ENCODING):
if compress:
raise ValueError(
"compress can not be set if Content-Encoding header is set"
)
elif compress:
self.compress = compress if isinstance(compress, str) else "deflate"
self.headers[hdrs.CONTENT_ENCODING] = self.compress
self.chunked = True # enable chunked, no need to deal with length
def _update_transfer_encoding(self) -> None:
"""Analyze transfer-encoding header."""
te = self.headers.get(hdrs.TRANSFER_ENCODING, "").lower()
if "chunked" in te:
if self.chunked:
raise ValueError(
"chunked can not be set "
'if "Transfer-Encoding: chunked" header is set'
)
elif self.chunked:
if hdrs.CONTENT_LENGTH in self.headers:
raise ValueError(
"chunked can not be set if Content-Length header is set"
)
self.headers[hdrs.TRANSFER_ENCODING] = "chunked"
def _update_body_from_data(self, body: Any) -> None:
"""Update request body from data."""
if body is None:
self._body = self._EMPTY_BODY
# Set Content-Length to 0 when body is None for methods that expect a body
if (
self.method not in self.GET_METHODS
and not self.chunked
and hdrs.CONTENT_LENGTH not in self.headers
):
self.headers[hdrs.CONTENT_LENGTH] = "0"
return
# FormData
if isinstance(body, FormData):
body = body()
else:
try:
body = payload.PAYLOAD_REGISTRY.get(body, disposition=None)
except payload.LookupError:
boundary = None
if hdrs.CONTENT_TYPE in self.headers:
boundary = parse_mimetype(
self.headers[hdrs.CONTENT_TYPE]
).parameters.get("boundary")
body = FormData(body, boundary=boundary)()
self._body = body
# enable chunked encoding if needed
if not self.chunked and hdrs.CONTENT_LENGTH not in self.headers:
if (size := body.size) is not None:
self.headers[hdrs.CONTENT_LENGTH] = str(size)
else:
self.chunked = True
# copy payload headers
assert body.headers
headers = self.headers
skip_headers = self._skip_auto_headers
for key, value in body.headers.items():
if key in headers or (skip_headers is not None and key in skip_headers):
continue
headers[key] = value
def _update_body(self, body: Any) -> None:
"""Update request body after its already been set."""
# Remove existing Content-Length header since body is changing
if hdrs.CONTENT_LENGTH in self.headers:
del self.headers[hdrs.CONTENT_LENGTH]
# Remove existing Transfer-Encoding header to avoid conflicts
if self.chunked and hdrs.TRANSFER_ENCODING in self.headers:
del self.headers[hdrs.TRANSFER_ENCODING]
# Now update the body using the existing method
self._update_body_from_data(body)
# Update transfer encoding headers if needed (same logic as __init__)
if body is not None or self.method not in self.GET_METHODS:
self._update_transfer_encoding()
async def update_body(self, body: Any) -> None:
"""
Update request body and close previous payload if needed.
This method safely updates the request body by first closing any existing
payload to prevent resource leaks, then setting the new body.
IMPORTANT: Always use this method instead of setting request.body directly.
Direct assignment to request.body will leak resources if the previous body
contains file handles, streams, or other resources that need cleanup.
Args:
body: The new body content. Can be:
- bytes/bytearray: Raw binary data
- str: Text data (will be encoded using charset from Content-Type)
- FormData: Form data that will be encoded as multipart/form-data
- Payload: A pre-configured payload object
- AsyncIterable: An async iterable of bytes chunks
- File-like object: Will be read and sent as binary data
- None: Clears the body
Usage:
# CORRECT: Use update_body
await request.update_body(b"new request data")
# WRONG: Don't set body directly
# request.body = b"new request data" # This will leak resources!
# Update with form data
form_data = FormData()
form_data.add_field('field', 'value')
await request.update_body(form_data)
# Clear body
await request.update_body(None)
Note:
This method is async because it may need to close file handles or
other resources associated with the previous payload. Always await
this method to ensure proper cleanup.
Warning:
Setting request.body directly is highly discouraged and can lead to:
- Resource leaks (unclosed file handles, streams)
- Memory leaks (unreleased buffers)
- Unexpected behavior with streaming payloads
It is not recommended to change the payload type in middleware. If the
body was already set (e.g., as bytes), it's best to keep the same type
rather than converting it (e.g., to str) as this may result in unexpected
behavior.
See Also:
- update_body_from_data: Synchronous body update without cleanup
- body property: Direct body access (STRONGLY DISCOURAGED)
"""
# Close existing payload if it exists and needs closing
if self._body is not None:
await self._body.close()
self._update_body(body)
    def _update_expect_continue(self, expect: bool = False) -> None:
        """Arm the ``Expect: 100-continue`` handshake for this request.

        If *expect* is True the header is added; otherwise a caller-supplied
        ``Expect: 100-continue`` header turns the handshake on as well.  When
        active, a future is created that the body writer awaits before
        transmitting the payload.
        """
        if expect:
            self.headers[hdrs.EXPECT] = "100-continue"
        elif (
            hdrs.EXPECT in self.headers
            and self.headers[hdrs.EXPECT].lower() == "100-continue"
        ):
            # The caller set the header directly; honor it.
            expect = True
        if expect:
            # Awaited in _write_bytes() before the body is sent.
            self._continue = self.loop.create_future()
    def _update_proxy(
        self,
        proxy: URL | None,
        proxy_auth: BasicAuth | None,
        proxy_headers: CIMultiDict[str] | None,
    ) -> None:
        """Validate and store proxy settings for this request.

        Clearing the proxy (``proxy is None``) also clears any proxy auth
        and headers so stale values cannot apply to a direct connection.

        Raises:
            ValueError: if *proxy_auth* is set but is not a ``BasicAuth``.
        """
        self.proxy = proxy
        if proxy is None:
            self.proxy_auth = None
            self.proxy_headers = None
            return
        if proxy_auth and not isinstance(proxy_auth, BasicAuth):
            raise ValueError("proxy_auth must be None or BasicAuth() tuple")
        self.proxy_auth = proxy_auth
        self.proxy_headers = proxy_headers
def _create_response(self, task: asyncio.Task[None] | None) -> ClientResponse:
return self.response_class(
self.method,
self.original_url,
writer=task,
continue100=self._continue,
timer=self._timer,
traces=self._traces,
loop=self.loop,
session=self._session,
request_headers=self.headers,
original_url=self.original_url,
)
def _create_writer(self, protocol: BaseProtocol) -> StreamWriter:
writer = StreamWriter(
protocol,
self.loop,
on_chunk_sent=(
functools.partial(self._on_chunk_request_sent, self.method, self.url)
if self._traces
else None
),
on_headers_sent=(
functools.partial(self._on_headers_request_sent, self.method, self.url)
if self._traces
else None
),
)
if self.compress:
writer.enable_compression(self.compress)
if self.chunked is not None:
writer.enable_chunking()
return writer
    def _should_write(self, protocol: BaseProtocol) -> bool:
        """Return True when a body-writing task must be scheduled.

        Writing is required if there is a non-empty body, a pending
        100-continue handshake, or the transport is currently paused.
        """
        return (
            self.body.size != 0 or self._continue is not None or protocol.writing_paused
        )
async def _write_bytes(
self,
writer: AbstractStreamWriter,
conn: "Connection",
content_length: int | None,
) -> None:
"""
Write the request body to the connection stream.
This method handles writing different types of request bodies:
1. Payload objects (using their specialized write_with_length method)
2. Bytes/bytearray objects
3. Iterable body content
Args:
writer: The stream writer to write the body to
conn: The connection being used for this request
content_length: Optional maximum number of bytes to write from the body
(None means write the entire body)
The method properly handles:
- Waiting for 100-Continue responses if required
- Content length constraints for chunked encoding
- Error handling for network issues, cancellation, and other exceptions
- Signaling EOF and timeout management
Raises:
ClientOSError: When there's an OS-level error writing the body
ClientConnectionError: When there's a general connection error
asyncio.CancelledError: When the operation is cancelled
"""
# 100 response
if self._continue is not None:
# Force headers to be sent before waiting for 100-continue
writer.send_headers()
await writer.drain()
await self._continue
protocol = conn.protocol
assert protocol is not None
try:
await self._body.write_with_length(writer, content_length)
except OSError as underlying_exc:
reraised_exc = underlying_exc
# Distinguish between timeout and other OS errors for better error reporting
exc_is_not_timeout = underlying_exc.errno is not None or not isinstance(
underlying_exc, asyncio.TimeoutError
)
if exc_is_not_timeout:
reraised_exc = ClientOSError(
underlying_exc.errno,
f"Can not write request body for {self.url !s}",
)
set_exception(protocol, reraised_exc, underlying_exc)
except asyncio.CancelledError:
# Body hasn't been fully sent, so connection can't be reused
conn.close()
raise
except Exception as underlying_exc:
set_exception(
protocol,
ClientConnectionError(
"Failed to send bytes into the underlying connection "
f"{conn !s}: {underlying_exc!r}",
),
underlying_exc,
)
else:
# Successfully wrote the body, signal EOF and start response timeout
await writer.write_eof()
protocol.start_timeout()
    async def _close(self) -> None:
        """Wait for the in-flight body writer task to finish.

        A ``CancelledError`` raised by the writer itself is swallowed; but if
        this coroutine is itself being cancelled (detectable via
        ``task.cancelling()`` on Python 3.11+), the cancellation is re-raised
        so it propagates to the caller as required.
        """
        if self._writer_task is not None:
            try:
                await self._writer_task
            except asyncio.CancelledError:
                if (
                    sys.version_info >= (3, 11)
                    and (task := asyncio.current_task())
                    and task.cancelling()
                ):
                    # Outer cancellation in progress — must not be suppressed.
                    raise
    def _terminate(self) -> None:
        """Abort the body writer task synchronously (no awaiting).

        Cancellation is skipped when the event loop is already closed, since
        scheduling on a closed loop would raise.
        """
        if self._writer_task is not None:
            if not self.loop.is_closed():
                self._writer_task.cancel()
            # Detach the done-callback before dropping the task reference.
            self._writer_task.remove_done_callback(self._reset_writer)
            self._writer_task = None
async def _on_chunk_request_sent(self, method: str, url: URL, chunk: bytes) -> None:
for trace in self._traces:
await trace.send_request_chunk_sent(method, url, chunk)
async def _on_headers_request_sent(
self, method: str, url: URL, headers: "CIMultiDict[str]"
) -> None:
for trace in self._traces:
await trace.send_request_headers(method, url, headers)
| ClientRequest |
python | walkccc__LeetCode | solutions/60. Permutation Sequence/60.py | {
"start": 0,
"end": 383
} | class ____:
def getPermutation(self, n: int, k: int) -> str:
ans = ''
nums = [i + 1 for i in range(n)]
fact = [1] * (n + 1) # fact[i] := i!
for i in range(2, n + 1):
fact[i] = fact[i - 1] * i
k -= 1 # 0-indexed
for i in reversed(range(n)):
j = k // fact[i]
k %= fact[i]
ans += str(nums[j])
nums.pop(j)
return ans
| Solution |
python | ray-project__ray | doc/source/ray-core/doc_code/cgraph_quickstart.py | {
"start": 50,
"end": 2455
class ____:
    """Minimal actor used by the surrounding snippets to measure call latency."""

    def echo(self, msg):
        """Return *msg* unchanged (identity round-trip)."""
        return msg
# __simple_actor_end__
# __ray_core_usage_start__
import time
a = SimpleActor.remote()
# warmup
for _ in range(5):
msg_ref = a.echo.remote("hello")
ray.get(msg_ref)
start = time.perf_counter()
msg_ref = a.echo.remote("hello")
ray.get(msg_ref)
end = time.perf_counter()
print(f"Execution takes {(end - start) * 1000 * 1000} us")
# __ray_core_usage_end__
# __dag_usage_start__
import ray.dag
with ray.dag.InputNode() as inp:
# Note that it uses `bind` instead of `remote`.
# This returns a ray.dag.DAGNode, instead of the usual ray.ObjectRef.
dag = a.echo.bind(inp)
# warmup
for _ in range(5):
msg_ref = dag.execute("hello")
ray.get(msg_ref)
start = time.perf_counter()
# `dag.execute` runs the DAG and returns an ObjectRef. You can use `ray.get` API.
msg_ref = dag.execute("hello")
ray.get(msg_ref)
end = time.perf_counter()
print(f"Execution takes {(end - start) * 1000 * 1000} us")
# __dag_usage_end__
# __cgraph_usage_start__
dag = dag.experimental_compile()
# warmup
for _ in range(5):
msg_ref = dag.execute("hello")
ray.get(msg_ref)
start = time.perf_counter()
# `dag.execute` runs the DAG and returns CompiledDAGRef. Similar to
# ObjectRefs, you can use the ray.get API.
msg_ref = dag.execute("hello")
ray.get(msg_ref)
end = time.perf_counter()
print(f"Execution takes {(end - start) * 1000 * 1000} us")
# __cgraph_usage_end__
# __teardown_start__
dag.teardown()
# __teardown_end__
# __cgraph_bind_start__
a = SimpleActor.remote()
b = SimpleActor.remote()
with ray.dag.InputNode() as inp:
# Note that it uses `bind` instead of `remote`.
# This returns a ray.dag.DAGNode, instead of the usual ray.ObjectRef.
dag = a.echo.bind(inp)
dag = b.echo.bind(dag)
dag = dag.experimental_compile()
print(ray.get(dag.execute("hello")))
# __cgraph_bind_end__
dag.teardown()
# __cgraph_multi_output_start__
import ray.dag
a = SimpleActor.remote()
b = SimpleActor.remote()
with ray.dag.InputNode() as inp:
# Note that it uses `bind` instead of `remote`.
# This returns a ray.dag.DAGNode, instead of the usual ray.ObjectRef.
dag = ray.dag.MultiOutputNode([a.echo.bind(inp), b.echo.bind(inp)])
dag = dag.experimental_compile()
print(ray.get(dag.execute("hello")))
# __cgraph_multi_output_end__
dag.teardown()
# __cgraph_async_compile_start__
import ray
@ray.remote
| SimpleActor |
python | bokeh__bokeh | src/bokeh/models/callbacks.py | {
"start": 2268,
"end": 2911
class ____(Callback):
    ''' Open a URL in a new or current tab or window.

    The target URL may be a template string that is formatted with data
    from a data source (see the ``url`` property).

    '''
    # explicit __init__ to support Init signatures
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
    url = String("http://", help="""
    The URL to direct the web browser to. This can be a template string,
    which will be formatted with data from the data source.
    """)
    same_tab = Bool(False, help="""
    Open URL in a new (`False`, default) or current (`True`) tab or window.
    For `same_tab=False`, whether tab or window will be opened is browser
    dependent.
    """)
| OpenURL |
python | openai__openai-python | src/openai/resources/videos.py | {
"start": 30223,
"end": 30937
class ____:
    """Mirror of the ``Videos`` resource whose calls stream their responses.

    Each attribute wraps the corresponding ``Videos`` method so the HTTP
    response body is streamed instead of eagerly read into memory.
    """

    def __init__(self, videos: Videos) -> None:
        self._videos = videos
        self.create = to_streamed_response_wrapper(
            videos.create,
        )
        self.retrieve = to_streamed_response_wrapper(
            videos.retrieve,
        )
        self.list = to_streamed_response_wrapper(
            videos.list,
        )
        self.delete = to_streamed_response_wrapper(
            videos.delete,
        )
        # download_content returns raw bytes, so it needs the binary wrapper.
        self.download_content = to_custom_streamed_response_wrapper(
            videos.download_content,
            StreamedBinaryAPIResponse,
        )
        self.remix = to_streamed_response_wrapper(
            videos.remix,
        )
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_overlap.py | {
"start": 2529,
"end": 2889
} | class ____:
def __init__(self) -> None:
self.data = []
def add(self, new_data):
if len(self.data) < 10:
self.data.append(new_data)
else:
self.data = sorted(self.data)
if new_data < self.data[-1]:
self.data[-1] = new_data
def avg(self):
return mean(self.data)
| Min10 |
python | dagster-io__dagster | python_modules/libraries/dagstermill/dagstermill/examples/repository.py | {
"start": 3592,
"end": 7676
class ____(Config):
    # Farewell message; supplied as op config to the goodbye notebook op
    # (used via config_schema=GoodbyeConfig just below).
    farewell: str = "goodbye"
goodbye_config_struct = test_nb_op(
name="goodbye_config_struct",
path=nb_test_path("print_dagstermill_context_op_config"),
output_notebook_name="notebook",
config_schema=GoodbyeConfig,
)
@job(resource_defs=common_resource_defs)
def hello_world_config_job_struct() -> None:
hello_world_config_struct()
goodbye_config_struct()
@job(resource_defs=common_resource_defs)
def alias_config_job():
hello_world_config.alias("aliased_greeting")()
goodbye_config.alias("aliased_goodbye")()
@op(ins={"notebook": In()})
def load_notebook(notebook):
return notebook
@job(resource_defs=common_resource_defs)
def hello_world_with_output_notebook_job():
notebook = hello_world()
load_notebook(notebook)
hello_world_no_output_notebook = test_nb_op(
name="hello_world_no_output_notebook",
path=nb_test_path("hello_world"),
output_notebook_name=None,
)
@job
def hello_world_no_output_notebook_no_file_manager_job():
hello_world_no_output_notebook()
@job(resource_defs=common_resource_defs)
def hello_world_no_output_notebook_job():
hello_world_no_output_notebook()
hello_world_output = test_nb_op("hello_world_output", outs={DEFAULT_OUTPUT: Out(str)})
@job(resource_defs=common_resource_defs)
def hello_world_output_job():
hello_world_output()
hello_world_explicit_yield = test_nb_op(
"hello_world_explicit_yield", outs={DEFAULT_OUTPUT: Out(str)}
)
@job(resource_defs=common_resource_defs)
def hello_world_explicit_yield_job():
hello_world_explicit_yield()
hello_logging = test_nb_op("hello_logging")
@job(resource_defs=common_resource_defs)
def hello_logging_job():
hello_logging()
add_two_numbers = test_nb_op(
"add_two_numbers",
ins={"a": In(int), "b": In(int)},
outs={DEFAULT_OUTPUT: Out(int)},
)
mult_two_numbers = test_nb_op(
"mult_two_numbers", ins={"a": In(int), "b": In(int)}, outs={DEFAULT_OUTPUT: Out(int)}
)
@op
def return_one():
return 1
@op
def return_two():
return 2
@op
def return_three():
return 3
@op
def return_four():
return 4
@job(resource_defs=common_resource_defs)
def add_job():
add_two_numbers(return_one(), return_two())
@job(resource_defs=common_resource_defs)
def double_add_job():
add_two_numbers.alias("add_two_numbers_1")(return_one(), return_two())
add_two_numbers.alias("add_two_numbers_2")(return_three(), return_four())
@op
def load_constant(config: int) -> int:
return config
@job(resource_defs=common_resource_defs)
def notebook_dag_job():
a = load_constant.alias("load_a")()
b = load_constant.alias("load_b")()
num, _ = add_two_numbers(a, b)
mult_two_numbers(num, b)
error_notebook = test_nb_op("error_notebook", save_notebook_on_failure=True)
@job(resource_defs=common_resource_defs)
def error_job():
error_notebook()
if DAGSTER_PANDAS_PRESENT and SKLEARN_PRESENT and MATPLOTLIB_PRESENT:
# We need type-ignores here because type checkers don't understand the `*_PRESENT` kwargs.
clean_data = test_nb_op("clean_data", outs={DEFAULT_OUTPUT: Out(DataFrame)}) # pyright: ignore[reportPossiblyUnboundVariable]
# FIXME add an output to this
tutorial_LR = test_nb_op("tutorial_LR", ins={"df": In(DataFrame)}) # pyright: ignore[reportPossiblyUnboundVariable]
tutorial_RF = test_nb_op("tutorial_RF", ins={"df": In(DataFrame)}) # pyright: ignore[reportPossiblyUnboundVariable]
@job(resource_defs=common_resource_defs)
def tutorial_job():
dfr, _ = clean_data()
# FIXME get better names for these
tutorial_LR(dfr)
tutorial_RF(dfr)
@op(required_resource_keys={"list"})
def resource_op(context):
context.resources.list.append("Hello, op!")
return True
hello_world_resource = test_nb_op(
"hello_world_resource",
ins={"nonce": In()},
required_resource_keys={"list"},
)
hello_world_resource_with_exception = test_nb_op(
"hello_world_resource_with_exception",
ins={"nonce": In()},
required_resource_keys={"list"},
)
| GoodbyeConfig |
python | tensorflow__tensorflow | tensorflow/python/ops/image_ops_test.py | {
"start": 2358,
"end": 4380
} | class ____(test_util.TensorFlowTestCase):
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to HSV and back, as a batch and individually
with self.cached_session():
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_hsv(batch0)
batch2 = image_ops.hsv_to_rgb(batch1)
split0 = array_ops_stack.unstack(batch0)
split1 = list(map(image_ops.rgb_to_hsv, split0))
split2 = list(map(image_ops.hsv_to_rgb, split1))
join1 = array_ops_stack.stack(split1)
join2 = array_ops_stack.stack(split2)
batch1, batch2, join1, join2 = self.evaluate(
[batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1)
self.assertAllClose(batch2, join2)
self.assertAllClose(batch2, inp)
def testRGBToHSVRoundTrip(self):
data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
for nptype in [np.float32, np.float64]:
rgb_np = np.array(data, dtype=nptype).reshape([2, 2, 3]) / 255.
with self.cached_session():
hsv = image_ops.rgb_to_hsv(rgb_np)
rgb = image_ops.hsv_to_rgb(hsv)
rgb_tf = self.evaluate(rgb)
self.assertAllClose(rgb_tf, rgb_np)
def testRGBToHSVDataTypes(self):
# Test case for GitHub issue 54855.
data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
for dtype in [
dtypes.float32, dtypes.float64, dtypes.float16, dtypes.bfloat16
]:
with self.cached_session(use_gpu=False):
rgb = math_ops.cast(
np.array(data, np.float32).reshape([2, 2, 3]) / 255., dtype=dtype)
hsv = image_ops.rgb_to_hsv(rgb)
val = image_ops.hsv_to_rgb(hsv)
out = self.evaluate(val)
self.assertAllClose(rgb, out, atol=1e-2)
| RGBToHSVTest |
python | FactoryBoy__factory_boy | tests/test_faker.py | {
"start": 143,
"end": 349
} | class ____:
def __init__(self, expected):
self.expected = expected
self.random = random.Random()
def format(self, provider, **kwargs):
return self.expected[provider]
| MockFaker |
python | sympy__sympy | sympy/solvers/simplex.py | {
"start": 2575,
"end": 2908
} | class ____(Exception):
"""
A linear programming problem is said to be unbounded if its objective
function can assume arbitrarily large values.
Example
=======
Suppose you want to maximize
2x
subject to
x >= 0
There's no upper limit that 2x can take.
"""
pass
| UnboundedLPError |
python | pytorch__pytorch | test/torch_np/test_basic.py | {
"start": 11132,
"end": 11932
} | class ____(TestCase):
"""Smoke test generic problems with normalizations."""
def test_unknown_args(self):
# Check that unknown args to decorated functions fail
a = w.arange(7) % 2 == 0
# unknown positional args
with assert_raises(TypeError):
w.nonzero(a, "kaboom")
# unknown kwarg
with assert_raises(TypeError):
w.nonzero(a, oops="ouch")
def test_too_few_args_positional(self):
with assert_raises(TypeError):
w.nonzero()
def test_unknown_args_with_defaults(self):
# check a function 5 arguments and 4 defaults: this should work
w.eye(3)
# five arguments, four defaults: this should fail
with assert_raises(TypeError):
w.eye()
| TestNormalizations |
python | huggingface__transformers | src/transformers/models/qwen3_vl_moe/modular_qwen3_vl_moe.py | {
"start": 17998,
"end": 18058
class ____(Qwen3VLVisionModel):
    """Behaviorally identical to ``Qwen3VLVisionModel``; no overrides."""
| Qwen3VLMoeVisionModel |
python | walkccc__LeetCode | solutions/1461. Check If a String Contains All Binary Codes of Size K/1461.py | {
"start": 0,
"end": 465
} | class ____:
def hasAllCodes(self, s: str, k: int) -> bool:
n = 1 << k
if len(s) < n:
return False
# used[i] := True if i is a substring of `s`
used = [0] * n
windowStr = 0 if k == 1 else int(s[0:k - 1], 2)
for i in range(k - 1, len(s)):
# Include the s[i].
windowStr = (windowStr << 1) + int(s[i])
# Discard the s[i - k].
windowStr &= n - 1
used[windowStr] = True
return all(u for u in used)
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-rki-covid/source_rki_covid/source.py | {
"start": 1727,
"end": 2334
class ____(RkiCovidStream):
    """Docs: https://api.corona-zahlen.org/states"""
    # Endpoint returns a dict keyed by state abbreviation; no natural PK.
    primary_key = None
    def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
        # Payload nests records under "data": {<state>: {...}}; yield each value.
        if response.json():
            for key, value in response.json().get("data").items():
                yield value
        # NOTE(review): this is a generator, so the returned list is discarded
        # by iteration (it only becomes the StopIteration value) — confirm intent.
        return [{}]
    def path(
        self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
    ) -> str:
        return "states/"
# class that contains source age-groups in germany. | full-refresh
| GermanyStates |
python | pytorch__pytorch | torch/_inductor/runtime/autotune_cache.py | {
"start": 14808,
"end": 21143
} | class ____:
_bundler: _AutotuneCacheBundlerImpl | None = None
def __init__(self) -> None:
pass
# Call this before we start any autotune computation for an inductor python
# file. On a cache hit it copies the individual results into the local
# autotune caches.
@classmethod
def begin_compile(
cls,
inductor_meta: _InductorMetaTy,
*,
code: str | None = None,
code_hash: str | None = None,
) -> None:
assert cls._bundler is None
if code is not None:
assert code_hash is None, "Cannot specify both code and code_hash"
code_hash = _comment_stripped_hash(code)
assert code_hash is not None
if not _AutotuneCacheBundlerImpl._should_use_bundled_autotune_remote_cache(
inductor_meta
):
return
cache = create_cache(
"bundled-autotune-v1",
_AutotuneCacheBundlerImpl._get_is_fbcode(inductor_meta),
"FbRemoteBundledAutotuneCache",
"RemoteBundledAutotuneCache",
)
if not cache:
return
# We're starting a compilation phase. We have a cache key for the code
# we're compiling. We'll get the individual autotune bundles later (via
# self.put()). For now create the AutotuneCacheBundler and try to load
# from the cache.
salt = "bundled-autotune-best-configs-v1"
backend_hash = _AutotuneCacheBundlerImpl._get_backend_hash(inductor_meta)
# TODO: The autotune cache includes configs_hash in the key. The problem
# is that the configs_hash includes info from the individual pointwise()
# calls (size_hints, for example) which we can't know yet. I *think*
# that info is basically present in the `code_hash` (since it's a
# parameter to the pointwise decorator) - but is there other info we
# need to include from inductor_meta?
key = code_hash + backend_hash + salt
key = hashlib.sha256(key.encode("utf-8")).hexdigest()
bundler = _AutotuneCacheBundlerImpl(key, cache)
if not bundler._load_cache():
# We couldn't load from the cache - so save the data so we can store
# the saved autotunes.
cls._bundler = bundler
# If we get a cache hit don't bother saving any of the individual
# autotune results.
# Call this after all individual autotune results are finished for a
# inductor python file. If we gathered any individual results then we bundle
# those and put it into the cache.
@classmethod
def end_compile(cls) -> None:
if bundler := cls._bundler:
cls._bundler = None
bundler.end_compile()
@classmethod
def sync(cls) -> None:
if bundler := cls._bundler:
bundler.sync()
@classmethod
def put(cls, filename: str, data: JsonDataTy) -> None:
if bundler := cls._bundler:
# The filename comes in as something like
# "/tmp/tmp{random}/{aa}/{basename}.py" (where aa is
# basename[1:3]). Strip it down and make sure that it looks like a path
# we could reconstruct (because it's possible for the caller to
# customize the path).
basename = os.path.basename(filename)
# TODO: check cache_dir() vs filename, then strip dirname
bundler.put(basename, data)
# Remove the comments from the code (which include things like run ids and file
# paths) and then hash the result.
def _comment_stripped_hash(code: str) -> str:
code = re.sub(r"#.*$", "", code, count=0, flags=re.MULTILINE)
return torch._inductor.codecache.code_hash(code)
def _should_use_remote_autotune_cache(inductor_meta: _InductorMetaTy) -> bool:
if (config := inductor_meta.get("autotune_remote_cache")) is not None:
return bool(config)
if not inductor_meta.get("is_fbcode"):
return False
if torch._utils_internal.is_fb_unit_test():
return False
if inductor_meta.get("is_hip"):
return False
try:
from torch._inductor.fb.remote_cache import REMOTE_CACHE_VERSION
except ModuleNotFoundError:
return False
return REMOTE_CACHE_VERSION >= torch._utils_internal.justknobs_getval_int(
"pytorch/remote_cache:autotune_memcache_version"
)
def _load_cached_autotuning(
best_config: dict[str, JsonDataTy],
configs_hash: str,
configs: list[Config],
inductor_meta: _InductorMetaTy,
) -> Config | None:
if best_config is None:
return None
if best_config.pop("configs_hash", None) != configs_hash:
return None
# Remove time taken for comparison
best_config.pop("time_taken_ms", None)
best_config.pop("triton_cache_hash", None)
if inductor_meta.get("coordinate_descent_tuning") and best_config.pop(
"found_by_coordesc", False
):
num_warps = best_config.pop("num_warps")
num_stages = best_config.pop("num_stages")
# Extract common arguments
config_args = {
"num_warps": num_warps,
"num_stages": num_stages,
}
if HAS_WARP_SPEC:
config_args.update(
{
"num_consumer_groups": best_config.pop("num_consumer_groups", 0),
"num_buffers_warp_spec": best_config.pop(
"num_buffers_warp_spec", 0
),
}
)
# Create the triton_config with the appropriate arguments
# pyrefly: ignore [bad-argument-count]
triton_config = Config(best_config, **config_args)
# pyrefly: ignore [missing-attribute]
triton_config.found_by_coordesc = True
return triton_config
matching_configs = [
cfg
for cfg in configs
# pyrefly: ignore [missing-attribute]
if all(val == best_config.get(key) for key, val in cfg.kwargs.items())
# pyrefly: ignore [missing-attribute]
and cfg.num_warps == best_config.get("num_warps")
# pyrefly: ignore [missing-attribute]
and cfg.num_stages == best_config.get("num_stages")
]
if len(matching_configs) != 1:
return None
return matching_configs[0]
| AutotuneCacheBundler |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-amazon-ads/unit_tests/integrations/config.py | {
"start": 308,
"end": 2402
class ____:
    """Fluent builder for the Amazon Ads connector config used in tests.

    Defaults are loaded from the recorded ``oauth`` fixture and module-level
    constants; each ``with_*`` method overrides a single field and returns
    ``self`` so calls can be chained before ``build()``.
    """
    def __init__(self) -> None:
        oauth_fixture: Dict[str, Any] = find_template("oauth", __file__)
        self._access_token: str = oauth_fixture["access_token"]
        self._refresh_token: str = oauth_fixture["refresh_token"]
        self._client_id: str = CLIENT_ID
        self._client_secret: str = CLIENT_SECRET
        self._region: str = REGION
        self._report_wait_timeout: str = REPORT_WAIT_TIMEOUT
        self._profiles: str = PROFILES
        # NOTE(review): annotated ``str`` but initialized to None (and
        # ``_profiles``/``_report_wait_timeout`` hold non-str values) —
        # effectively Optional; confirm before tightening annotations.
        self._start_date: str = None
    def with_client_id(self, client_id: str) -> "ConfigBuilder":
        self._client_id = client_id
        return self
    def with_client_secret(self, client_secret: str) -> "ConfigBuilder":
        self._client_secret = client_secret
        return self
    def with_access_token(self, access_token: str) -> "ConfigBuilder":
        self._access_token = access_token
        return self
    def with_refresh_token(self, refresh_token: str) -> "ConfigBuilder":
        self._refresh_token = refresh_token
        return self
    def with_region(self, region: str) -> "ConfigBuilder":
        self._region = region
        return self
    def with_report_wait_timeout(self, report_wait_timeout: int) -> "ConfigBuilder":
        self._report_wait_timeout = report_wait_timeout
        return self
    def with_profiles(self, profiles: List[int]) -> "ConfigBuilder":
        self._profiles = profiles
        return self
    def with_start_date(self, start_date: datetime.date) -> "ConfigBuilder":
        self._start_date = start_date.isoformat()
        return self
    def build(self) -> Dict[str, Any]:
        """Assemble the final config dict from the collected fields."""
        config = {
            "client_id": self._client_id,
            "client_secret": self._client_secret,
            "access_token": self._access_token,
            "refresh_token": self._refresh_token,
            "region": self._region,
            "report_wait_timeout": self._report_wait_timeout,
            "profiles": self._profiles,
        }
        # start_date is optional; only emit the key when explicitly set.
        if self._start_date:
            config["start_date"] = self._start_date
        return config
| ConfigBuilder |
python | PyCQA__pylint | tests/functional/ext/docparams/raise/missing_raises_doc_required_exc_inheritance.py | {
"start": 111,
"end": 152
} | class ____(NameError):
pass
| CustomError |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_webagg_core.py | {
"start": 2598,
"end": 3746
class ____(backend_bases.TimerBase):
    """Matplotlib timer implemented on top of the Tornado IOLoop."""

    def __init__(self, *args, **kwargs):
        # Either an IOLoop timeout handle (single-shot mode) or a
        # PeriodicCallback (repeating mode); None while stopped.
        self._timer = None
        super().__init__(*args, **kwargs)
    def _timer_start(self):
        import tornado
        # Restarting: make sure any previous timer is cleared first.
        self._timer_stop()
        if self._single:
            ioloop = tornado.ioloop.IOLoop.instance()
            self._timer = ioloop.add_timeout(
                datetime.timedelta(milliseconds=self.interval),
                self._on_timer)
        else:
            self._timer = tornado.ioloop.PeriodicCallback(
                self._on_timer,
                # clamp so the callback interval is strictly positive
                max(self.interval, 1e-6))
            self._timer.start()
    def _timer_stop(self):
        import tornado
        if self._timer is None:
            return
        elif self._single:
            # Single-shot handles must be removed via the IOLoop itself.
            ioloop = tornado.ioloop.IOLoop.instance()
            ioloop.remove_timeout(self._timer)
        else:
            self._timer.stop()
        self._timer = None
    def _timer_set_interval(self):
        # Only stop and restart it if the timer has already been started
        if self._timer is not None:
            self._timer_stop()
            self._timer_start()
| TimerTornado |
python | django-crispy-forms__django-crispy-forms | crispy_forms/layout.py | {
"start": 634,
"end": 3583
} | class ____(TemplateNameMixin):
def __getitem__(self, slice):
return self.fields[slice]
def __setitem__(self, slice, value):
self.fields[slice] = value
def __delitem__(self, slice):
del self.fields[slice]
def __len__(self):
return len(self.fields)
def __getattr__(self, name):
"""
This allows us to access self.fields list methods like append or insert, without
having to declare them one by one
"""
# Check necessary for unpickling, see #107
if "fields" in self.__dict__ and hasattr(self.fields, name):
return getattr(self.fields, name)
else:
return object.__getattribute__(self, name)
def get_field_names(self, index=None):
"""
Returns a list of Pointers. First parameter is the location of the
field, second one the name of the field. Example::
[
Pointer([0,1,2], 'field_name1'),
Pointer([0,3], 'field_name2'),
]
"""
return self.get_layout_objects(str, index=None, greedy=True)
def get_layout_objects(self, *LayoutClasses, index=None, max_level=0, greedy=False):
"""
Returns a list of Pointers pointing to layout objects of any type matching
`LayoutClasses`::
[
Pointer([0,1,2], 'div']),
Pointer([0,3], 'field_name'),
]
:param max_level: An integer that indicates max level depth to reach when
traversing a layout.
:param greedy: Boolean that indicates whether to be greedy. If set, max_level
is skipped.
"""
pointers = []
if index is not None and not isinstance(index, list):
index = [index]
elif index is None:
index = []
str_class = len(LayoutClasses) == 1 and LayoutClasses[0] == str
for i, layout_object in enumerate(self.fields):
if isinstance(layout_object, LayoutClasses):
if str_class:
pointers.append(Pointer(index + [i], layout_object))
else:
pointers.append(Pointer(index + [i], layout_object.__class__.__name__.lower()))
# If it's a layout object and we haven't reached the max depth limit or greedy
# we recursive call
if hasattr(layout_object, "get_field_names") and (len(index) < max_level or greedy):
new_kwargs = {"index": index + [i], "max_level": max_level, "greedy": greedy}
pointers = pointers + layout_object.get_layout_objects(*LayoutClasses, **new_kwargs)
return pointers
def get_rendered_fields(self, form, context, template_pack=TEMPLATE_PACK, **kwargs):
return SafeString(
"".join(render_field(field, form, context, template_pack=template_pack, **kwargs) for field in self.fields)
)
| LayoutObject |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_django/DJ008.py | {
"start": 2819,
"end": 3159
} | class ____(models.Model):
new_field = models.CharField(max_length=10)
class Meta:
abstract = False
def __str__(self):
return self.new_field
@property
def my_brand_new_property(self):
return 1
def my_beautiful_method(self):
return 2
# Subclass with its own __str__
| AbstractTestModel5 |
python | getsentry__sentry | src/sentry/services/nodestore/django/models.py | {
"start": 203,
"end": 676
class ____(BaseModel):
    """Django model backing the nodestore: an externally-keyed blob + timestamp."""

    __relocation_scope__ = RelocationScope.Excluded
    # Caller-supplied key (max 40 chars); not auto-generated.
    id = models.CharField(max_length=40, primary_key=True)
    # TODO(dcramer): this being pickle and not JSON has the ability to cause
    # hard errors as it accepts other serialization than native JSON
    data = models.TextField()
    # Indexed to support time-based cleanup/lookup queries.
    timestamp = models.DateTimeField(default=timezone.now, db_index=True)
    __repr__ = sane_repr("timestamp")
    class Meta:
        app_label = "nodestore"
| Node |
python | ApeWorX__ape | src/ape/managers/converters.py | {
"start": 4727,
"end": 6047
} | class ____(ConverterAPI):
"""
A converter that converts an integer address to an :class:`~ape.types.address.AddressType`.
"""
_cache: dict[int, Union[AddressType, bool]] = {}
def is_convertible(self, value: Any) -> bool:
if not isinstance(value, int):
return False
elif isinstance(self._cache.get(value), str):
return True
val = self._convert(value)
self._cache[value] = val
return isinstance(val, str)
def convert(self, value: Any) -> AddressType:
err_msg = f"Failed to convert '{value}' to 'AddressType'."
if cached_val := self._cache.get(value):
if not isinstance(cached_val, str):
# Shouldn't get here in normal execution.
raise ConversionError(err_msg)
return cached_val
# Shouldn't get here in normal execution.
res = self._convert(value)
self._cache[value] = res
if not isinstance(res, str):
raise ConversionError(err_msg)
return res
def _convert(self, value: int) -> Union[AddressType, bool]:
try:
val = Address.__eth_pydantic_validate__(value)
except Exception:
return False
return AddressType(to_checksum_address(val))
| IntAddressConverter |
python | doocs__leetcode | solution/0300-0399/0395.Longest Substring with At Least K Repeating Characters/Solution.py | {
"start": 0,
"end": 697
} | class ____:
def longestSubstring(self, s: str, k: int) -> int:
def dfs(l, r):
cnt = Counter(s[l : r + 1])
split = next((c for c, v in cnt.items() if v < k), '')
if not split:
return r - l + 1
i = l
ans = 0
while i <= r:
while i <= r and s[i] == split:
i += 1
if i >= r:
break
j = i
while j <= r and s[j] != split:
j += 1
t = dfs(i, j - 1)
ans = max(ans, t)
i = j
return ans
return dfs(0, len(s) - 1)
| Solution |
python | astropy__astropy | astropy/io/votable/tests/test_vo.py | {
"start": 13166,
"end": 24727
} | class ____:
def setup_class(self):
with np.errstate(over="ignore"):
# https://github.com/astropy/astropy/issues/13341
self.votable = parse(get_pkg_data_filename("data/regression.xml"))
self.table = self.votable.get_first_table()
self.array = self.table.array
self.mask = self.table.array.mask
def test_string_test(self):
assert issubclass(self.array["string_test"].dtype.type, np.object_)
assert_array_equal(
self.array["string_test"],
["String & test", "String & test", "XXXX", "", ""],
)
def test_fixed_string_test(self):
assert issubclass(self.array["string_test_2"].dtype.type, np.str_)
assert_array_equal(
self.array["string_test_2"], ["Fixed stri", "0123456789", "XXXX", "", ""]
)
def test_unicode_test(self):
assert issubclass(self.array["unicode_test"].dtype.type, np.object_)
assert_array_equal(
self.array["unicode_test"],
["Ceçi n'est pas un pipe", "வணக்கம்", "XXXX", "", ""],
)
def test_fixed_unicode_test(self):
assert issubclass(self.array["fixed_unicode_test"].dtype.type, np.str_)
assert_array_equal(
self.array["fixed_unicode_test"],
["Ceçi n'est", "வணக்கம்", "0123456789", "", ""],
)
def test_unsignedByte(self):
assert issubclass(self.array["unsignedByte"].dtype.type, np.uint8)
assert_array_equal(self.array["unsignedByte"], [128, 255, 0, 255, 255])
assert not np.any(self.mask["unsignedByte"])
def test_short(self):
assert issubclass(self.array["short"].dtype.type, np.int16)
assert_array_equal(self.array["short"], [4096, 32767, -4096, 32767, 32767])
assert not np.any(self.mask["short"])
def test_int(self):
assert issubclass(self.array["int"].dtype.type, np.int32)
assert_array_equal(
self.array["int"], [268435456, 2147483647, -268435456, 268435455, 123456789]
)
assert_array_equal(self.mask["int"], [False, False, False, False, True])
def test_long(self):
assert issubclass(self.array["long"].dtype.type, np.int64)
assert_array_equal(
self.array["long"],
[
922337203685477,
123456789,
-1152921504606846976,
1152921504606846975,
123456789,
],
)
assert_array_equal(self.mask["long"], [False, True, False, False, True])
def test_double(self):
assert issubclass(self.array["double"].dtype.type, np.float64)
assert_array_equal(
self.array["double"], [8.9990234375, 0.0, np.inf, np.nan, -np.inf]
)
assert_array_equal(self.mask["double"], [False, False, False, True, False])
def test_float(self):
assert issubclass(self.array["float"].dtype.type, np.float32)
assert_array_equal(self.array["float"], [1.0, 0.0, np.inf, np.inf, np.nan])
assert_array_equal(self.mask["float"], [False, False, False, False, True])
def test_array(self):
assert issubclass(self.array["array"].dtype.type, np.object_)
match = [
[],
[[42, 32], [12, 32]],
[[12, 34], [56, 78], [87, 65], [43, 21]],
[[-1, 23]],
[[31, -1]],
]
for a, b in zip(self.array["array"], match):
# assert issubclass(a.dtype.type, np.int64)
# assert a.shape[1] == 2
for a0, b0 in zip(a, b):
assert issubclass(a0.dtype.type, np.int64)
assert_array_equal(a0, b0)
assert self.array.data["array"][3].mask[0][0]
assert self.array.data["array"][4].mask[0][1]
def test_bit(self):
assert issubclass(self.array["bit"].dtype.type, np.bool_)
assert_array_equal(self.array["bit"], [True, False, True, False, False])
def test_bit_mask(self):
assert_array_equal(self.mask["bit"], [False, False, False, False, True])
def test_bitarray(self):
assert issubclass(self.array["bitarray"].dtype.type, np.bool_)
assert self.array["bitarray"].shape == (5, 3, 2)
assert_array_equal(
self.array["bitarray"],
[
[[True, False], [True, True], [False, True]],
[[False, True], [False, False], [True, True]],
[[True, True], [True, False], [False, False]],
[[False, False], [False, False], [False, False]],
[[False, False], [False, False], [False, False]],
],
)
def test_bitarray_mask(self):
assert_array_equal(
self.mask["bitarray"],
[
[[False, False], [False, False], [False, False]],
[[False, False], [False, False], [False, False]],
[[False, False], [False, False], [False, False]],
[[True, True], [True, True], [True, True]],
[[True, True], [True, True], [True, True]],
],
)
def test_bitvararray(self):
assert issubclass(self.array["bitvararray"].dtype.type, np.object_)
match = [
[True, True, True],
[False, False, False, False, False],
[True, False, True, False, True],
[],
[],
]
for a, b in zip(self.array["bitvararray"], match):
assert_array_equal(a, b)
match_mask = [
[False, False, False],
[False, False, False, False, False],
[False, False, False, False, False],
False,
False,
]
for a, b in zip(self.array["bitvararray"], match_mask):
assert_array_equal(a.mask, b)
def test_bitvararray2(self):
assert issubclass(self.array["bitvararray2"].dtype.type, np.object_)
match = [
[],
[
[[False, True], [False, False], [True, False]],
[[True, False], [True, False], [True, False]],
],
[[[True, True], [True, True], [True, True]]],
[],
[],
]
for a, b in zip(self.array["bitvararray2"], match):
for a0, b0 in zip(a, b):
assert a0.shape == (3, 2)
assert issubclass(a0.dtype.type, np.bool_)
assert_array_equal(a0, b0)
def test_floatComplex(self):
assert issubclass(self.array["floatComplex"].dtype.type, np.complex64)
assert_array_equal(
self.array["floatComplex"],
[np.nan + 0j, 0 + 0j, 0 + -1j, np.nan + 0j, np.nan + 0j],
)
assert_array_equal(self.mask["floatComplex"], [True, False, False, True, True])
def test_doubleComplex(self):
assert issubclass(self.array["doubleComplex"].dtype.type, np.complex128)
assert_array_equal(
self.array["doubleComplex"],
[np.nan + 0j, 0 + 0j, 0 + -1j, np.nan + (np.inf * 1j), np.nan + 0j],
)
assert_array_equal(self.mask["doubleComplex"], [True, False, False, True, True])
def test_doubleComplexArray(self):
assert issubclass(self.array["doubleComplexArray"].dtype.type, np.object_)
assert [len(x) for x in self.array["doubleComplexArray"]] == [0, 2, 2, 0, 0]
def test_boolean(self):
assert issubclass(self.array["boolean"].dtype.type, np.bool_)
assert_array_equal(self.array["boolean"], [True, False, True, False, False])
def test_boolean_mask(self):
assert_array_equal(self.mask["boolean"], [False, False, False, False, True])
def test_boolean_array(self):
assert issubclass(self.array["booleanArray"].dtype.type, np.bool_)
assert_array_equal(
self.array["booleanArray"],
[
[True, True, True, True],
[True, True, False, True],
[True, True, False, True],
[False, False, False, False],
[False, False, False, False],
],
)
def test_boolean_array_mask(self):
assert_array_equal(
self.mask["booleanArray"],
[
[False, False, False, False],
[False, False, False, False],
[False, False, True, False],
[True, True, True, True],
[True, True, True, True],
],
)
def test_nulls(self):
assert_array_equal(self.array["nulls"], [0, -9, 2, -9, -9])
assert_array_equal(self.mask["nulls"], [False, True, False, True, True])
def test_nulls_array(self):
assert_array_equal(
self.array["nulls_array"],
[
[[-9, -9], [-9, -9]],
[[0, 1], [2, 3]],
[[-9, 0], [-9, 1]],
[[0, -9], [1, -9]],
[[-9, -9], [-9, -9]],
],
)
assert_array_equal(
self.mask["nulls_array"],
[
[[True, True], [True, True]],
[[False, False], [False, False]],
[[True, False], [True, False]],
[[False, True], [False, True]],
[[True, True], [True, True]],
],
)
def test_double_array(self):
assert issubclass(self.array["doublearray"].dtype.type, np.object_)
assert len(self.array["doublearray"][0]) == 0
assert_array_equal(
self.array["doublearray"][1], [0, 1, np.inf, -np.inf, np.nan, 0, -1]
)
assert_array_equal(
self.array.data["doublearray"][1].mask,
[False, False, False, False, False, False, True],
)
def test_bit_array2(self):
assert_array_equal(
self.array["bitarray2"][0],
[
True,
True,
True,
True,
False,
False,
False,
False,
True,
True,
True,
True,
False,
False,
False,
False,
],
)
def test_bit_array2_mask(self):
assert not np.any(self.mask["bitarray2"][0])
assert np.all(self.mask["bitarray2"][1:])
def test_get_coosys_by_id(self):
coosys = self.votable.get_coosys_by_id("J2000")
assert coosys.system == "eq_FK5"
def test_get_field_by_utype(self):
fields = list(self.votable.get_fields_by_utype("myint"))
assert fields[0].name == "int"
assert fields[0].values.min == -1000
def test_get_info_by_id(self):
info = self.votable.get_info_by_id("QUERY_STATUS")
assert info.value == "OK"
if self.votable.version != "1.1":
info = self.votable.get_info_by_id("ErrorInfo")
assert info.value == "One might expect to find some INFO here, too..."
def test_repr(self):
assert "3 tables" in repr(self.votable)
assert (
repr(list(self.votable.iter_fields_and_params())[0])
== '<PARAM ID="awesome" arraysize="*" datatype="float" '
'name="INPUT" unit="deg" value="[0.0 0.0]"/>'
)
# Smoke test
repr(list(self.votable.iter_groups()))
# Resource
assert repr(self.votable.resources) == "[</>]"
# Table
assert repr(self.table).startswith("<VOTable")
| TestParse |
python | sqlalchemy__sqlalchemy | test/engine/test_reconnect.py | {
"start": 11028,
"end": 26179
} | class ____(fixtures.TestBase):
def setup_test(self):
self.dbapi = MockDBAPI()
self.db = testing_engine(
"postgresql+psycopg2://foo:bar@localhost/test",
options=dict(module=self.dbapi, _initialize=False),
)
self.mock_connect = call(
host="localhost", password="bar", user="foo", dbname="test"
)
# monkeypatch disconnect checker
self.db.dialect.is_disconnect = lambda e, conn, cursor: isinstance(
e, MockDisconnect
)
def teardown_test(self):
self.dbapi.dispose()
def test_reconnect(self):
"""test that an 'is_disconnect' condition will invalidate the
connection, and additionally dispose the previous connection
pool and recreate."""
# make a connection
conn = self.db.connect()
# connection works
conn.execute(select(1))
# create a second connection within the pool, which we'll ensure
# also goes away
conn2 = self.db.connect()
conn2.close()
# two connections opened total now
assert len(self.dbapi.connections) == 2
# set it to fail
self.dbapi.shutdown()
# force windows monotonic timer to definitely increment
time.sleep(0.5)
# close on DBAPI connection occurs here, as it is detected
# as invalid.
assert_raises(tsa.exc.DBAPIError, conn.execute, select(1))
# assert was invalidated
assert not conn.closed
assert conn.invalidated
# close shouldn't break
conn.close()
# ensure one connection closed...
eq_(
[c.close.mock_calls for c in self.dbapi.connections],
[[call()], []],
)
conn = self.db.connect()
eq_(
[c.close.mock_calls for c in self.dbapi.connections],
[[call()], [call()], []],
)
conn.execute(select(1))
conn.close()
eq_(
[c.close.mock_calls for c in self.dbapi.connections],
[[call()], [call()], []],
)
def test_invalidate_on_execute_trans(self):
conn = self.db.connect()
trans = conn.begin()
self.dbapi.shutdown()
assert_raises(tsa.exc.DBAPIError, conn.execute, select(1))
eq_([c.close.mock_calls for c in self.dbapi.connections], [[call()]])
assert not conn.closed
assert conn.invalidated
assert trans.is_active
assert_raises_message(
tsa.exc.PendingRollbackError,
"Can't reconnect until invalid transaction is rolled back",
conn.execute,
select(1),
)
assert trans.is_active
assert_raises_message(
tsa.exc.PendingRollbackError,
"Can't reconnect until invalid transaction is rolled back",
trans.commit,
)
# now it's inactive...
assert not trans.is_active
# but still associated with the connection
assert_raises_message(
tsa.exc.PendingRollbackError,
"Can't reconnect until invalid transaction is rolled back",
conn.execute,
select(1),
)
assert not trans.is_active
# still can't commit... error stays the same
assert_raises_message(
tsa.exc.PendingRollbackError,
"Can't reconnect until invalid transaction is rolled back",
trans.commit,
)
trans.rollback()
assert not trans.is_active
conn.execute(select(1))
assert not conn.invalidated
eq_(
[c.close.mock_calls for c in self.dbapi.connections],
[[call()], []],
)
def test_invalidate_on_commit_trans(self):
conn = self.db.connect()
trans = conn.begin()
self.dbapi.shutdown("commit")
assert_raises(tsa.exc.DBAPIError, trans.commit)
assert not conn.closed
assert conn.invalidated
assert not trans.is_active
# error stays consistent
assert_raises_message(
tsa.exc.PendingRollbackError,
"Can't reconnect until invalid transaction is rolled back",
conn.execute,
select(1),
)
assert not trans.is_active
assert_raises_message(
tsa.exc.PendingRollbackError,
"Can't reconnect until invalid transaction is rolled back",
trans.commit,
)
assert not trans.is_active
assert_raises_message(
tsa.exc.PendingRollbackError,
"Can't reconnect until invalid transaction is rolled back",
conn.execute,
select(1),
)
assert not trans.is_active
trans.rollback()
assert not trans.is_active
conn.execute(select(1))
assert not conn.invalidated
def test_commit_fails_contextmanager(self):
# this test is also performed in test/engine/test_transaction.py
# using real connections
conn = self.db.connect()
def go():
with conn.begin():
self.dbapi.shutdown("commit_no_disconnect")
assert_raises(tsa.exc.DBAPIError, go)
assert not conn.in_transaction()
def test_commit_fails_trans(self):
# this test is also performed in test/engine/test_transaction.py
# using real connections
conn = self.db.connect()
trans = conn.begin()
self.dbapi.shutdown("commit_no_disconnect")
assert_raises(tsa.exc.DBAPIError, trans.commit)
assert not conn.closed
assert not conn.invalidated
assert not trans.is_active
# error stays consistent
assert_raises_message(
tsa.exc.PendingRollbackError,
r"Can't reconnect until invalid transaction is rolled back. "
r"Please rollback\(\) fully before proceeding",
conn.execute,
select(1),
)
assert not trans.is_active
assert_raises_message(
tsa.exc.PendingRollbackError,
r"Can't reconnect until invalid transaction is rolled back. "
r"Please rollback\(\) fully before proceeding",
trans.commit,
)
assert not trans.is_active
assert_raises_message(
tsa.exc.PendingRollbackError,
r"Can't reconnect until invalid transaction is rolled back. "
r"Please rollback\(\) fully before proceeding",
conn.execute,
select(1),
)
assert not trans.is_active
trans.rollback()
assert not trans.is_active
conn.execute(select(1))
assert not conn.invalidated
def test_invalidate_dont_call_finalizer(self):
conn = self.db.connect()
finalizer = mock.Mock()
conn.connection._connection_record.finalize_callback.append(finalizer)
conn.invalidate()
assert conn.invalidated
eq_(finalizer.call_count, 0)
def test_conn_reusable(self):
conn = self.db.connect()
conn.execute(select(1))
eq_(self.dbapi.connect.mock_calls, [self.mock_connect])
self.dbapi.shutdown()
with expect_raises(tsa.exc.DBAPIError):
conn.execute(select(1))
assert not conn.closed
assert conn.invalidated
eq_([c.close.mock_calls for c in self.dbapi.connections], [[call()]])
# trans was autobegin. they have to call rollback
with expect_raises(tsa.exc.PendingRollbackError):
conn.execute(select(1))
# ok
conn.rollback()
# now we are good
# test reconnects
conn.execute(select(1))
assert not conn.invalidated
eq_(
[c.close.mock_calls for c in self.dbapi.connections],
[[call()], []],
)
def test_invalidated_close(self):
conn = self.db.connect()
self.dbapi.shutdown()
assert_raises(tsa.exc.DBAPIError, conn.execute, select(1))
conn.close()
assert conn.closed
assert not conn.invalidated
assert_raises_message(
tsa.exc.ResourceClosedError,
"This Connection is closed",
conn.execute,
select(1),
)
def test_noreconnect_execute(self):
conn = self.db.connect()
self.dbapi.shutdown("execute_no_disconnect")
# raises error
assert_raises_message(
tsa.exc.DBAPIError,
"something broke on execute but we didn't lose the connection",
conn.execute,
select(1),
)
assert not conn.closed
assert not conn.invalidated
conn.close()
def test_noreconnect_rollback(self):
# this test changes in 2.x due to autobegin.
conn = self.db.connect()
conn.execute(select(1))
self.dbapi.shutdown("rollback_no_disconnect")
# previously, running a select() here which would fail would then
# trigger autorollback which would also fail, this is not the
# case now as autorollback does not normally occur
with expect_raises_message(
tsa.exc.DBAPIError,
r"something broke on rollback but we didn't lose the connection",
):
conn.rollback()
assert not conn.closed
assert not conn.invalidated
conn.close()
assert_raises_message(
tsa.exc.ResourceClosedError,
"This Connection is closed",
conn.execute,
select(1),
)
def test_reconnect_on_reentrant(self):
conn = self.db.connect()
conn.execute(select(1))
assert len(self.dbapi.connections) == 1
self.dbapi.shutdown("rollback")
assert_raises_message(
tsa.exc.DBAPIError,
"Lost the DB connection on rollback",
conn.rollback,
)
assert not conn.closed
assert conn.invalidated
def test_check_disconnect_no_cursor(self):
conn = self.db.connect()
result = conn.execute(select(1))
result.cursor.close()
conn.close()
assert_raises_message(
tsa.exc.DBAPIError, "cursor closed", list, result
)
def test_dialect_initialize_once(self):
from sqlalchemy.engine.url import URL
from sqlalchemy.engine.default import DefaultDialect
dbapi = self.dbapi
class MyURL(URL):
def _get_entrypoint(self):
return Dialect
def get_dialect(self):
return Dialect
class Dialect(DefaultDialect):
initialize = Mock()
engine = create_engine(MyURL.create("foo://"), module=dbapi)
engine.connect()
# note that the dispose() call replaces the old pool with a new one;
# this is to test that even though a single pool is using
# dispatch.exec_once(), by replacing the pool with a new one, the event
# would normally fire again onless once=True is set on the original
# listen as well.
engine.dispose()
engine.connect()
eq_(Dialect.initialize.call_count, 1)
def test_dialect_initialize_retry_if_exception(self):
from sqlalchemy.engine.url import URL
from sqlalchemy.engine.default import DefaultDialect
dbapi = self.dbapi
class MyURL(URL):
def _get_entrypoint(self):
return Dialect
def get_dialect(self):
return Dialect
class Dialect(DefaultDialect):
initialize = Mock()
# note that the first_connect hook is only invoked when the pool
# makes a new DBAPI connection, and not when it checks out an existing
# connection. So there is a dependency here that if the initializer
# raises an exception, the pool-level connection attempt is also
# failed, meaning no DBAPI connection is pooled. If the first_connect
# exception raise did not prevent the connection from being pooled,
# there could be the case where the pool could return that connection
# on a subsequent attempt without initialization having proceeded.
Dialect.initialize.side_effect = TypeError
engine = create_engine(MyURL.create("foo://"), module=dbapi)
assert_raises(TypeError, engine.connect)
eq_(Dialect.initialize.call_count, 1)
is_true(engine.pool._pool.empty())
assert_raises(TypeError, engine.connect)
eq_(Dialect.initialize.call_count, 2)
is_true(engine.pool._pool.empty())
engine.dispose()
assert_raises(TypeError, engine.connect)
eq_(Dialect.initialize.call_count, 3)
is_true(engine.pool._pool.empty())
Dialect.initialize.side_effect = None
conn = engine.connect()
eq_(Dialect.initialize.call_count, 4)
conn.close()
is_false(engine.pool._pool.empty())
conn = engine.connect()
eq_(Dialect.initialize.call_count, 4)
conn.close()
is_false(engine.pool._pool.empty())
engine.dispose()
conn = engine.connect()
eq_(Dialect.initialize.call_count, 4)
conn.close()
is_false(engine.pool._pool.empty())
def test_invalidate_conn_w_contextmanager_interrupt(self):
# test [ticket:3803]
pool = self.db.pool
conn = self.db.connect()
self.dbapi.shutdown("interrupt")
def go():
with conn.begin():
conn.execute(select(1))
assert_raises(MockExitIsh, go)
assert conn.invalidated
eq_(pool._invalidate_time, 0) # pool not invalidated
conn.execute(select(1))
assert not conn.invalidated
def test_invalidate_conn_interrupt_nodisconnect_workaround(self):
# test [ticket:3803] workaround for no disconnect on keyboard interrupt
@event.listens_for(self.db, "handle_error")
def cancel_disconnect(ctx):
ctx.is_disconnect = False
pool = self.db.pool
conn = self.db.connect()
self.dbapi.shutdown("interrupt_dont_break")
def go():
with conn.begin():
conn.execute(select(1))
assert_raises(MockExitIsh, go)
assert not conn.invalidated
eq_(pool._invalidate_time, 0) # pool not invalidated
conn.execute(select(1))
assert not conn.invalidated
def test_invalidate_conn_w_contextmanager_disconnect(self):
# test [ticket:3803] change maintains old behavior
pool = self.db.pool
conn = self.db.connect()
self.dbapi.shutdown("execute")
def go():
with conn.begin():
conn.execute(select(1))
assert_raises(exc.DBAPIError, go) # wraps a MockDisconnect
assert conn.invalidated
ne_(pool._invalidate_time, 0) # pool is invalidated
conn.execute(select(1))
assert not conn.invalidated
| MockReconnectTest |
python | redis__redis-py | redis/event.py | {
"start": 4432,
"end": 5191
} | class ____:
"""
Event that will be fired after pooled connection instances was created.
"""
def __init__(
self,
connection_pools: List,
client_type: ClientType,
credential_provider: Optional[CredentialProvider] = None,
):
self._connection_pools = connection_pools
self._client_type = client_type
self._credential_provider = credential_provider
@property
def connection_pools(self):
return self._connection_pools
@property
def client_type(self) -> ClientType:
return self._client_type
@property
def credential_provider(self) -> Union[CredentialProvider, None]:
return self._credential_provider
| AfterPooledConnectionsInstantiationEvent |
python | lepture__authlib | authlib/integrations/django_oauth2/resource_protector.py | {
"start": 388,
"end": 1958
} | class ____(_ResourceProtector):
def acquire_token(self, request, scopes=None, **kwargs):
"""A method to acquire current valid token with the given scope.
:param request: Django HTTP request instance
:param scopes: a list of scope values
:return: token object
"""
req = DjangoJsonRequest(request)
# backward compatibility
kwargs["scopes"] = scopes
for claim in kwargs:
if isinstance(kwargs[claim], str):
kwargs[claim] = [kwargs[claim]]
token = self.validate_request(request=req, **kwargs)
token_authenticated.send(sender=self.__class__, token=token)
return token
def __call__(self, scopes=None, optional=False, **kwargs):
claims = kwargs
# backward compatibility
claims["scopes"] = scopes
def wrapper(f):
@functools.wraps(f)
def decorated(request, *args, **kwargs):
try:
token = self.acquire_token(request, **claims)
request.oauth_token = token
except MissingAuthorizationError as error:
if optional:
request.oauth_token = None
return f(request, *args, **kwargs)
return return_error_response(error)
except OAuth2Error as error:
return return_error_response(error)
return f(request, *args, **kwargs)
return decorated
return wrapper
| ResourceProtector |
python | matplotlib__matplotlib | lib/matplotlib/tests/test_skew.py | {
"start": 593,
"end": 1751
} | class ____(maxis.XTick):
def draw(self, renderer):
with ExitStack() as stack:
for artist in [self.gridline, self.tick1line, self.tick2line,
self.label1, self.label2]:
stack.callback(artist.set_visible, artist.get_visible())
needs_lower = transforms.interval_contains(
self.axes.lower_xlim, self.get_loc())
needs_upper = transforms.interval_contains(
self.axes.upper_xlim, self.get_loc())
self.tick1line.set_visible(
self.tick1line.get_visible() and needs_lower)
self.label1.set_visible(
self.label1.get_visible() and needs_lower)
self.tick2line.set_visible(
self.tick2line.get_visible() and needs_upper)
self.label2.set_visible(
self.label2.get_visible() and needs_upper)
super().draw(renderer)
def get_view_interval(self):
return self.axes.xaxis.get_view_interval()
# This class exists to provide two separate sets of intervals to the tick,
# as well as create instances of the custom tick
| SkewXTick |
python | google__jax | jax/_src/pallas/fuser/block_spec.py | {
"start": 21088,
"end": 66822
} | class ____(Protocol):
def __call__(
self,
ctx: PullRuleContext,
block_spec: pallas_core.BlockSpec | tuple[pallas_core.BlockSpec, ...],
**params: Any,
) -> Sequence[pallas_core.BlockSpec]:
...
def register_pull_block_spec_rule(
prim: core.Primitive,
) -> Callable[[PullBlockSpecRuleFn], PullBlockSpecRuleFn]:
def wrapper(
f: PullBlockSpecRuleFn,
) -> PullBlockSpecRuleFn:
pull_block_spec_rules[prim] = f
return f
return wrapper
# Primitive rule implementations
def _eltwise_eval_rule(prim, ctx, x, **params):
del ctx
return prim.bind(x, **params)
def _eltwise_pull_rule(
prim: core.Primitive,
ctx: PullRuleContext,
block_spec: pallas_core.BlockSpec,
**params,
) -> Sequence[pallas_core.BlockSpec]:
del prim, ctx, params
return [block_spec]
def _eltwise_usage_rule(
prim: core.Primitive, ctx: UsageRuleContext, used_out: set[Usage], **params
) -> Sequence[set[Usage]]:
del ctx, prim, params
return [used_out]
def _pull_bcast_block_spec(
block_spec: pallas_core.BlockSpec, i: int
) -> pallas_core.BlockSpec:
def new_index_map(*args):
idx = block_spec.index_map(*args)
assert len(idx) == len(block_spec.block_shape)
idx = util.tuple_update(idx, i, 0)
return idx
if block_spec.block_shape[i] is None:
return pallas_core.BlockSpec(block_spec.block_shape, new_index_map)
# TODO(wdvi): This is a hack needed since lowering rules require block shape
# to contain either all pl.Element or none
bcast_dim_block_shape = 1
if isinstance(block_spec.block_shape[i], pallas_core.Element):
bcast_dim_block_shape = pallas_core.Element(1)
new_block_shape = util.tuple_update( # pytype: disable=wrong-arg-types
block_spec.block_shape, i, bcast_dim_block_shape
)
return pallas_core.BlockSpec(new_block_shape, new_index_map)
def _push_bcast_block_spec(
block_spec: pallas_core.BlockSpec,
i: int,
size: int,
) -> pallas_core.BlockSpec:
bcast_dim_block_shape = size
if isinstance(block_spec.block_shape[i], pallas_core.Element):
bcast_dim_block_shape = pallas_core.Element(size)
new_block_shape = util.tuple_update( # pytype: disable=wrong-arg-types
block_spec.block_shape, i, bcast_dim_block_shape
)
return pallas_core.BlockSpec(new_block_shape, block_spec.index_map)
def _binop_usage_rule(prim, ctx, used_out: set[Usage]):
del prim
if used_out == {Usage.SCALAR_PREFETCH}:
return [{Usage.SCALAR_PREFETCH}, {Usage.SCALAR_PREFETCH}]
elif used_out == {Usage.REGULAR}:
usage = [{Usage.REGULAR} for _ in ctx.avals_in]
return usage
else:
return [set()] * len(ctx.avals_in)
def _binop_eval_rule(prim, ctx, x, y):
del ctx
return prim.bind(x, y)
def _binop_pull_rule(prim, ctx: PullRuleContext, block_spec):
l_block_spec = block_spec
r_block_spec = block_spec
left_aval, right_aval = ctx.avals_in
assert isinstance(left_aval, core.ShapedArray)
assert isinstance(right_aval, core.ShapedArray)
@ctx.set_eval_function
def _eval_function(_, x, y):
sp_index = 0
if x is None:
x = ctx.scalar_prefetch_fn()[sp_index]
sp_index += 1
if y is None:
y = ctx.scalar_prefetch_fn()[sp_index]
return prim.bind(x, y)
if not right_aval.shape:
return [block_spec, pallas_core.no_block_spec]
if not left_aval.shape:
return [pallas_core.no_block_spec, block_spec]
for i, (l, r) in enumerate(
zip(left_aval.shape, right_aval.shape, strict=True)
):
if l == 1 and r != 1:
l_block_spec = _pull_bcast_block_spec(l_block_spec, i)
if r == 1 and l != 1:
r_block_spec = _pull_bcast_block_spec(r_block_spec, i)
return [l_block_spec, r_block_spec]
def register_default_eval_rule(prim: core.Primitive):
def default_rule(ctx, *args, **params):
assert all(bs is pallas_core.no_block_spec for bs in ctx.out_block_specs)
return prim.bind(*args, **params)
register_eval_rule(prim)(default_rule)
def register_binop_rule(prim: core.Primitive):
register_pull_block_spec_rule(prim)(functools.partial(_binop_pull_rule, prim))
register_usage_rule(prim)(functools.partial(_binop_usage_rule, prim))
register_eval_rule(prim)(functools.partial(_binop_eval_rule, prim))
register_default_eval_rule(state_primitives.get_p)
register_binop_rule(lax.mul_p)
register_binop_rule(lax.add_p)
register_binop_rule(lax.sub_p)
register_binop_rule(lax.div_p)
register_binop_rule(lax.max_p)
register_binop_rule(lax.lt_p)
register_binop_rule(lax.le_p)
register_binop_rule(lax.eq_p)
register_binop_rule(lax.gt_p)
register_binop_rule(lax.ge_p)
register_binop_rule(lax.or_p)
register_binop_rule(lax.xor_p)
register_binop_rule(lax.and_p)
register_binop_rule(lax.shift_right_logical_p)
register_binop_rule(ad_util.add_any_p)
register_binop_rule(lax.pow_p)
@register_eval_rule(lax.select_n_p)
def _select_n_eval_rule(ctx: KernelEvalContext, *args):
return jax.lax.select_n(*args)
@register_pull_block_spec_rule(lax.select_n_p)
def _select_n_pull_block_spec_rule(
ctx: PullRuleContext, block_spec: pallas_core.BlockSpec
) -> Sequence[pallas_core.BlockSpec]:
in_aval = ctx.avals_in[0]
assert isinstance(in_aval, core.ShapedArray)
if in_aval.shape:
return [block_spec] * len(ctx.avals_in)
return [pallas_core.no_block_spec, *[block_spec] * (len(ctx.avals_in) - 1)]
@register_eval_rule(lax.squeeze_p)
def _squeeze_eval_rule(ctx: KernelEvalContext, x: jax.Array, **params: Any):
del ctx, params
return x
@register_pull_block_spec_rule(lax.squeeze_p)
def _squeeze_block_spec(
ctx: PullRuleContext,
block_spec: pallas_core.BlockSpec,
*,
dimensions: tuple[int, ...],
) -> Sequence[pallas_core.BlockSpec]:
del ctx
if block_spec is pallas_core.no_block_spec:
return [pallas_core.no_block_spec]
def new_index_map(*args):
idx = tuple(block_spec.index_map(*args))
assert len(idx) == len(block_spec.block_shape)
for dim in dimensions:
idx = util.tuple_insert(idx, dim, 0)
return idx
new_block_shape = tuple(block_spec.block_shape)
for dim in dimensions:
new_block_shape = util.tuple_insert(new_block_shape, dim, None)
return [pallas_core.BlockSpec(new_block_shape, new_index_map)]
@register_eval_rule(lax.slice_p)
def _slice_eval_rule(ctx, x, **params):
del params
out_block_shape = ctx.out_block_specs[0].block_shape
assert len(x.shape) == sum(
1
for bs in out_block_shape
if not (bs is None or isinstance(bs, pallas_core.Squeezed))
)
return x
def _offset_indexer(
bs: pallas_core.BlockDim | int | None,
indexer,
slice_start,
slice_size,
):
# Short-circuit if the slice start is just at zero.
if isinstance(slice_start, int) and slice_start == 0:
return indexer
match bs:
case None | pallas_core.Squeezed():
return indexer + slice_start
case pallas_core.Element(block_size):
_maybe_static_check(
slice_start % block_size == 0,
f'slice_start is not a multiple of block_size {block_size}',
)
_maybe_static_check(
slice_size % block_size == 0,
f'slice_size is not a multiple of block_size {block_size}',
)
return indexer + slice_start
case int() | pallas_core.Blocked():
block_size = _block_size(bs)
_maybe_static_check(
slice_start % block_size == 0,
f'slice_start is not a multiple of block_size {block_size}',
)
_maybe_static_check(
slice_size % block_size == 0,
f'slice_size is not a multiple of block_size {block_size}',
)
# indexer is a block index so we need to offset it by the block offset.
return indexer + slice_start // block_size
case pallas_core.BoundedSlice(block_size):
assert isinstance(indexer, indexing.Slice)
_maybe_static_check(
indexer.start % block_size == 0,
f'slice_start is not a multiple of block_size {block_size}',
)
_maybe_static_check(
indexer.size % block_size == 0,
f'slice_size is not a multiple of block_size {block_size}',
)
return indexing.ds(indexer.start + slice_start, indexer.size)
case _:
raise ValueError(f'Unsupported block size {bs}')
def _maybe_static_check(pred: bool, msg: str):
# Tries to emit a static error if possible, otherwise falls back to runtime.
from jax.experimental import checkify
if isinstance(pred, jax.Array):
checkify.check(pred, msg, debug=True)
else:
if not pred:
raise ValueError(msg)
@register_pull_block_spec_rule(lax.slice_p)
def _slice_rule(
    ctx: PullRuleContext,
    block_spec: pallas_core.BlockSpec,
    *,
    start_indices: tuple[int, ...],
    limit_indices: tuple[int, ...],
    strides: tuple[int, ...] | None,
):
  """Pull rule for `lax.slice`.

  Keeps the output block shape and rewrites the index map so each block
  index is offset by the (static) slice start via `_offset_indexer`.
  """
  del ctx
  if strides is not None and not all(stride == 1 for stride in strides):
    raise NotImplementedError('strides are not supported yet')
  slice_sizes = tuple(
      int(end - start) for start, end in zip(start_indices, limit_indices)
  )
  # Do some basic checks
  for bs, slice_start, slice_size in zip(
      block_spec.block_shape, start_indices, slice_sizes
  ):
    match bs:
      case None | pallas_core.Squeezed():
        continue
      case pallas_core.BoundedSlice() | pallas_core.Element():
        block_size = _block_size(bs)
        # Require that block_size no bigger than the slice.
        if block_size > slice_size:
          raise ValueError(
              f'Block size {block_size} is larger than the slice size'
              f' {slice_size}'
          )
      case _:
        # Blocked/int dims: the slice must be block-aligned on both ends.
        block_size = _block_size(bs)
        assert slice_start % block_size == 0, (
            start_indices,
            block_spec.block_shape,
        )
        assert slice_size % block_size == 0, (
            slice_sizes,
            block_spec.block_shape,
        )

  def new_index_map(*args):
    idx = block_spec.index_map(*args)
    assert len(idx) == len(block_spec.block_shape)
    idx = tuple(
        _offset_indexer(bs, i, start, size)
        for bs, i, start, size in zip(
            block_spec.block_shape, idx, start_indices, slice_sizes, strict=True
        )
    )
    return idx

  return [pallas_core.BlockSpec(block_spec.block_shape, new_index_map)]
@register_usage_rule(lax.dynamic_slice_p)
def _dynamic_slice_usage_rule(ctx, used_out: set[Usage], **params):
  """Usage rule for `dynamic_slice`.

  The sliced operand inherits the output's usage; the start-index operands
  are demanded as scalar prefetch (the index map needs them on the host).
  """
  del params
  num_in = len(ctx.avals_in)
  if used_out == {Usage.SCALAR_PREFETCH}:
    raise NotImplementedError('scalar prefetch not supported yet')
  if used_out != {Usage.REGULAR}:
    # Output unused: request nothing from any input.
    return [set()] * num_in
  # Operand 0 is the array being sliced; operands 1..n-1 are start indices.
  return [used_out] + [{Usage.SCALAR_PREFETCH}] * (num_in - 1)
@register_eval_rule(lax.dynamic_slice_p)
def _dynamic_slice_eval_rule(ctx, x, *args, **params):
  """Kernel evaluation of `dynamic_slice`: a passthrough, since the pull
  rule folded the dynamic start indices into the operand's index map."""
  del ctx, params
  return x
@register_pull_block_spec_rule(lax.dynamic_slice_p)
def _dynamic_slice_rule(
    ctx: PullRuleContext,
    block_spec: pallas_core.BlockSpec,
    *,
    slice_sizes: tuple[int, ...],
):
  """Pull rule for `dynamic_slice`: offsets block indices by the (scalar
  prefetched) dynamic start indices."""

  def new_index_map(*args):
    # The dynamic start indices are fetched via scalar prefetch so they are
    # available when computing block indices.
    slice_starts = ctx.scalar_prefetch_fn()
    if len(slice_starts) != len(block_spec.block_shape):
      raise ValueError(
          f'Expected {len(block_spec.block_shape)} slice starts, got'
          f' {len(slice_starts)}'
      )
    idx = block_spec.index_map(*args)
    assert len(idx) == len(block_spec.block_shape)
    # Once we have the indices, we need to offset them by the dynamic slice
    # indices. The dynamic slice indices index the full array. For example,
    # let's say we have a [l, m, n] array and are provided 3 dynamic slice
    # start indices [i, j, k] with sizes [s_l, s_m, s_n]. To perform the slice,
    # we need to compute the indices of the block that correspond to that slice
    # in the [l, m, n] array. If we have block sizes [b_l, b_m, b_n], we require
    # that i % b_l == 0, j % b_m == 0, k % b_n == 0 and the slice sizes are
    # multiples of the block sizes. The indices of the block that correspond to
    # the slice are then given by (i // b_l, j // b_m, k // b_n).
    # We then add these block indices to block indices produced by the index
    # map
    block_indices = tuple(
        _offset_indexer(s, i, start, size)
        for i, s, start, size in zip(
            idx, block_spec.block_shape, slice_starts, slice_sizes, strict=True
        )
    )
    return block_indices

  new_block_spec = pallas_core.BlockSpec(block_spec.block_shape, new_index_map)
  # Only the sliced operand gets a block spec; the start indices are scalars.
  return [new_block_spec] + [pallas_core.no_block_spec] * (
      len(ctx.avals_in) - 1
  )
@register_pull_block_spec_rule(state_primitives.swap_p)
def _swap_pull_rule(
    ctx: PullRuleContext,
    block_spec: pallas_core.BlockSpec,
    **kwargs,
):
  """Pull rule for `swap`: the ref and the value written into it both use
  the output's block spec unchanged."""
  del ctx, kwargs
  return [block_spec] * 2
@register_eval_rule(state_primitives.swap_p)
def _swap_eval_rule(ctx: KernelEvalContext, ref, val, *idx, tree):
  """Kernel evaluation of `swap` on a blocked ref.

  Only full, trivial slices of the ref are supported; the ref is re-indexed
  by the output block indices and swapped in place.

  Raises:
    NotImplementedError: for multiple indexers or any non-trivial indexing.
  """
  indexers = tree_util.tree_unflatten(tree, idx)
  ref_aval, _ = ctx.avals_in[:2]
  indexers_avals = tree_util.tree_unflatten(tree, ctx.avals_in[2:])
  assert hasattr(ref_aval, 'shape')
  if len(indexers) > 1:
    raise NotImplementedError('swap not supported yet')
  if not indexers_avals:
    indexer_aval = indexing.NDIndexer.make_trivial_indexer(ref_aval.shape)
  else:
    indexer_aval = indexers_avals[0]
  # Require a static, stride-1, zero-based, full-extent slice on every dim.
  for idx_aval, size in zip(indexer_aval.indices, ref_aval.shape, strict=True):
    if not isinstance(idx_aval, indexing.Slice):
      raise NotImplementedError('swap not supported yet')
    if not isinstance(idx_aval.start, int):
      raise NotImplementedError('swap not supported yet')
    if not isinstance(idx_aval.size, int):
      raise NotImplementedError('swap not supported yet')
    if idx_aval.stride != 1:
      raise NotImplementedError('swap not supported yet')
    if idx_aval.start != 0:
      raise NotImplementedError('swap not supported yet')
    if idx_aval.size != size:
      raise NotImplementedError('swap not supported yet')
  # We have a pure slice so now we can just re-index the ref according to the
  # block indices.
  block_spec = ctx.out_block_specs[0]
  block_idx = ctx.get_out_block_indices()[0]

  def _slice(i, b):
    # BUG FIX: the None check must come before the isinstance check —
    # previously `isinstance(b, int)` was tested first, which made the
    # `b is None` (squeezed dim) branch unreachable and raised instead.
    if b is None:
      return i
    if not isinstance(b, int):
      raise NotImplementedError('swap not supported yet')
    return indexing.ds(i * b, b)

  indexer = tuple(
      _slice(i, b)
      for i, b in zip(block_idx, block_spec.block_shape, strict=True)
  )
  return ref.swap(val, idx=indexer)
@register_pull_block_spec_rule(state_primitives.get_p)
def _get_pull_rule(
    ctx: PullRuleContext, block_spec: pallas_core.BlockSpec, *, tree
):
  """Pull rule for `get`: maps the output block spec back onto the ref.

  Scalar-indexed ref dimensions become `Squeezed` block dims (with a
  constant 0 block index); sliced dimensions must be trivial full slices
  and inherit the output block dims.
  """
  ref_aval = ctx.avals_in[0]
  assert hasattr(ref_aval, 'shape')
  indexers_avals = tree_util.tree_unflatten(tree, ctx.avals_in[1:])
  if len(indexers_avals) > 1:
    raise NotImplementedError('get not supported yet')
  if not indexers_avals:
    indexer_aval = indexing.NDIndexer.make_trivial_indexer(ref_aval.shape)
  else:
    indexer_aval = indexers_avals[0]
  block_shape_iter = iter(block_spec.block_shape)
  block_shape = []
  if not all(
      bd is None
      or isinstance(bd, (int, pallas_core.Blocked, pallas_core.Squeezed))
      for bd in block_spec.block_shape
  ):
    raise NotImplementedError('get not supported yet')
  for idx_aval, size in zip(indexer_aval.indices, ref_aval.shape, strict=True):
    if not isinstance(idx_aval, indexing.Slice):
      # Scalar index: this ref dim is squeezed out of the output.
      assert hasattr(idx_aval, 'shape') and not idx_aval.shape
      block_shape.append(pallas_core.Squeezed())
      continue
    if not isinstance(idx_aval.start, int):
      raise NotImplementedError('get not supported yet')
    if not isinstance(idx_aval.size, int):
      raise NotImplementedError('get not supported yet')
    if idx_aval.stride != 1:
      raise NotImplementedError('get not supported yet')
    if idx_aval.start != 0:
      raise NotImplementedError('get not supported yet')
    if idx_aval.size != size:
      raise NotImplementedError('get not supported yet')
    bd = next(block_shape_iter)
    block_shape.append(_block_size(bd))
  assert next(block_shape_iter, None) is None

  def new_index_map(*args):
    idx = block_spec.index_map(*args)
    idx_iter = iter(idx)
    # BUG FIX: iterate the block dims themselves. The previous code iterated
    # `range(len(block_shape))`, so `bd` was an int and the None/Squeezed
    # test was always False — the constant 0 index for squeezed dims was
    # never emitted and the exhaustion assert could not hold with them.
    indices = tuple(
        0
        if (bd is None or isinstance(bd, pallas_core.Squeezed))
        else next(idx_iter)
        for bd in block_shape
    )
    assert next(idx_iter, None) is None
    return indices

  new_block_spec = pallas_core.BlockSpec(block_shape, new_index_map)
  return ([new_block_spec]
          + [pallas_core.no_block_spec] * (len(ctx.avals_in) - 1))
@register_eval_rule(state_primitives.get_p)
def _get_eval_rule(ctx: KernelEvalContext, ref, *idx, tree):
  """Kernel evaluation of `get`: re-indexes the blocked ref by the output
  block indices (only trivial full slices are supported)."""
  indexers = tree_util.tree_unflatten(tree, idx)
  ref_aval = ctx.avals_in[0]
  indexers_avals = tree_util.tree_unflatten(tree, ctx.avals_in[1:])
  ref_block_spec = ctx.in_block_specs[0]
  assert hasattr(ref_aval, 'shape')
  if len(indexers) > 1:
    raise NotImplementedError('get not supported yet')
  if not indexers:
    indexer = indexing.NDIndexer.make_trivial_indexer(ref_aval.shape)
    indexer_aval = indexer
  else:
    indexer = indexers[0]
    indexer_aval = indexers_avals[0]
  block_indexer = []

  def _slice(i, b):
    # Maps one block index + block dim to an indexer into the blocked ref.
    match b:
      case int():
        return indexing.ds(i * b, b)
      case pallas_core.Blocked(bs):
        return indexing.ds(i * bs, bs)
      case pallas_core.Squeezed() | None:
        return i
      case _:
        raise NotImplementedError('get not supported yet')

  if ref_block_spec is pallas_core.no_block_spec:
    # Short-circuit if the ref is not blocked.
    return state_primitives.get_p.bind(ref, *idx, tree=tree)
  block_idx_iter = iter(ctx.get_out_block_indices()[0])
  # NOTE: the loop variable `idx` below shadows the `*idx` parameter; by this
  # point the original tuple is no longer needed.
  for idx_aval, size, idx, bd in zip(
      indexer_aval.indices,
      ref_aval.shape,
      indexer.indices,
      ref_block_spec.block_shape,
      strict=True,
  ):
    if not isinstance(idx_aval, indexing.Slice):
      # Scalar index on this dim: pass it through unchanged.
      assert hasattr(idx_aval, 'shape') and not idx_aval.shape, idx_aval
      assert bd is None or isinstance(bd, pallas_core.Squeezed)
      block_indexer.append(idx)
      continue
    if not isinstance(idx_aval.start, int):
      raise NotImplementedError('get not supported yet')
    if not isinstance(idx_aval.size, int):
      raise NotImplementedError('get not supported yet')
    if idx_aval.stride != 1:
      raise NotImplementedError('get not supported yet')
    if idx_aval.start != 0:
      raise NotImplementedError('get not supported yet')
    if idx_aval.size != size:
      raise NotImplementedError('get not supported yet')
    bidx = next(block_idx_iter)
    block_indexer.append(_slice(bidx, bd))
  assert next(block_idx_iter, None) is None
  return ref.get(idx=tuple(block_indexer))
@register_eval_rule(lax.concatenate_p)
def _concatenate_eval_rule(ctx: KernelEvalContext, *args, dimension):
  """Kernel evaluation of `concatenate`.

  Either the block spans the whole concat dim (real concatenate) or each
  operand's extent divides the block size, in which case the block belonging
  to the current block index is selected out of the operands.
  """
  # We now handle the case where each of the concatenated array dimensions
  # divides the block size.
  block_spec = ctx.out_block_specs[0]
  block_shape = block_spec.block_shape
  is_element_block = [isinstance(bd, pallas_core.Element) for bd in block_shape]
  if any(is_element_block):
    raise NotImplementedError(
        'Concatenation with Element indexing is not yet supported.'
    )
  block_dim = block_shape[dimension]
  if block_dim is None:
    block_dim = 1
  if block_dim == sum(aval.shape[dimension] for aval in ctx.avals_in):  # pytype: disable=attribute-error
    # Handle special case if the block contains all of the concatenated
    # array.
    return jax.lax.concatenate(args, dimension=dimension)
  num_blocks = []
  for aval in ctx.avals_in:
    assert isinstance(aval, core.ShapedArray)
    if aval.shape[dimension] % block_dim != 0:
      raise ValueError(
          f'Shape along concat dimension {dimension} must be divisible by the'
          f' block shape {block_shape[dimension]} for all children. Got shape'
          f' {aval.shape}.'
      )
    num_blocks.append(aval.shape[dimension] // block_dim)
  # Block-index ranges [start, end) covered by each operand along the dim.
  ends = np.cumsum(num_blocks).astype(np.int32)
  starts = np.concatenate(([0], ends[:-1])).astype(np.int32)
  block_indices = ctx.get_out_block_indices()[0]
  block_idx = block_indices[dimension]
  valid_index = 0
  for i in range(len(ctx.avals_in)):
    start, end = starts[i], ends[i]
    is_valid = (start <= block_idx) & (block_idx < end)
    valid_index = jax.lax.select(is_valid, i, valid_index)
  # Cast bf16 operands up before select_n, then cast the result back —
  # presumably to avoid a bf16 limitation in select_n (TODO confirm).
  out_dtype = args[0].dtype
  args = [a.astype(jnp.float32) if a.dtype == jnp.bfloat16 else a for a in args]
  valid_block = jax.lax.select_n(valid_index, *args)
  return valid_block.astype(out_dtype)
@register_pull_block_spec_rule(lax.concatenate_p)
def _concatenate_rule(
    ctx: PullRuleContext,
    block_spec: pallas_core.BlockSpec,
    *,
    dimension: int,
):
  """Pull rule for `concatenate`.

  If the block covers the entire concat dim, every operand gets a block spec
  spanning its own extent. Otherwise each operand's extent must divide the
  block size and each operand's index map clamps out-of-range block indices
  (the eval rule selects the valid operand at runtime).
  """
  block_shape = block_spec.block_shape
  is_element_block = [isinstance(bd, pallas_core.Element) for bd in block_shape]
  if any(is_element_block):
    raise NotImplementedError(
        'Concatenation with Element indexing is not yet supported.'
    )
  num_blocks = []
  block_dim = block_shape[dimension]
  if block_dim is None or isinstance(block_dim, pallas_core.Squeezed):
    block_dim = 1
  if block_dim == sum(aval.shape[dimension] for aval in ctx.avals_in):  # pytype: disable=attribute-error
    # Handle special case if the block contains all of the concatenated
    # array.
    new_shapes = [
        util.tuple_update(  # pytype: disable=wrong-arg-types
            block_spec.block_shape, dimension, aval.shape[dimension]  # pytype: disable=attribute-error
        )
        for aval in ctx.avals_in
    ]
    new_block_specs = [
        block_spec.replace(block_shape=shape) for shape in new_shapes
    ]
    return new_block_specs
  # We now handle the case where each of the concatenated array dimensions
  # divides the block size.
  for aval in ctx.avals_in:
    assert isinstance(aval, core.ShapedArray)
    if aval.shape[dimension] % block_dim != 0:
      raise ValueError(
          f'Shape along concat dimension {dimension} must be divisible by the'
          f' block shape {block_shape[dimension]} for all children. Got shape'
          f' {aval.shape}.'
      )
    num_blocks.append(aval.shape[dimension] // block_dim)
  # Block-index ranges [start, end) covered by each operand along the dim.
  ends = np.cumsum(num_blocks).astype(np.int32)
  starts = np.concatenate(([0], ends[:-1])).astype(np.int32)

  def make_block_spec(child_index: int):
    def new_index_map(*args):
      idx = block_spec.index_map(*args)
      block_idx = idx[dimension]
      is_valid = (starts[child_index] <= block_idx) & (
          block_idx < ends[child_index]
      )
      # Out-of-range indices are clamped to a valid block of this child so
      # the fetch is safe; the eval rule discards the fetched value.
      padding_index = jnp.where(
          block_idx < starts[child_index], 0, num_blocks[child_index] - 1
      )
      block_idx = jnp.where(
          is_valid, block_idx - starts[child_index], padding_index
      )
      return util.tuple_update(idx, dimension, block_idx)

    return pallas_core.BlockSpec(block_spec.block_shape, new_index_map)

  return [make_block_spec(i) for i in range(len(ctx.avals_in))]
@register_usage_rule(lax.broadcast_in_dim_p)
def _broadcast_in_dim_usage_rule(ctx, used_out: set[Usage], **params):
  """Usage rule for `broadcast_in_dim`: a scalar operand is demanded as
  scalar prefetch, a non-scalar operand keeps regular usage."""
  del params
  if used_out == {Usage.SCALAR_PREFETCH}:
    raise NotImplementedError('scalar prefetch not supported yet')
  if used_out != {Usage.REGULAR}:
    # Output unused: nothing is needed from the operand.
    return [set()]
  operand_is_scalar = not ctx.avals_in[0].shape
  if operand_is_scalar:
    return [{Usage.SCALAR_PREFETCH}]
  return [{Usage.REGULAR}]
@register_eval_rule(lax.broadcast_in_dim_p)
def _broadcast_in_dim_eval_rule(
    eval_ctx: KernelEvalContext, x, broadcast_dimensions, shape, **params
):
  """Kernel evaluation of `broadcast_in_dim`: re-broadcasts the operand
  block into the output block shape (None dims dropped)."""
  del params  # Unused.
  in_shape = eval_ctx.avals_in[0].shape  # pytype: disable=attribute-error
  if in_shape == shape:
    # Dummy broadcast
    return x
  shape = tuple(map(_block_size, eval_ctx.out_block_specs[0].block_shape))
  # Re-target broadcast dims into the squeezed (None-dims-removed) block
  # shape by subtracting the number of dropped dims to the left of each.
  dims = tuple(
      d - sum(s is None for s in shape[:d])
      for d in broadcast_dimensions
      if shape[d] is not None
  )
  shape = tuple(s for s in shape if s is not None)
  return jax.lax.broadcast_in_dim(x, broadcast_dimensions=dims, shape=shape)
@register_pull_block_spec_rule(lax.broadcast_in_dim_p)
def _broadcast_in_dim_pull_rule(
    ctx: PullRuleContext,
    block_spec: pallas_core.BlockSpec,
    *,
    shape: tuple[int, ...],
    broadcast_dimensions: tuple[int, ...],
    sharding: jax.sharding.Sharding,
):
  """Pull rule for `broadcast_in_dim`.

  Scalar operands get no block spec (they are scalar-prefetched). Otherwise
  the operand's block spec keeps only the dims named in
  `broadcast_dimensions`, pinning size-1 (broadcasted) dims to block 0.
  """
  del shape, sharding
  shape = ctx.avals_in[0].shape  # pytype: disable=attribute-error
  if not shape:
    return [pallas_core.no_block_spec]

  def new_index_map(*args):
    idx = block_spec.index_map(*args)
    # Size-1 operand dims are broadcast: always fetch block 0 there.
    return tuple(
        0 if (d == 1) else idx[i]
        for i, d in zip(broadcast_dimensions, shape, strict=True)
    )

  new_block_shape = tuple(
      b if ((b := block_spec.block_shape[i]) is None) or (d != 1) else 1
      for i, d in zip(broadcast_dimensions, shape, strict=True)
  )
  return [pallas_core.BlockSpec(new_block_shape, new_index_map)]
@register_eval_rule(lax.transpose_p)
def _transpose_eval_rule(
    eval_ctx: KernelEvalContext, x, permutation: tuple[int, ...]
):
  """Kernel evaluation of `transpose`: applies the permutation restricted
  to the non-squeezed block dims."""
  block_spec = eval_ctx.out_block_specs[0]
  block_shape = block_spec.block_shape
  block_shape_no_nones = tuple(
      bs
      for bs in block_shape
      if not (bs is None or isinstance(bs, pallas_core.Squeezed))
  )
  # Assign each non-squeezed dim its position in the squeezed block array;
  # squeezed dims map to None and are dropped from the permutation.
  block_dims_iter = iter(range(len(block_shape_no_nones)))
  expanded_block_dims = [
      None
      if (bs is None or isinstance(bs, pallas_core.Squeezed))
      else next(block_dims_iter)
      for bs in block_shape
  ]
  assert next(block_dims_iter, None) is None
  permuted_block_dims = [expanded_block_dims[p] for p in permutation]
  new_permutation = [p for p in permuted_block_dims if p is not None]
  return jax.lax.transpose(x, permutation=new_permutation)
@register_pull_block_spec_rule(lax.transpose_p)
def _transpose_pull_rule(
    ctx: PullRuleContext,
    block_spec: pallas_core.BlockSpec,
    *,
    permutation: tuple[int, ...],
):
  """Pull rule for `transpose`: permutes the block shape and the indices
  produced by the index map.

  Raises:
    NotImplementedError: if the permutation mixes the last two (minor)
      dimensions with leading dimensions.
  """
  block_shape = block_spec.block_shape
  new_shape = tuple(block_shape[i] for i in permutation)
  aval_in = ctx.avals_in[0]
  assert isinstance(aval_in, core.ShapedArray)
  assert len(block_shape) == len(aval_in.shape)
  # BUG FIX: require the last two permutation slots to hold the last two
  # axes. The previous check compared `set(permutation[-2:])` against the
  # set of its own two elements, which is always equal, so the guard never
  # fired.
  minor_dims = {len(permutation) - 1, len(permutation) - 2}
  if set(permutation[-2:]) != minor_dims:
    raise NotImplementedError(
        'Cannot permute last two dimensions with leading dimensions.'
    )

  def new_index_map(*args):
    original_idxs = block_spec.index_map(*args)
    return tuple(original_idxs[i] for i in permutation)

  return [pallas_core.BlockSpec(new_shape, new_index_map)]
@register_eval_rule(lax.convert_element_type_p)
def _convert_element_type_eval_rule(
    eval_ctx: KernelEvalContext, x, new_dtype, **params
):
  """Kernel evaluation of `convert_element_type`: a plain elementwise cast
  of the block (remaining params such as weak_type are ignored)."""
  return jax.lax.convert_element_type(x, new_dtype)
@register_pull_block_spec_rule(lax.convert_element_type_p)
def _convert_element_type_pull_rule(
    ctx: PullRuleContext,
    block_spec: pallas_core.BlockSpec,
    *,
    new_dtype: jnp.dtype,
    weak_type: bool,
    sharding: jax.sharding.Sharding,
):
  """Pull rule for `convert_element_type`: a cast is elementwise, so the
  operand uses the output block spec unchanged."""
  del ctx, new_dtype, weak_type, sharding
  return [block_spec]
@register_eval_rule(lax.bitcast_convert_type_p)
def _bitcast_convert_type_eval_rule(eval_ctx: KernelEvalContext, x, new_dtype):
  """Kernel evaluation of `bitcast_convert_type`: bitcast the block."""
  return jax.lax.bitcast_convert_type(x, new_dtype)
@register_pull_block_spec_rule(lax.bitcast_convert_type_p)
def _bitcast_convert_type_pull_rule(
    ctx: PullRuleContext,
    block_spec: pallas_core.BlockSpec,
    *,
    new_dtype: jnp.dtype,
):
  """Pull rule for `bitcast_convert_type`: shape-preserving (same-itemsize)
  bitcasts reuse the output block spec; width-changing casts would alter
  the minor dimension and are not supported."""
  old_dtype = ctx.avals_in[0].dtype  # pytype: disable=attribute-error
  if old_dtype.itemsize != new_dtype.itemsize:
    raise NotImplementedError(
        'bitcast_convert_type with different bitwidths not supported yet:'
        f' {old_dtype=}, {new_dtype=}'
    )
  return [block_spec]
@register_eval_rule(prng.random_bits_p)
def _random_bits_eval_rule(eval_ctx: KernelEvalContext, key, bit_width, shape):
  """Kernel evaluation of `random_bits`: derives a per-block key and samples
  a block's worth of bits."""
  del shape
  block_spec = eval_ctx.out_block_specs[0]
  indices = eval_ctx.get_out_block_indices()[0]
  block_shape = block_spec.block_shape
  # This is the important part here: we fold in block indices into the key so
  # each block gets different random numbers.
  for idx in indices:
    key = jax.random.fold_in(key, idx)
  return prng.random_bits(key, bit_width=bit_width, shape=block_shape)
@register_pull_block_spec_rule(prng.random_bits_p)
def _random_bits_pull_rule(
    ctx: PullRuleContext,
    block_spec: pallas_core.BlockSpec,
    **_,
):
  """Pull rule for `random_bits`: the PRNG key is never blocked — it is
  carried whole in key memory space."""
  del ctx, block_spec
  return [
      pallas_core.BlockSpec(
          block_shape=None, memory_space=pallas_core.MemorySpace.KEY
      )
  ]
@register_eval_rule(prng.random_wrap_p)
def _random_wrap_eval_rule(eval_ctx: KernelEvalContext, arr, *, impl):
  """Kernel evaluation of `random_wrap`: wraps raw key data into a typed
  PRNG key for the given impl."""
  del eval_ctx
  return jax.random.wrap_key_data(arr, impl=impl)
@register_pull_block_spec_rule(prng.random_wrap_p)
def _random_wrap_pull_rule(
    ctx: PullRuleContext, block_spec: pallas_core.BlockSpec, *, impl
):
  """Pull rule for `random_wrap`: the raw key data is passed whole
  (unblocked)."""
  del ctx, block_spec, impl
  return [pallas_core.BlockSpec(block_shape=None)]
@register_eval_rule(lax.iota_p)
def _iota_eval_rule(
    eval_ctx: KernelEvalContext, *, dimension, shape, dtype, sharding
):
  """Kernel evaluation of `iota`: materializes the local block of a global
  iota by offsetting a block-local iota with the block index."""
  del sharding
  block_spec = eval_ctx.out_block_specs[0]
  block_idx = eval_ctx.get_out_block_indices()[0]
  assert len(block_idx) == len(shape)
  iota_shape = tuple(
      _block_size(s) for s in block_spec.block_shape if s is not None
  )
  # Re-target `dimension` into the squeezed block shape.
  # NOTE(review): the filter above drops `s is None` before `_block_size`,
  # while this sum tests `_block_size(s) is None` — confirm the two agree
  # for Squeezed dims.
  dim_ = dimension - sum(
      _block_size(s) is None for s in block_spec.block_shape[:dimension]
  )
  local_iota = jax.lax.broadcasted_iota(dtype, iota_shape, dim_)
  return local_iota + block_idx[dimension] * _block_size(
      block_spec.block_shape[dimension]
  )
@register_pull_block_spec_rule(lax.iota_p)
def _iota_pull_rule(
    ctx: PullRuleContext,
    block_spec: pallas_core.BlockSpec,
    *,
    dtype: jnp.dtype,
    dimension: int,
    shape: tuple[int, ...],
    sharding: jax.sharding.Sharding,
):
  """Pull rule for `iota`: iota has no inputs, so nothing is pulled; the
  iota dimension just must not be squeezed away by the block spec."""
  del ctx, sharding, dtype, shape
  if block_spec.block_shape[dimension] is None:
    raise ValueError(
        f'Cannot pull iota along dimension {dimension} with None block size.'
    )
  return []
def _pattern_match_lanes_to_sublanes_reshape(
aval_in: core.ShapedArray,
aval_out: core.ShapedArray,
) -> bool:
# Pattern matches a reshape of the form (..., n * l) -> (..., n, l)
# where l is a multiple of 128.
*leading_out, last_dim_in = aval_in.shape
*leading_in, second_to_last_dim_out, last_dim = aval_out.shape
if leading_in != leading_out:
return False
if second_to_last_dim_out * last_dim != last_dim_in:
return False
if last_dim % 128 != 0:
return False
return True
@register_pull_block_spec_rule(lax.reshape_p)
def _reshape_pull_rule(
    ctx: PullRuleContext,
    block_spec: pallas_core.BlockSpec,
    *,
    dimensions: tuple[int, ...] | None,
    new_sizes: tuple[int, ...],
    sharding: jax.sharding.Sharding,
):
  """Pull rule for `reshape`.

  Two supported patterns:
    1. pure dim merges (..., m, n, ...) -> (..., m * n, ...), where each
       merged output block dim is re-split into input block dims;
    2. lanes-to-sublanes splits (..., n * l) -> (..., n, l) with l a
       multiple of 128, where the input block is a flat run of
       sublane*lane elements.
  Anything else raises NotImplementedError.
  """
  del sharding, new_sizes
  if dimensions is not None:
    raise NotImplementedError('reshape with None dimensions not supported yet')
  aval_in = ctx.avals_in[0]
  assert isinstance(aval_in, core.ShapedArray)
  aval_out = ctx.avals_out[0]
  assert isinstance(aval_out, core.ShapedArray)
  block_shape = block_spec.block_shape
  shape_in = aval_in.shape
  shape_out = aval_out.shape
  assert np.prod(shape_in) == np.prod(shape_out)
  # Handle merged dims; i.e. (..., m, n, ...) -> (..., m * n, ...).
  # Greedily group consecutive input dims whose product equals each output
  # dim; if a group overshoots, the reshape is a split, not a merge.
  i = 0
  j = 0
  merged_dims = []
  while i < len(shape_in) and j < len(shape_out):
    merged = []
    while not merged or np.prod(merged) < shape_out[j]:
      merged.append(shape_in[i])
      i += 1
    if np.prod(merged) > shape_out[j]:
      break  # Dimension has been split (or something more complex).
    merged_dims.append(merged)
    j += 1
  if (i == len(shape_in)) and np.prod(shape_out[j:]) == 1:
    new_block_shape = []
    new_grids = []
    for d, bd, merged in zip(shape_out, block_shape, merged_dims):
      bs = pallas_core.get_block_size(bd)
      if len(merged) == 1:
        # Unmerged dim: block spec passes through (a single-entry grid).
        new_grids.append((merged[0] // bs,))
        new_block_shape.append(bd if bd is not None else 1)
        continue
      if not isinstance(bd, (int, pallas_core.Blocked)):
        raise NotImplementedError('reshape merge must use `Blocked` block size')
      num_blocks = pallas_utils.cdiv(d, bs)
      # Split the merged block size back across the input dims, minor first.
      new_block_dims = []
      for md in reversed(merged):
        if bs % md == 0:
          new_block_dims.append(md)
          bs //= md
        elif md % bs == 0:
          new_block_dims.append(bs)
          bs = 1
        else:
          raise NotImplementedError('unsupported reshape merge')
      new_block_dims.reverse()
      new_block_shape.extend(new_block_dims)
      new_grid = [
          np.int32(md // pallas_core.get_block_size(bd))
          for md, bd in zip(merged, new_block_dims)
      ]
      new_grids.append(tuple(new_grid))
      if np.prod(new_grid) != num_blocks:
        raise NotImplementedError('reshape merge must maintain grid size')

    def new_index_map(*args):
      # Unravel each output block index into the per-input-dim grid.
      # NOTE: The `zip` will drop indices for any trailing `1` dims.
      idxs = (
          jnp.unravel_index(idx, new_grid) if len(new_grid) > 1 else (idx,)
          for idx, new_grid in zip(block_spec.index_map(*args), new_grids)
      )
      return sum(idxs, ())

    return [pallas_core.BlockSpec(tuple(new_block_shape), new_index_map)]
  # Handle the case where we reshape from (..., n * l) -> (..., n, l)
  if _pattern_match_lanes_to_sublanes_reshape(aval_in, aval_out):
    if not isinstance(block_shape[-1], (int, pallas_core.Blocked)):
      raise NotImplementedError(
          f'reshape must use Blocked block size on lanes: {block_shape}'
      )
    if not isinstance(block_shape[-2], (int, pallas_core.Blocked)):
      raise NotImplementedError(
          f'reshape must use Blocked block size on sublanes: {block_shape}'
      )
    last_dim = aval_out.shape[-1]
    block_sublane_dim, block_lane_dim = (
        _block_size(block_shape[-2]),
        _block_size(block_shape[-1]),
    )
    total_block_size = block_sublane_dim * block_lane_dim
    if total_block_size % 128 != 0:
      raise NotImplementedError(
          'reshape with non-128 aligned block size on lanes not supported yet'
      )
    if block_lane_dim != last_dim:
      raise NotImplementedError(
          'reshape with non-matching block size on lanes not supported yet:'
          f' {block_shape}'
      )
    # The input block is the flattened (sublane, lane) block.
    new_block_shape = block_shape[:-2] + (total_block_size,)

    def new_index_map(*args):  # pylint: disable=function-redefined
      *idx, second_to_last, last = block_spec.index_map(*args)
      # last should always be 0
      # NOTE(review): this condition only raises when `last` is a non-int
      # that compares unequal to 0; a nonzero Python int passes silently.
      # Presumably `isinstance(last, int) and last != 0` (or an unconditional
      # equality check) was intended — confirm.
      if not isinstance(last, int) and last != 0:
        raise NotImplementedError(
            'Must select entire block on last dimension for reshape'
        )
      return *idx, second_to_last

    return [pallas_core.BlockSpec(new_block_shape, new_index_map)]
  raise NotImplementedError(f'reshape not supported yet: {aval_in}, {aval_out}')
@register_eval_rule(lax.reshape_p)
def _reshape_eval_rule(
    eval_ctx: KernelEvalContext, x, *, dimensions, new_sizes, sharding
):
  """Kernel evaluation of `reshape`: reshapes the block straight into the
  (non-None) output block shape."""
  del sharding, dimensions, new_sizes
  out_shape_nones = tuple(
      _block_size(s) for s in eval_ctx.out_block_specs[0].block_shape
  )
  out_shape = tuple(s for s in out_shape_nones if s is not None)
  # Because we have restricted the pull block spec rule, we can just apply a
  # basic reshape here.
  x = x.reshape(out_shape)
  return x
@register_pull_block_spec_rule(lax.reduce_sum_p)
def _reduce_sum_pull_rule(
    ctx: PullRuleContext,
    block_spec: pallas_core.BlockSpec,
    *,
    axes: tuple[int, ...],
    out_sharding,
):
  """Pull rule for `reduce_sum`: reduced axes are materialized as one full
  block (the whole axis, index 0); other axes keep the output block dims."""
  aval_in = ctx.avals_in[0]
  assert isinstance(aval_in, core.ShapedArray)
  new_block_shape = []
  block_shape = iter(block_spec.block_shape)
  for i, d in enumerate(aval_in.shape):
    if i in axes:
      # Reduced axis: pull the entire axis in one block.
      new_block_shape.append(pallas_core.Blocked(d))
    else:
      new_block_shape.append(next(block_shape))
  assert next(block_shape, None) is None

  def new_index_map(*args):
    idx = block_spec.index_map(*args)
    new_idx = []
    idx_iter = iter(idx)
    for i in range(len(aval_in.shape)):
      if i in axes:
        new_idx.append(0)
      else:
        new_idx.append(next(idx_iter))
    assert next(idx_iter, None) is None
    return tuple(new_idx)

  new_block_spec = block_spec.replace(
      block_shape=tuple(new_block_shape), index_map=new_index_map
  )
  return [new_block_spec]
@register_eval_rule(lax.reduce_sum_p)
def _reduce_sum_eval_rule(
    ctx: KernelEvalContext,
    x,
    *,
    axes: tuple[int, ...],
    out_sharding,
):
  """Kernel evaluation of `reduce_sum`: sums the block, requiring that each
  reduced axis is fully contained in the block (no cross-block partials)."""
  aval_in = ctx.avals_in[0]
  assert isinstance(aval_in, core.ShapedArray)
  block_shape = tuple(ctx.in_block_specs[0].block_shape)
  for i in axes:
    if _block_size(block_shape[i]) != aval_in.shape[i]:
      raise NotImplementedError(
          f'reduce_sum on partial blocks not supported: {aval_in=},'
          f' {block_shape=}'
      )
  return jax.lax.reduce_sum(x, axes=axes)
# Higher order primitives
@register_usage_rule(pjit.jit_p)
def _jit_usage_rule(
    ctx, used_out: list[set[Usage]], *, jaxpr: core.ClosedJaxpr, **_
):
  """Usage rule for `jit`: propagates output usages through the inner jaxpr
  and reads off the resulting usage of each invar."""
  inner = jaxpr.jaxpr
  usage_of = compute_usage(inner, used_out)
  return [usage_of(invar) for invar in inner.invars]
@register_eval_rule(pjit.jit_p)
def _jit_eval_rule(ctx: KernelEvalContext, *args, jaxpr, **kwargs):
  """Kernel evaluation of `jit`: inlines the inner jaxpr by pulling block
  specs through it and evaluating the resulting kernel function."""
  jaxpr, consts = jaxpr.jaxpr, jaxpr.consts
  if consts:
    raise NotImplementedError('pjit with consts not supported yet')
  out_tree = tree_util.tree_structure(tuple(jaxpr.outvars))
  in_tree = tree_util.tree_structure((tuple(jaxpr.invars), {}))

  def read_usage_env(_: core.Var):
    # Every inner var is treated as regularly used.
    return {Usage.REGULAR}

  _, env, _ = _pull_block_spec(
      jaxpr,
      ctx.out_block_specs,
      scalar_prefetch_handler=ctx.scalar_prefetch_handler,
      read_usage_env=read_usage_env,
      grid=ctx.grid,
  )
  kernel_fn = make_kernel_function(
      jaxpr,
      (),
      in_tree,
      out_tree,
      read_usage_env,
      ctx.in_block_specs,
      env,
      ctx.scalar_prefetch_handler,
      ctx.grid,
  )
  return kernel_fn(ctx.get_program_ids(), ctx.scalar_prefetch, *args)
@register_pull_block_spec_rule(pjit.jit_p)
def _jit_pull_block_spec_rule(
    ctx: PullRuleContext, out_block_specs, *, jaxpr, **kwargs
):
  """Pull rule for `jit`: recursively pulls the output block specs through
  the inner jaxpr and returns the input specs it derives."""
  jaxpr, consts = jaxpr.jaxpr, jaxpr.consts
  if consts:
    raise NotImplementedError('pjit with consts not supported yet')

  def read_usage_env(_: core.Var):
    # Every inner var is treated as regularly used.
    return {Usage.REGULAR}

  in_block_specs, _, _ = _pull_block_spec(
      jaxpr,
      out_block_specs,
      scalar_prefetch_handler=ctx.scalar_prefetch_handler,
      read_usage_env=read_usage_env,
      grid=ctx.grid,
  )
  return in_block_specs
@register_usage_rule(custom_derivatives.custom_jvp_call_p)
def _custom_jvp_call_usage_rule(
    ctx, used_out: list[set[Usage]], *, call_jaxpr: core.ClosedJaxpr, **_
):
  """Usage rule for `custom_jvp_call`: propagates usage through the primal
  call jaxpr (the custom JVP itself is irrelevant for usage)."""
  del ctx
  read_usage_env = compute_usage(call_jaxpr.jaxpr, used_out)
  in_usages = util.safe_map(read_usage_env, call_jaxpr.jaxpr.invars)
  return in_usages
@register_eval_rule(custom_derivatives.custom_jvp_call_p)
def _custom_jvp_call_eval_rule(
    ctx: KernelEvalContext, *args, call_jaxpr: core.ClosedJaxpr, **kwargs
):
  """Kernel evaluation of `custom_jvp_call`: inlines and evaluates the
  primal call jaxpr (mirrors `_jit_eval_rule`)."""
  jaxpr, consts = call_jaxpr.jaxpr, call_jaxpr.consts
  if consts:
    raise NotImplementedError('custom_jvp_call with consts not supported yet')
  out_tree = tree_util.tree_structure(tuple(jaxpr.outvars))
  in_tree = tree_util.tree_structure((tuple(jaxpr.invars), {}))

  def read_usage_env(_: core.Var):
    # Every inner var is treated as regularly used.
    return {Usage.REGULAR}

  _, env, _ = _pull_block_spec(
      jaxpr,
      ctx.out_block_specs,
      scalar_prefetch_handler=ctx.scalar_prefetch_handler,
      grid=ctx.grid,
      read_usage_env=read_usage_env,
  )
  kernel_fn = make_kernel_function(
      jaxpr,
      (),
      in_tree,
      out_tree,
      read_usage_env,
      ctx.in_block_specs,
      env,
      ctx.scalar_prefetch_handler,
      ctx.grid,
  )
  return kernel_fn(ctx.get_program_ids(), ctx.scalar_prefetch, *args)
@register_pull_block_spec_rule(custom_derivatives.custom_jvp_call_p)
def _custom_jvp_call_pull_block_spec_rule(
    ctx: PullRuleContext, out_block_specs, *, call_jaxpr, **kwargs
):
  """Pull rule for `custom_jvp_call`: pulls through the primal call jaxpr
  (mirrors `_jit_pull_block_spec_rule`)."""
  jaxpr, consts = call_jaxpr.jaxpr, call_jaxpr.consts
  if consts:
    raise NotImplementedError('custom_jvp_call with consts not supported yet')

  def read_usage_env(_: core.Var):
    # Every inner var is treated as regularly used.
    return {Usage.REGULAR}

  in_block_specs, _, _ = _pull_block_spec(
      jaxpr,
      out_block_specs,
      scalar_prefetch_handler=ctx.scalar_prefetch_handler,
      grid=ctx.grid,
      read_usage_env=read_usage_env,
  )
  return in_block_specs
@register_usage_rule(custom_derivatives.custom_vjp_call_p)
def _custom_vjp_call_usage_rule(
    ctx, used_out: list[set[Usage]], *, call_jaxpr: core.ClosedJaxpr, **_
):
  """Usage rule for `custom_vjp_call`: propagates usage through the primal
  call jaxpr (the custom VJP itself is irrelevant for usage)."""
  del ctx
  read_usage_env = compute_usage(call_jaxpr.jaxpr, used_out)
  in_usages = util.safe_map(read_usage_env, call_jaxpr.jaxpr.invars)
  return in_usages
@register_eval_rule(custom_derivatives.custom_vjp_call_p)
def _custom_vjp_call_eval_rule(
    ctx: KernelEvalContext, *args, call_jaxpr: core.ClosedJaxpr, **kwargs
):
  """Kernel evaluation of `custom_vjp_call`: inlines and evaluates the
  primal call jaxpr (mirrors `_custom_jvp_call_eval_rule`)."""
  jaxpr, consts = call_jaxpr.jaxpr, call_jaxpr.consts
  if consts:
    raise NotImplementedError('custom_vjp_call with consts not supported yet')
  out_tree = tree_util.tree_structure(tuple(jaxpr.outvars))
  in_tree = tree_util.tree_structure((tuple(jaxpr.invars), {}))

  def read_usage_env(_: core.Var):
    # Every inner var is treated as regularly used.
    return {Usage.REGULAR}

  _, env, _ = _pull_block_spec(
      jaxpr,
      ctx.out_block_specs,
      scalar_prefetch_handler=ctx.scalar_prefetch_handler,
      grid=ctx.grid,
      read_usage_env=read_usage_env,
  )
  kernel_fn = make_kernel_function(
      jaxpr,
      (),
      in_tree,
      out_tree,
      read_usage_env,
      ctx.in_block_specs,
      env,
      ctx.scalar_prefetch_handler,
      ctx.grid,
  )
  return kernel_fn(ctx.get_program_ids(), ctx.scalar_prefetch, *args)
@register_pull_block_spec_rule(custom_derivatives.custom_vjp_call_p)
def _custom_vjp_call_pull_block_spec_rule(
    ctx: PullRuleContext, out_block_specs, *, call_jaxpr, **kwargs
):
  """Pull rule for `custom_vjp_call`: pulls through the primal call jaxpr
  (mirrors `_custom_jvp_call_pull_block_spec_rule`)."""
  jaxpr, consts = call_jaxpr.jaxpr, call_jaxpr.consts
  if consts:
    raise NotImplementedError('custom_vjp_call with consts not supported yet')

  def read_usage_env(_: core.Var):
    # Every inner var is treated as regularly used.
    return {Usage.REGULAR}

  in_block_specs, _, _ = _pull_block_spec(
      jaxpr,
      out_block_specs,
      scalar_prefetch_handler=ctx.scalar_prefetch_handler,
      grid=ctx.grid,
      read_usage_env=read_usage_env,
  )
  return in_block_specs
@register_pull_block_spec_rule(hijax.call_hi_primitive_p)
def _custom_call_hi_primitive_pull_block_spec_rule(
    ctx: PullRuleContext, out_block_specs, *, prim
):
  """Pull rule for hi-primitives: delegates to the primitive's own
  `pull_block_spec_rule`."""
  return prim.pull_block_spec_rule(ctx, out_block_specs)
@register_eval_rule(hijax.call_hi_primitive_p)
def _custom_call_hi_primitive_eval_rule(
    ctx: KernelEvalContext, *args, prim
):
  """Kernel evaluation for hi-primitives: delegates to the primitive's own
  `block_eval_rule` and flattens the result to leaves."""
  return jax.tree.leaves(prim.block_eval_rule(ctx, *args))
@functools.partial(api_boundary, repro_api_name="fuser.push_block_spec")
def push_block_spec(
    f: Callable,
    *in_spec_args,
    **in_spec_kwargs,
):
  """Returns a function that pushes input block specs forward through `f`.

  `in_spec_args`/`in_spec_kwargs` must mirror the PyTree structure of `f`'s
  arguments; calling the returned wrapper with concrete args traces `f` and
  returns the inferred output block specs in `f`'s output PyTree structure.
  """

  def wrapper(*args, **kwargs):
    flat_block_specs, in_tree_ = tree_util.tree_flatten(
        (in_spec_args, in_spec_kwargs)
    )
    jaxpr, _, in_tree, out_tree = fuser_utils.make_jaxpr(f, *args, **kwargs)
    # The provided block specs must line up with f's argument structure.
    if in_tree != in_tree_:
      raise ValueError(f'Expected {in_tree} PyTree, got {in_tree_}')
    out_bs = _push_block_spec_jaxpr(jaxpr, *flat_block_specs)
    return tree_util.tree_unflatten(out_tree, out_bs)

  return wrapper
def _push_block_spec_jaxpr(
    jaxpr: core.Jaxpr,
    *flat_block_specs,
) -> tuple[pallas_core.BlockSpec, ...]:
  """Forward-propagates block specs through a jaxpr, one eqn at a time.

  Args:
    jaxpr: the jaxpr to propagate through.
    *flat_block_specs: one block spec (or `no_block_spec`) per invar.

  Returns:
    A tuple of block specs, one per jaxpr output.

  Raises:
    ValueError: on an arity mismatch or if no spec reaches an output.
    NotImplementedError: if an eqn's primitive has no push rule.
  """
  num_inputs = len(jaxpr.invars)
  if len(flat_block_specs) != num_inputs:
    raise ValueError(
        f'Expected {num_inputs} block specs, got {len(flat_block_specs)}'
    )
  env: dict[core.Var, pallas_core.BlockSpec | pallas_core.NoBlockSpec] = {}
  for invar, bs in zip(jaxpr.invars, flat_block_specs, strict=True):
    env[invar] = bs
  for constvar in jaxpr.constvars:
    env[constvar] = pallas_core.no_block_spec

  def _read_block_spec(
      atom: core.Atom,
  ) -> pallas_core.BlockSpec | pallas_core.NoBlockSpec:
    if isinstance(atom, core.Literal):
      return pallas_core.no_block_spec
    return env[atom]

  def _write_block_spec(
      atom: core.Atom,
      block_spec: pallas_core.BlockSpec | pallas_core.NoBlockSpec,
  ):
    if isinstance(atom, core.Literal):
      return
    env[atom] = block_spec

  for eqn in jaxpr.eqns:
    in_block_specs = tuple(util.safe_map(_read_block_spec, eqn.invars))
    if all(bs is pallas_core.no_block_spec for bs in in_block_specs):
      # No spec'd inputs: the eqn is outside the blocked computation.
      for outvar in eqn.outvars:
        _write_block_spec(outvar, pallas_core.no_block_spec)
      continue
    rule = push_block_spec_rules.get(eqn.primitive, None)
    if not rule:
      raise NotImplementedError(eqn.primitive)
    ctx = PushRuleContext(
        avals_in=tuple(v.aval for v in eqn.invars),
        avals_out=tuple(v.aval for v in eqn.outvars),
    )
    if eqn.primitive.multiple_results:
      out_block_specs = rule(ctx, *in_block_specs, **eqn.params)
    else:
      out_block_specs = [rule(ctx, *in_block_specs, **eqn.params)]
    util.safe_map(_write_block_spec, eqn.outvars, out_block_specs)

  out_block_specs = tuple(util.safe_map(_read_block_spec, jaxpr.outvars))
  # Backfill unspecified outputs with an arbitrary valid input spec.
  valid_block_spec = [
      bs for bs in flat_block_specs if bs is not pallas_core.no_block_spec
  ][0]
  out_block_specs = tuple(
      valid_block_spec if obs is pallas_core.no_block_spec else obs
      for obs in out_block_specs
  )
  if any(bs is pallas_core.no_block_spec for bs in out_block_specs):
    raise ValueError('No block spec found for output')
  return out_block_specs  # pytype: disable=bad-return-type
push_block_spec_rules: dict[core.Primitive, PushBlockSpecRuleFn] = {}
| PullBlockSpecRuleFn |
python | MongoEngine__mongoengine | tests/fields/test_binary_field.py | {
"start": 264,
"end": 4741
} | class ____(MongoDBTestCase):
def test_binary_fields(self):
    """Ensure that binary fields can be stored and retrieved."""

    class Attachment(Document):
        content_type = StringField()
        blob = BinaryField()

    BLOB = b"\xe6\x00\xc4\xff\x07"
    MIME_TYPE = "application/octet-stream"
    Attachment.drop_collection()
    attachment = Attachment(content_type=MIME_TYPE, blob=BLOB)
    attachment.save()
    attachment_1 = Attachment.objects().first()
    # Both the MIME type string and the binary payload round-trip unchanged.
    assert MIME_TYPE == attachment_1.content_type
    assert BLOB == bytes(attachment_1.blob)
def test_bytearray_conversion_to_bytes(self):
    """A bytearray assigned to a BinaryField is coerced to bytes."""

    class Dummy(Document):
        blob = BinaryField()

    byte_arr = bytearray(b"\x00\x00\x00\x00\x00")
    dummy = Dummy(blob=byte_arr)
    assert isinstance(dummy.blob, bytes)
def test_validation_succeeds(self):
    """Ensure that valid values can be assigned to binary fields."""

    class AttachmentRequired(Document):
        blob = BinaryField(required=True)

    class AttachmentSizeLimit(Document):
        blob = BinaryField(max_bytes=4)

    attachment_required = AttachmentRequired()
    # A missing required blob fails validation until a value is set.
    with pytest.raises(ValidationError):
        attachment_required.validate()
    attachment_required.blob = Binary(b"\xe6\x00\xc4\xff\x07")
    attachment_required.validate()

    _5_BYTES = b"\xe6\x00\xc4\xff\x07"
    _4_BYTES = b"\xe6\x00\xc4\xff"
    # Values above max_bytes are rejected; values at the limit pass.
    with pytest.raises(ValidationError):
        AttachmentSizeLimit(blob=_5_BYTES).validate()
    AttachmentSizeLimit(blob=_4_BYTES).validate()
def test_validation_fails(self):
    """Ensure that invalid values cannot be assigned to binary fields."""

    class Attachment(Document):
        blob = BinaryField()

    # ints, text strings and lists are not valid binary payloads.
    for invalid_data in (2, "Im_a_unicode", ["some_str"]):
        with pytest.raises(ValidationError):
            Attachment(blob=invalid_data).validate()
def test__primary(self):
    """A BinaryField can serve as a document's primary key."""

    class Attachment(Document):
        id = BinaryField(primary_key=True)

    Attachment.drop_collection()
    binary_id = uuid.uuid4().bytes
    att = Attachment(id=binary_id).save()
    assert 1 == Attachment.objects.count()
    # Lookup by the saved document's own id value must match.
    assert 1 == Attachment.objects.filter(id=att.id).count()
    att.delete()
    assert 0 == Attachment.objects.count()
def test_primary_filter_by_binary_pk_as_str(self):
    """Filtering by the raw bytes of a binary primary key finds the doc."""

    class Attachment(Document):
        id = BinaryField(primary_key=True)

    Attachment.drop_collection()
    binary_id = uuid.uuid4().bytes
    att = Attachment(id=binary_id).save()
    # Query with the original bytes value rather than att.id.
    assert 1 == Attachment.objects.filter(id=binary_id).count()
    att.delete()
    assert 0 == Attachment.objects.count()
def test_match_querying_with_bytes(self):
    """Equality filters accept plain bytes for BinaryField values."""

    class MyDocument(Document):
        bin_field = BinaryField()

    MyDocument.drop_collection()
    doc = MyDocument(bin_field=BIN_VALUE).save()
    matched_doc = MyDocument.objects(bin_field=BIN_VALUE).first()
    assert matched_doc.id == doc.id
def test_match_querying_with_binary(self):
    """Equality filters also accept a bson.Binary wrapper for the value."""

    class MyDocument(Document):
        bin_field = BinaryField()

    MyDocument.drop_collection()
    doc = MyDocument(bin_field=BIN_VALUE).save()
    matched_doc = MyDocument.objects(bin_field=Binary(BIN_VALUE)).first()
    assert matched_doc.id == doc.id
def test_modify_operation__set(self):
    """Ensures no regression of bug #1127"""

    class MyDocument(Document):
        some_field = StringField()
        bin_field = BinaryField()

    MyDocument.drop_collection()
    # Upsert via modify() with a set__ operator on the binary field.
    doc = MyDocument.objects(some_field="test").modify(
        upsert=True, new=True, set__bin_field=BIN_VALUE
    )
    assert doc.some_field == "test"
    assert doc.bin_field == BIN_VALUE
def test_update_one(self):
    """Ensures no regression of bug #1127"""

    class MyDocument(Document):
        bin_field = BinaryField()

    MyDocument.drop_collection()
    bin_data = b"\xe6\x00\xc4\xff\x07"
    doc = MyDocument(bin_field=bin_data).save()
    # Match on the old binary value and replace it with a new one.
    n_updated = MyDocument.objects(bin_field=bin_data).update_one(
        bin_field=BIN_VALUE
    )
    assert n_updated == 1
    fetched = MyDocument.objects.with_id(doc.id)
    assert fetched.bin_field == BIN_VALUE
| TestBinaryField |
python | PrefectHQ__prefect | src/prefect/server/orchestration/global_policy.py | {
"start": 13797,
"end": 14542
} | class ____(
BaseUniversalTransform[orm_models.FlowRun, core.FlowRunPolicy]
):
"""
Update a child subflow state's references to a corresponding tracking task run id
in the parent flow run
"""
async def before_transition(
    self, context: OrchestrationContext[orm_models.FlowRun, core.FlowRunPolicy]
) -> None:
    """Copies the parent task run id onto the proposed state's details."""
    if self.nullified_transition():
        return

    # only applies to flow runs with a parent task run id
    if (
        context.run.parent_task_run_id is not None
        and context.proposed_state is not None
    ):
        context.proposed_state.state_details.task_run_id = (
            context.run.parent_task_run_id
        )
| UpdateSubflowStateDetails |
python | bokeh__bokeh | src/bokeh/models/map_plots.py | {
"start": 2081,
"end": 2622
} | class ____(Model):
''' Abstract base class for map options' models.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
    # Pure pass-through to the Model base class.
    super().__init__(*args, **kwargs)
lat = Required(Float, help="""
The latitude where the map should be centered.
""")
lng = Required(Float, help="""
The longitude where the map should be centered.
""")
zoom = Int(12, help="""
The initial zoom level to use when displaying the map.
""")
@abstract
| MapOptions |
python | tensorflow__tensorflow | tensorflow/python/ops/numpy_ops/tests/extensions.py | {
"start": 60098,
"end": 74495
} | class ____(threading.local):
"""Simple config used to maintain state related to a current pmap call."""
def __init__(self):
  # threading.local gives each thread its own independent pmap state.
  super(_PmapConfig, self).__init__()
  self._axis_name = None
  self._devices = None
def axis_name(self):
  # Axis name recorded by the innermost active pmap call; None outside pmap.
  return self._axis_name
def set_axis_name(self, axis_name):
  # Records the axis name of the pmap call entering scope.
  self._axis_name = axis_name
def devices(self):
  # Device list recorded by the innermost active pmap call; None outside pmap.
  return self._devices
def set_devices(self, devices):
  # Records the device list of the pmap call entering scope.
  self._devices = devices
_pmap_config = _PmapConfig()
@contextlib.contextmanager
def pmap_config(axis_name, devices):
  """Records axis_name and devices for this context."""
  old_axis_name = _pmap_config.axis_name()
  old_devices = _pmap_config.devices()
  _pmap_config.set_axis_name(axis_name)
  _pmap_config.set_devices(devices)
  try:
    yield
  finally:
    # Restore the previous state even if the body raises.
    _pmap_config.set_axis_name(old_axis_name)
    _pmap_config.set_devices(old_devices)
def _psum(tensor, axis_name=None):
  """Sum all-reduction.

  Args:
    tensor: A tensor.
    axis_name: The axis name to reduce. Must equal to that of the surrounding
      pmap.

  Returns:
    The sum of the `tensor` replicas on each participating devices.
  """
  if axis_name != _pmap_config.axis_name():
    raise ValueError("axis_name (%s) is not equal to that of the surrounding "
                     "pmap (%s)" % (axis_name, _pmap_config.axis_name()))
  devices = _pmap_config.devices()
  if devices is None:
    raise ValueError("Can't retrieve the device list from the surrounding pmap")
  tensor = tf_np.asarray(tensor)
  if tpu_devices(devices):
    # TODO(b/170895907): Remove this workaround when tpu.cross_replica_sum
    # supports int64/float64.
    is_int64 = False
    is_float64 = False
    if tensor.dtype == np.int64:
      is_int64 = True
      tensor = tensor.astype(np.int32)
    elif tensor.dtype == np.float64:
      is_float64 = True
      tensor = tensor.astype(np.float32)
    # TODO(wangpeng): Supply the `group_assignment` argument to
    # tpu.cross_replica_sum, calculated from `devices`.
    tensor = tpu_ops.cross_replica_sum(tensor)
    # Cast back to the original wide dtype after the reduction.
    if is_int64:
      tensor = math_ops.cast(tensor, dtypes.int64)
    elif is_float64:
      tensor = math_ops.cast(tensor, dtypes.float64)
  else:
    # Non-TPU path: collective reduce across the whole device group.
    tensor = gen_collective_ops.collective_reduce(
        input=tensor,
        group_size=len(devices),
        group_key=_GROUP_KEY,
        instance_key=_get_instance_key(),
        merge_op="Add",
        final_op="Id",
        subdiv_offsets=(0,),
    )
  return tf_np.asarray(tensor)
def psum(tensors, axis_name=None):
  """Applies the sum all-reduction to every tensor in a nested structure."""

  def _reduce_leaf(tensor):
    return _psum(tensor, axis_name=axis_name)

  return nest.map_structure(_reduce_leaf, tensors)
# Note this is not available in the jax api, but seemed like a reasonable API
# to have.
def pmean(tensor, axis_name=None):
  """Mean all-reduction.

  Args:
    tensor: A tensor.
    axis_name: The axis name to reduce. Must equal to that of the surrounding
      pmap.

  Returns:
    The mean of the `tensor` replicas on each participating devices.
  """
  if axis_name != _pmap_config.axis_name():
    raise ValueError("axis_name (%s) is not equal to that of the surrounding "
                     "pmap (%s)" % (axis_name, _pmap_config.axis_name()))
  devices = _pmap_config.devices()
  if devices is None:
    raise ValueError("Can't retrieve the device list from the surrounding pmap")
  if tpu_devices(devices):
    # TODO(wangpeng): Implement this.
    raise ValueError("pmean for TPU is not supported yet.")
  else:
    # Mean is a collective sum followed by division (final_op="Div").
    return gen_collective_ops.collective_reduce(
        input=tensor,
        group_size=len(devices),
        group_key=_GROUP_KEY,
        instance_key=_get_instance_key(),
        merge_op="Add",
        final_op="Div",
        subdiv_offsets=(0,),
    )
def _get_pmap_impl(f, devices, has_tpu):
  """This is a helper function to return the pmap impl.

  Args:
    f: a function that takes ndarrays and returns ndarrays.
    devices: a list of strings; the device list.
    has_tpu: boolean; whether `devices` contains TPU devices.

  Returns:
    A function that takes tensors and returns tensors.
  """
  if has_tpu:
    # Workaround b/121383831: record whether `f` actually returned a list so
    # that the extra wrapping added by tpu.replicate can be undone below.
    output_is_list = [False]  # Use list for mutability

    def recorder(args, kwargs, res):
      del args, kwargs
      output_is_list[0] = isinstance(res, list)
      return res

    f = _record_result_type(recorder, f)

  def tf_f(*tf_args):
    """A wrapper for `f` that takes/returns tensors."""
    np_args = _tf_to_np(tf_args)
    np_out = f(*np_args)
    return np_out

  if has_tpu:

    @polymorphic_function.function(autograph=False)
    def fn(inputs):
      # TODO(wangpeng): Supply the `device_assignment` argument to
      # tpu.replicate, calculated from `devices`.
      res = tpu.replicate(tf_f, inputs)
      # Workaround b/121383831: unwrap single-element lists unless `f`
      # genuinely returned a list.
      if (res and isinstance(res[0], list) and len(res[0]) == 1 and
          not output_is_list[0]):
        res = [x[0] for x in res]
      return res

    return fn
  else:
    # This is run in a tf.function so that the various underlying functions can
    # be run in parallel.
    # The trace happens on the client, so any devices should not depend on any
    # side effects.
    jit_tf_f = polymorphic_function.function(tf_f, autograph=False)

    @polymorphic_function.function(autograph=False)
    def fn(all_per_device_args):
      """Multi-device function with calls placed on the correct device."""
      results = []
      for per_device_args, device in zip(all_per_device_args, devices):
        with ops.device(device):
          results.append(jit_tf_f(*per_device_args))
      return results

    return fn
def pmap(f, axis_name=None, devices=None):
  """Transforms a function into a multi-device function.

  The semantics are similar to JAX's pmap.

  Args:
    f: The function to be converted.
    axis_name: Used for nested pmap, which is not supported yet.
    devices: The devices over which the returned function will run.

  Returns:
    A function that runs the underlying function `f` on `devices`. Its arguments
    can be `ShardedNdArray`s, tensors or other Python objects, and its return
    values are all `ShardedNdArray`s. If an input is a tensor, the length of its
    first dimension must equal the number of devices, and the tensor will be
    splitted along its first dimension among the devices. If an input is an
    unknown Python object, it will be replicated among the devices.
  """
  if devices is None:
    devices = accelerators()
  if not isinstance(devices, (list, tuple)):
    raise ValueError("Must pass a list or tuple of devices")
  num_devices = len(devices)
  if not num_devices:
    raise ValueError("There must be at least 1 device")
  has_tpu = bool(tpu_devices(devices))

  pmap_fn = _get_pmap_impl(f, devices, has_tpu)

  def wrapper(*args):
    """Wrapper that wraps/unwraps args, retvals, and runs the function."""
    if _pmap_config.devices() is not None:
      raise ValueError("Found a surrounding pmap. Nested pmap is not supported "
                       "yet.")
    # TODO(wangpeng): Maybe we should use `asarray` to convert everything
    # to ndarray first.

    flattened_input_args = nest.flatten(args)
    flattened_per_device_args = [[] for _ in devices]
    for arg in flattened_input_args:
      if isinstance(arg, tensor_lib.Tensor):
        # TODO(nareshmodi): Try and use the dynamic shape instead.
        if (not arg.shape.rank) or arg.shape[0] != len(devices):
          # TODO(nareshmodi): Fix this restriction
          raise ValueError(
              "Input tensors need to have a first dimension equal to "
              "the number of devices; got tensor of shape %s and %s devices" %
              (arg.shape, len(devices)))
        # NOTE: Alternatively use tf.split, and place the split tensors on the
        # appropriate device. The best solution for this is to have an API that
        # splits a tensor across devices.
        for j, device in enumerate(devices):
          updated_arg = array_ops.gather_v2(arg, j)
          # TODO(wangpeng): Investigate whether we need a tf.identity for TPU.
          if not has_tpu:
            with ops.device(device):
              updated_arg = array_ops.identity(updated_arg)
          flattened_per_device_args[j].append(updated_arg)
      elif isinstance(arg, ShardedNdArray):
        # Already sharded: hand each device its own shard.
        for device_args, tensor in zip(flattened_per_device_args, arg.tensors):
          device_args.append(tensor)
      else:
        # Opaque Python objects are replicated to every device unchanged.
        for device_args in flattened_per_device_args:
          device_args.append(arg)

    all_per_device_args = [
        nest.pack_sequence_as(args, device_args)
        for device_args in flattened_per_device_args
    ]

    with pmap_config(axis_name, devices):
      results = pmap_fn(all_per_device_args)

    # Rewrap things. This can probably be written better.
    flattened_results = [nest.flatten(result) for result in results]
    final_tree = []

    # TODO(nareshmodi): assert all items in flattened_results have the same
    # structures

    for i in range(len(flattened_results[0])):
      tensors = []
      for j, device in enumerate(devices):
        assert isinstance(
            flattened_results[j][i], tensor_lib.Tensor
        ), "currently only tensor return items are supported"
        tensors.append(flattened_results[j][i])
      final_tree.append(ShardedNdArray(tensors))

    return nest.pack_sequence_as(results[0], final_tree)

  return wrapper
def find_devices(device_type, devices=None):
  """Returns names of devices in `devices` matching `device_type`."""
  if not devices:
    devices = [d.name for d in config.list_logical_devices()]
  parsed = ((name, device_spec.DeviceSpecV2.from_string(name))
            for name in devices)
  return [name for name, spec in parsed if spec.device_type == device_type]
def tpu_devices(devices=None):
  """Gets TPU devices out of `devices`.

  Args:
    devices: A device list (as a list of strings). If None, the list of all
      available devices will be used for it.

  Returns:
    Those in `devices` that are TPUs.
  """
  # Delegates to find_devices with the canonical TF device-type string.
  return find_devices("TPU", devices)
def gpu_devices(devices=None):
  """Gets GPU devices out of `devices`.

  Args:
    devices: A device list (as a list of strings). If None, the list of all
      available devices will be used for it.

  Returns:
    Those in `devices` that are GPUs.
  """
  # Delegates to find_devices with the canonical TF device-type string.
  return find_devices("GPU", devices)
def accelerators(devices=None):
  """Returns TPU devices from `devices` if any exist, otherwise GPU devices."""
  return tpu_devices(devices) or gpu_devices(devices)
def _tree_broadcast(to, s):
"""Broadcasts `s` to the nested structure `to`."""
if not isinstance(to, (list, tuple, dict)):
if not isinstance(s, (int, type(None))):
raise ValueError
return s
if isinstance(s, (int, type(None))):
return nest.map_structure(lambda x: s, to)
if isinstance(to, (list, tuple)):
if len(to) != len(s):
raise ValueError
new_s = [_tree_broadcast(x, y) for x, y in zip(to, s)]
if isinstance(to, tuple):
new_s = tuple(new_s)
return new_s
elif isinstance(to, dict):
return {k: _tree_broadcast(to[k], s[k]) for k in to}
else:
raise TypeError("Unsupported type %s" % type(to))
def vmap(f, in_axes=0, out_axes=0):
  """Returns a function that maps `f` over first dimension of inputs."""
  in_axes_flat = nest.flatten(in_axes)
  if not all(isinstance(l, (type(None), int))
             for l in in_axes_flat):
    raise TypeError(
        "vmap in_axes must be an int, None, or (nested) container with "
        "those types as leaves, but got {}.".format(in_axes))
  if all(isinstance(l, type(None)) for l in in_axes_flat):
    raise ValueError("vmap must have at least one non-None value in in_axes")

  out_axes_flat = nest.flatten(out_axes)
  if not all(isinstance(l, (type(None), int))
             for l in out_axes_flat):
    raise TypeError(
        "vmap out_axes must be an int, None, or (nested) container with "
        "those types as leaves, but got {}.".format(out_axes))

  def _f(*args):
    flat_args = nest.flatten(args)
    try:
      # Broadcast the (possibly partial) in_axes spec over the args tree.
      f_in_axes = _tree_broadcast(args, in_axes)
    except ValueError:
      six.reraise(
          ValueError,
          ValueError(
              "vmap in_axes specification must be a tree prefix of the "
              r"corresponding value, got specification %s for value tree %s" % (
                  in_axes, args)),
          sys.exc_info()[2])
    f_in_axes_flat = nest.flatten(f_in_axes)

    def tf_f(tf_args):
      """Function passed to tf.vectorized_map call."""
      # Note that unbatched arguments are not passed to tf_f. Here we fill
      # those arguments back before calling `f`.
      tf_flat_args = []
      j = 0
      for arg, axis in zip(flat_args, f_in_axes_flat):
        if axis is None:
          tf_flat_args.append(arg)
        else:
          tf_flat_args.append(tf_args[j])
          j += 1
      unbatched_args = nest.pack_sequence_as(args, tf_flat_args)
      return f(*unbatched_args)

    # Constructs arguments to pass to `tf_f`.
    # Unbatch arguments are skipped. Arguments with non-zero axis are
    # transposed.
    tf_args = []
    for arg, axis in zip(flat_args, f_in_axes_flat):
      if axis is None:
        continue
      arg = tf_np.asarray(arg)
      if axis != 0:
        # Move the mapped axis to the front where vectorized_map expects it.
        arg = tf_np.moveaxis(arg, axis, 0)
      tf_args.append(arg)
    # TODO(agarwal): consider creating a tf.function outside of _f and reusing
    # that to avoid overheads of re-vectorizing the code when running eagerly.
    outputs = pfor_ops.vectorized_map(tf_f, tf_args)
    try:
      f_out_axes = _tree_broadcast(outputs, out_axes)
    except ValueError:
      six.reraise(
          ValueError,
          ValueError(
              "vmap out_axes specification must be a tree prefix of the "
              r"corresponding value, got specification %s for value tree %s" % (
                  out_axes, outputs)),
          sys.exc_info()[2])

    def map_output(x, axis):
      """Maps output of tf.vectorized_map to the final output."""
      x = tf_np.asarray(x)
      if axis is None:
        # Note that `tf.vectorized_map always batches the outputs.
        # Here we unbatch it again.
        return x[0, ...]
      elif axis == 0:
        return x
      else:
        # Need to transpose the output.
        return tf_np.moveaxis(x, 0, axis)

    new_outputs = [
        map_output(output, axis)
        for output, axis in zip(nest.flatten(outputs), nest.flatten(f_out_axes))
    ]
    return nest.pack_sequence_as(outputs, new_outputs)

  return _f
| _PmapConfig |
python | kamyu104__LeetCode-Solutions | Python/find-the-shortest-superstring.py | {
"start": 47,
"end": 1649
} | class ____(object):
def shortestSuperstring(self, A):
    """
    :type A: List[str]
    :rtype: str
    """
    # Bitmask DP (travelling-salesman style): maximize total pairwise
    # overlap, which minimizes the superstring length.
    n = len(A)
    # overlaps[i][j]: longest suffix of A[i] that is also a prefix of A[j].
    overlaps = [[0]*n for _ in xrange(n)]
    for i, x in enumerate(A):
        for j, y in enumerate(A):
            # Try overlap lengths from largest to smallest; first hit wins.
            for l in reversed(xrange(min(len(x), len(y)))):
                if y[:l].startswith(x[len(x)-l:]):
                    overlaps[i][j] = l
                    break

    # dp[mask][i]: best total overlap using the word set `mask`, ending at
    # word i. prev[mask][i] remembers the predecessor word for backtracking.
    dp = [[0]*n for _ in xrange(1<<n)]
    prev = [[None]*n for _ in xrange(1<<n)]
    for mask in xrange(1, 1<<n):
        for bit in xrange(n):
            if ((mask>>bit) & 1) == 0:
                continue
            prev_mask = mask^(1<<bit)
            for i in xrange(n):
                if ((prev_mask>>i) & 1) == 0:
                    continue
                value = dp[prev_mask][i] + overlaps[i][bit]
                if value > dp[mask][bit]:
                    dp[mask][bit] = value
                    prev[mask][bit] = i

    # Pick the best final word, then walk `prev` back to recover the order.
    bit = max(xrange(n), key = dp[-1].__getitem__)
    words = []
    mask = (1<<n)-1
    while bit is not None:
        words.append(bit)
        mask, bit = mask^(1<<bit), prev[mask][bit]
    words.reverse()

    # Append any words never reached on the DP path (zero-overlap cases).
    lookup = set(words)
    words.extend([i for i in xrange(n) if i not in lookup])

    # Stitch the words together, dropping each overlapped prefix.
    result = [A[words[0]]]
    for i in xrange(1, len(words)):
        overlap = overlaps[words[i-1]][words[i]]
        result.append(A[words[i]][overlap:])
    return "".join(result)
python | getsentry__sentry | tests/sentry/issues/endpoints/test_group_events.py | {
"start": 450,
"end": 19935
} | class ____(APITestCase, SnubaTestCase, SearchIssueTestMixin, PerformanceIssueTestCase):
def setUp(self) -> None:
super().setUp()
self.min_ago = before_now(minutes=1)
self.two_min_ago = before_now(minutes=2)
def do_request(self, url: str) -> Response:
return self.client.get(url, format="json")
def _parse_links(self, header: str) -> dict[str | None, dict[str, str | None]]:
# links come in {url: {...attrs}}, but we need {rel: {...attrs}}
links = {}
for url, attrs in parse_link_header(header).items():
links[attrs["rel"]] = attrs
attrs["href"] = url
return links
def test_simple(self) -> None:
self.login_as(user=self.user)
event_1 = self.store_event(
data={
"event_id": "a" * 32,
"fingerprint": ["1"],
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
event_2 = self.store_event(
data={
"event_id": "b" * 32,
"fingerprint": ["1"],
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
url = f"/api/0/issues/{event_1.group.id}/events/"
response = self.do_request(url)
assert response.status_code == 200, response.content
assert len(response.data) == 2
assert sorted(map(lambda x: x["eventID"], response.data)) == sorted(
[str(event_1.event_id), str(event_2.event_id)]
)
# Should default to full=false which does not include context property
assert "context" not in response.data[0]
assert "context" not in response.data[1]
def test_full_false(self) -> None:
self.login_as(user=self.user)
event_1 = self.store_event(
data={
"event_id": "a" * 32,
"fingerprint": ["1"],
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
event_2 = self.store_event(
data={
"event_id": "b" * 32,
"fingerprint": ["1"],
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
url = f"/api/0/issues/{event_1.group.id}/events/?full=false"
response = self.do_request(url)
assert response.status_code == 200, response.content
assert sorted(map(lambda x: x["eventID"], response.data)) == sorted(
[str(event_1.event_id), str(event_2.event_id)]
)
# Simplified response does not have context property
assert "context" not in response.data[0]
assert "context" not in response.data[1]
def test_full_true(self) -> None:
self.login_as(user=self.user)
event_1 = self.store_event(
data={
"event_id": "a" * 32,
"fingerprint": ["1"],
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
self.store_event(
data={
"event_id": "b" * 32,
"fingerprint": ["1"],
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
url = f"/api/0/issues/{event_1.group.id}/events/?full=true"
response = self.do_request(url)
assert response.status_code == 200, response.content
# Full response has context property
assert "context" in response.data[0]
assert "context" in response.data[1]
def test_tags(self) -> None:
self.login_as(user=self.user)
event_1 = self.store_event(
data={
"event_id": "a" * 32,
"fingerprint": ["1"],
"tags": {"foo": "baz", "bar": "buz"},
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
event_2 = self.store_event(
data={
"event_id": "b" * 32,
"fingerprint": ["1"],
"tags": {"bar": "biz"},
"timestamp": before_now(seconds=61).isoformat(),
},
project_id=self.project.id,
)
url = f"/api/0/issues/{event_1.group.id}/events/"
response = self.do_request(url + "?query=foo:baz")
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]["eventID"] == str(event_1.event_id)
response = self.do_request(url + "?query=!foo:baz")
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]["eventID"] == str(event_2.event_id)
response = self.do_request(url + "?query=bar:biz")
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]["eventID"] == str(event_2.event_id)
response = self.do_request(url + "?query=bar:biz%20foo:baz")
assert response.status_code == 200, response.content
assert len(response.data) == 0
response = self.do_request(url + "?query=bar:buz%20foo:baz")
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]["eventID"] == str(event_1.event_id)
response = self.do_request(url + "?query=bar:baz")
assert response.status_code == 200, response.content
assert len(response.data) == 0
response = self.do_request(url + "?query=a:b")
assert response.status_code == 200, response.content
assert len(response.data) == 0
response = self.do_request(url + "?query=bar:b")
assert response.status_code == 200, response.content
assert len(response.data) == 0
response = self.do_request(url + "?query=bar:baz")
assert response.status_code == 200, response.content
assert len(response.data) == 0
response = self.do_request(url + "?query=!bar:baz")
assert response.status_code == 200, response.content
assert len(response.data) == 2
assert {e["eventID"] for e in response.data} == {event_1.event_id, event_2.event_id}
def test_search_event_by_id(self) -> None:
self.login_as(user=self.user)
event_1 = self.store_event(
data={
"event_id": "a" * 32,
"fingerprint": ["group-1"],
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
self.store_event(
data={
"event_id": "b" * 32,
"fingerprint": ["group-1"],
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
url = f"/api/0/issues/{event_1.group.id}/events/?query={event_1.event_id}"
response = self.do_request(url)
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]["eventID"] == event_1.event_id
def test_search_event_by_message(self) -> None:
self.login_as(user=self.user)
event_1 = self.store_event(
data={
"event_id": "a" * 32,
"fingerprint": ["group-1"],
"message": "foo bar hello world",
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
group = event_1.group
event_2 = self.store_event(
data={
"event_id": "b" * 32,
"fingerprint": ["group-1"],
"message": "this bar hello world",
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
assert group == event_2.group
query_1 = "foo"
query_2 = "hello+world"
# Single Word Query
url = f"/api/0/issues/{group.id}/events/?query={query_1}"
response = self.do_request(url)
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]["eventID"] == event_1.event_id
# Multiple Word Query
url = f"/api/0/issues/{group.id}/events/?query={query_2}"
response = self.do_request(url)
assert response.status_code == 200, response.content
assert len(response.data) == 2
assert sorted(map(lambda x: x["eventID"], response.data)) == sorted(
[str(event_1.event_id), str(event_2.event_id)]
)
def test_search_by_release(self) -> None:
self.login_as(user=self.user)
self.create_release(self.project, version="first-release")
event_1 = self.store_event(
data={
"event_id": "a" * 32,
"fingerprint": ["group-1"],
"timestamp": self.min_ago.isoformat(),
"release": "first-release",
},
project_id=self.project.id,
)
url = f"/api/0/issues/{event_1.group.id}/events/?query=release:latest"
response = self.do_request(url)
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]["eventID"] == event_1.event_id
def test_environment(self) -> None:
self.login_as(user=self.user)
events = {}
for name in ["production", "development"]:
events[name] = self.store_event(
data={
"fingerprint": ["put-me-in-group1"],
"timestamp": self.min_ago.isoformat(),
"environment": name,
},
project_id=self.project.id,
)
# Asserts that all are in the same group
(group_id,) = {e.group.id for e in events.values()}
url = f"/api/0/issues/{group_id}/events/"
response = self.do_request(url + "?environment=production")
assert response.status_code == 200, response.content
assert set(map(lambda x: x["eventID"], response.data)) == {
str(events["production"].event_id)
}
response = self.client.get(
url, data={"environment": ["production", "development"]}, format="json"
)
assert response.status_code == 200, response.content
assert set(map(lambda x: x["eventID"], response.data)) == {
str(event.event_id) for event in events.values()
}
response = self.do_request(url + "?environment=invalid")
assert response.status_code == 200, response.content
assert response.data == []
response = self.client.get(
url + "?environment=production&query=environment:development", format="json"
)
assert response.status_code == 200, response.content
assert response.data == []
def test_filters_based_on_retention(self) -> None:
self.login_as(user=self.user)
self.store_event(
data={"fingerprint": ["group_1"], "timestamp": before_now(days=2).isoformat()},
project_id=self.project.id,
)
event_2 = self.store_event(
data={"fingerprint": ["group_1"], "timestamp": self.min_ago.isoformat()},
project_id=self.project.id,
)
group = event_2.group
with self.options({"system.event-retention-days": 1}):
response = self.client.get(f"/api/0/issues/{group.id}/events/")
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert sorted(map(lambda x: x["eventID"], response.data)) == sorted([str(event_2.event_id)])
def test_search_event_has_tags(self) -> None:
self.login_as(user=self.user)
event = self.store_event(
data={
"timestamp": self.min_ago.isoformat(),
"message": "foo",
"tags": {"logger": "python"},
},
project_id=self.project.id,
)
response = self.client.get(f"/api/0/issues/{event.group.id}/events/")
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert {"key": "logger", "value": "python"} in response.data[0]["tags"]
@freeze_time()
def test_date_filters(self) -> None:
self.login_as(user=self.user)
event_1 = self.store_event(
data={"timestamp": before_now(days=5).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
event_2 = self.store_event(
data={"timestamp": before_now(days=1).isoformat(), "fingerprint": ["group-1"]},
project_id=self.project.id,
)
group = event_1.group
assert group == event_2.group
response = self.client.get(f"/api/0/issues/{group.id}/events/", data={"statsPeriod": "6d"})
assert response.status_code == 200, response.content
assert len(response.data) == 2
assert sorted(map(lambda x: x["eventID"], response.data)) == sorted(
[str(event_1.event_id), str(event_2.event_id)]
)
response = self.client.get(f"/api/0/issues/{group.id}/events/", data={"statsPeriod": "2d"})
assert response.status_code == 200, response.content
assert len(response.data) == 1
assert response.data[0]["eventID"] == str(event_2.event_id)
def test_invalid_period(self) -> None:
self.login_as(user=self.user)
first_seen = timezone.now() - timedelta(days=5)
group = self.create_group(first_seen=first_seen)
response = self.client.get(f"/api/0/issues/{group.id}/events/", data={"statsPeriod": "lol"})
assert response.status_code == 400
def test_invalid_query(self) -> None:
self.login_as(user=self.user)
first_seen = timezone.now() - timedelta(days=5)
group = self.create_group(first_seen=first_seen)
response = self.client.get(
f"/api/0/issues/{group.id}/events/",
data={"statsPeriod": "7d", "query": "foo(bar"},
)
assert response.status_code == 400
def test_multiple_group(self) -> None:
self.login_as(user=self.user)
event_1 = self.store_event(
data={
"fingerprint": ["group_1"],
"event_id": "a" * 32,
"message": "foo",
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
event_2 = self.store_event(
data={
"fingerprint": ["group_2"],
"event_id": "b" * 32,
"message": "group2",
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
for event in (event_1, event_2):
url = f"/api/0/issues/{event.group.id}/events/"
response = self.do_request(url)
assert response.status_code == 200, response.content
assert len(response.data) == 1, response.data
assert list(map(lambda x: x["eventID"], response.data)) == [str(event.event_id)]
def test_pagination(self) -> None:
self.login_as(user=self.user)
for _ in range(2):
event = self.store_event(
data={
"fingerprint": ["group_1"],
"event_id": "a" * 32,
"message": "foo",
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
url = f"/api/0/issues/{event.group.id}/events/?per_page=1"
response = self.do_request(url)
links = self._parse_links(response["Link"])
assert response.status_code == 200, response.content
assert links["previous"]["results"] == "false"
assert links["next"]["results"] == "true"
assert len(response.data) == 1
def test_orderby(self) -> None:
self.login_as(user=self.user)
event = self.store_event(
data={
"fingerprint": ["group_1"],
"event_id": "a" * 32,
"message": "foo",
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
event = self.store_event(
data={
"fingerprint": ["group_1"],
"event_id": "b" * 32,
"message": "foo",
"timestamp": self.two_min_ago.isoformat(),
},
project_id=self.project.id,
)
url = f"/api/0/issues/{event.group.id}/events/"
response = self.do_request(url)
assert len(response.data) == 2
assert response.data[0]["eventID"] == "a" * 32
assert response.data[1]["eventID"] == "b" * 32
def test_perf_issue(self) -> None:
event_1 = self.create_performance_issue()
event_2 = self.create_performance_issue()
assert event_1.group is not None
self.login_as(user=self.user)
url = f"/api/0/issues/{event_1.group.id}/events/"
response = self.do_request(url)
assert response.status_code == 200, response.content
assert sorted(map(lambda x: x["eventID"], response.data)) == sorted(
[str(event_1.event_id), str(event_2.event_id)]
)
def test_generic_issue(self) -> None:
event_1, _, group_info = self.store_search_issue(
self.project.id,
self.user.id,
[f"{ProfileFileIOGroupType.type_id}-group1"],
"prod",
before_now(hours=1),
)
assert group_info is not None
event_2, _, _ = self.store_search_issue(
self.project.id,
self.user.id,
[f"{ProfileFileIOGroupType.type_id}-group1"],
"prod",
before_now(hours=1),
)
self.login_as(user=self.user)
url = f"/api/0/issues/{group_info.group.id}/events/"
response = self.do_request(url)
assert response.status_code == 200, response.content
assert sorted(map(lambda x: x["eventID"], response.data)) == sorted(
[str(event_1.event_id), str(event_2.event_id)]
)
def test_sample(self) -> None:
"""Test that random=true doesn't blow up. We can't really test if they're in random order."""
self.login_as(user=self.user)
event = self.store_event(
data={
"fingerprint": ["group_1"],
"event_id": "a" * 32,
"message": "foo",
"timestamp": self.min_ago.isoformat(),
},
project_id=self.project.id,
)
event = self.store_event(
data={
"fingerprint": ["group_1"],
"event_id": "b" * 32,
"message": "foo",
"timestamp": self.two_min_ago.isoformat(),
},
project_id=self.project.id,
)
url = f"/api/0/issues/{event.group.id}/events/?sample=true"
response = self.do_request(url)
assert len(response.data) == 2
| GroupEventsTest |
python | ray-project__ray | python/ray/util/multiprocessing/pool.py | {
"start": 17270,
"end": 18987
} | class ____(IMapIterator):
"""Iterator to the results of tasks submitted using `imap`.
The results are returned in the same order that they were submitted, even
if they don't finish in that order. Only one batch of tasks per actor
process is submitted at a time - the rest are submitted as results come in.
Should not be constructed directly.
"""
def next(self, timeout=None):
if len(self._ready_objects) == 0:
if self._finished_iterating and (
self._next_chunk_index == len(self._submitted_chunks)
):
# Finish when all chunks have been dispatched and processed
# Notify the calling process that the work is done.
raise StopIteration
# This loop will break when the next index in order is ready or
# self._result_thread.next_ready_index() raises a timeout.
index = -1
while index != self._next_chunk_index:
start = time.time()
index = self._result_thread.next_ready_index(timeout=timeout)
self._submit_next_chunk()
self._submitted_chunks[index] = True
if timeout is not None:
timeout = max(0, timeout - (time.time() - start))
while (
self._next_chunk_index < len(self._submitted_chunks)
and self._submitted_chunks[self._next_chunk_index]
):
for result in self._result_thread.result(self._next_chunk_index):
self._ready_objects.append(result)
self._next_chunk_index += 1
return self._ready_objects.popleft()
| OrderedIMapIterator |
python | huggingface__transformers | src/transformers/models/kosmos2_5/modeling_kosmos2_5.py | {
"start": 11805,
"end": 15298
} | class ____(ModelOutput):
"""
Base class for text model's outputs that also contains a pooling of the last hidden states.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
width (`torch.FloatTensor` of shape `(batch_size,)`):
The original width (before resizing) of each image in the batch.
height (`torch.FloatTensor` of shape `(batch_size,)`):
The original height (before resizing) of each image in the batch.
image_embeds (`torch.FloatTensor` of shape `(batch_size, latent_query_num, hidden_size)`, *optional*):
Sequence of hidden-states at the output of `Kosmos2ImageToTextProjection`.
projection_attentions (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights given by `Kosmos2ImageToTextProjection`, after the attention softmax, used to compute
the weighted average in the self-attention heads.
vision_model_output(`BaseModelOutputWithPooling`, *optional*):
The output of the [`Kosmos2VisionModel`].
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
`config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
input) to speed up sequential decoding.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
width: Optional[torch.FloatTensor] = None
height: Optional[torch.FloatTensor] = None
image_embeds: Optional[torch.FloatTensor] = None
projection_attentions: Optional[tuple[torch.FloatTensor]] = None
vision_model_output: BaseModelOutputWithPooling = None
def to_tuple(self) -> tuple[Any]:
return tuple((self[k] if k != "vision_model_output" else getattr(self, k).to_tuple()) for k in self.keys())
@dataclass
| Kosmos2_5ModelOutput |
python | django__django | tests/m2m_recursive/tests.py | {
"start": 2413,
"end": 4912
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.a, cls.b, cls.c, cls.d = [
Person.objects.create(name=name)
for name in ["Anne", "Bill", "Chuck", "David"]
]
cls.a.colleagues.add(
cls.b,
cls.c,
through_defaults={
"first_meet": datetime.date(2013, 1, 5),
},
)
# Add m2m for Anne and Chuck in reverse direction.
cls.d.colleagues.add(
cls.a,
cls.c,
through_defaults={
"first_meet": datetime.date(2015, 6, 15),
},
)
def test_recursive_m2m_all(self):
for person, colleagues in (
(self.a, [self.b, self.c, self.d]),
(self.b, [self.a]),
(self.c, [self.a, self.d]),
(self.d, [self.a, self.c]),
):
with self.subTest(person=person):
self.assertSequenceEqual(person.colleagues.all(), colleagues)
def test_recursive_m2m_reverse_add(self):
# Add m2m for Anne in reverse direction.
self.b.colleagues.add(
self.a,
through_defaults={
"first_meet": datetime.date(2013, 1, 5),
},
)
self.assertCountEqual(self.a.colleagues.all(), [self.b, self.c, self.d])
self.assertSequenceEqual(self.b.colleagues.all(), [self.a])
def test_recursive_m2m_remove(self):
self.b.colleagues.remove(self.a)
self.assertSequenceEqual(self.a.colleagues.all(), [self.c, self.d])
self.assertSequenceEqual(self.b.colleagues.all(), [])
def test_recursive_m2m_clear(self):
# Clear m2m for Anne.
self.a.colleagues.clear()
self.assertSequenceEqual(self.a.friends.all(), [])
# Reverse m2m relationships is removed.
self.assertSequenceEqual(self.c.colleagues.all(), [self.d])
self.assertSequenceEqual(self.d.colleagues.all(), [self.c])
def test_recursive_m2m_set(self):
# Set new relationships for Chuck.
self.c.colleagues.set(
[self.b, self.d],
through_defaults={
"first_meet": datetime.date(2013, 1, 5),
},
)
self.assertSequenceEqual(self.c.colleagues.order_by("name"), [self.b, self.d])
# Reverse m2m relationships is removed.
self.assertSequenceEqual(self.a.colleagues.order_by("name"), [self.b, self.d])
| RecursiveSymmetricalM2MThroughTests |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 31206,
"end": 31508
} | class ____(FieldValues):
"""
Valid and invalid values for `URLField`.
"""
valid_inputs = {
'http://example.com': 'http://example.com',
}
invalid_inputs = {
'example.com': ['Enter a valid URL.']
}
outputs = {}
field = serializers.URLField()
| TestURLField |
python | getsentry__sentry | src/sentry/snuba/metrics/naming_layer/public.py | {
"start": 2387,
"end": 5171
} | class ____(Enum):
"""
These are the public facing names of the API and only the transaction fields listed here are
queryable in the API.
"""
USER = "transaction.user"
DURATION = "transaction.duration"
MEASUREMENTS_FCP = "transaction.measurements.fcp"
MEASUREMENTS_LCP = "transaction.measurements.lcp"
MEASUREMENTS_APP_START_COLD = "transaction.measurements.app_start_cold"
MEASUREMENTS_APP_START_WARM = "transaction.measurements.app_start_warm"
MEASUREMENTS_CLS = "transaction.measurements.cls"
MEASUREMENTS_FID = "transaction.measurements.fid"
MEASUREMENTS_FP = "transaction.measurements.fp"
MEASUREMENTS_FRAMES_FROZEN = "transaction.measurements.frames_frozen"
MEASUREMENTS_FRAMES_FROZEN_RATE = "transaction.measurements.frames_frozen_rate"
MEASUREMENTS_FRAMES_SLOW = "transaction.measurements.frames_slow"
MEASUREMENTS_FRAMES_SLOW_RATE = "transaction.measurements.frames_slow_rate"
MEASUREMENTS_FRAMES_TOTAL = "transaction.measurements.frames_total"
MEASUREMENTS_TIME_TO_INITIAL_DISPLAY = "transaction.measurements.time_to_initial_display"
MEASUREMENTS_TIME_TO_FULL_DISPLAY = "transaction.measurements.time_to_full_display"
MEASUREMENTS_STALL_COUNT = "transaction.measurements.stall_count"
MEASUREMENTS_STALL_LONGEST_TIME = "transaction.measurements.stall_longest_time"
MEASUREMENTS_STALL_PERCENTAGE = "transaction.measurements.stall_percentage"
MEASUREMENTS_STALL_TOTAL_TIME = "transaction.measurements.stall_total_time"
MEASUREMENTS_TTFB = "transaction.measurements.ttfb"
MEASUREMENTS_TTFB_REQUEST_TIME = "transaction.measurements.ttfb.requesttime"
BREAKDOWNS_HTTP = "transaction.breakdowns.ops.http"
BREAKDOWNS_DB = "transaction.breakdowns.ops.db"
BREAKDOWNS_BROWSER = "transaction.breakdowns.ops.browser"
BREAKDOWNS_RESOURCE = "transaction.breakdowns.ops.resource"
FAILURE_RATE = "transaction.failure_rate"
APDEX = "transaction.apdex"
MISERABLE_USER = "transaction.miserable_user"
USER_MISERY = "transaction.user_misery"
FAILURE_COUNT = "transaction.failure_count"
TEAM_KEY_TRANSACTION = "transactions.team_key_transaction"
HTTP_ERROR_RATE = "transaction.http_error_rate"
# Less granular coarse metrics
DURATION_LIGHT = "d:transactions/duration_light@millisecond"
# Span metrics.
# NOTE: These might be moved to their own namespace soon.
SPAN_USER = "span.user"
SPAN_DURATION = "span.duration"
SPAN_SELF_TIME = "span.exclusive_time"
SPAN_SELF_TIME_LIGHT = "span.exclusive_time_light"
# TODO: Remove this as soon as the MetricsQuery supports private metrics
COUNT_ON_DEMAND = "count.on_demand"
DIST_ON_DEMAND = "dist.on_demand"
SET_ON_DEMAND = "set.on_demand"
| TransactionMetricKey |
python | numpy__numpy | numpy/_core/tests/test_numeric.py | {
"start": 159152,
"end": 159681
} | class ____:
def test_zero_dimension(self):
# Test resolution to issue #5663
a = np.ndarray((3, 0))
b = np.ndarray((0, 4))
td = np.tensordot(a, b, (1, 0))
assert_array_equal(td, np.dot(a, b))
assert_array_equal(td, np.einsum('ij,jk', a, b))
def test_zero_dimensional(self):
# gh-12130
arr_0d = np.array(1)
# contracting no axes is well defined
ret = np.tensordot(arr_0d, arr_0d, ([], []))
assert_array_equal(ret, arr_0d)
| TestTensordot |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.