language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | keras-team__keras | keras/src/models/cloning_test.py | {
"start": 2291,
"end": 8983
} | class ____(testing.TestCase):
def assert_models_equal(self, model1, model2, ref_input):
result1 = model1(ref_input)
result2 = model2(ref_input)
for r1, r2 in zip(tree.flatten(result1), tree.flatten(result2)):
self.assertAllClose(
ops.convert_to_numpy(r1), ops.convert_to_numpy(r2)
)
def assert_weights_equal(self, model1, model2):
for a, b in zip(model1.weights, model2.weights):
self.assertAllClose(a.numpy(), b.numpy())
@parameterized.named_parameters(
("mlp_functional", get_mlp_functional_model),
("cnn_functional", get_cnn_functional_model, True),
("sequential", get_sequential_model),
(
"deferred_sequential",
lambda: get_sequential_model(explicit_input=False),
),
("subclassed", get_subclassed_model),
)
def test_cloning_correctness(self, model_fn, is_conv=False):
ref_input = np.random.random((2, 7, 3) if is_conv else (2, 3))
model = model_fn()
new_model = clone_model(model)
model(ref_input) # Maybe needed to build the model
new_model(ref_input) # Maybe needed to build the model
new_model.set_weights(model.get_weights())
self.assert_models_equal(model, new_model, ref_input)
@parameterized.named_parameters(
("mlp_functional", get_mlp_functional_model),
("cnn_functional", get_cnn_functional_model),
("sequential", get_sequential_model),
)
def test_custom_clone_function(self, model_fn):
def clone_function(layer):
config = layer.get_config()
config["name"] = f"{config['name']}_custom"
return layer.__class__.from_config(config)
model = model_fn()
new_model = clone_model(model, clone_function=clone_function)
for l1, l2 in zip(model.layers, new_model.layers):
if not isinstance(l1, layers.InputLayer):
self.assertEqual(l2.name, f"{l1.name}_custom")
@parameterized.named_parameters(
("cnn_functional", get_cnn_functional_model),
("cnn_sequential", get_cnn_sequential_model),
(
"cnn_sequential_noinputlayer",
lambda: get_cnn_sequential_model(explicit_input=False),
),
)
def test_input_tensors(self, model_fn):
ref_input = np.random.random((2, 7, 3))
model = model_fn()
model(ref_input) # Maybe needed to get model inputs if no Input layer
input_tensor = model.inputs[0]
new_model = clone_model(model, input_tensors=input_tensor)
tree.assert_same_structure(model.inputs, new_model.inputs)
tree.assert_same_structure(model.outputs, new_model.outputs)
def test_shared_layers_cloning(self):
model = get_mlp_functional_model(shared_layers=True)
new_model = clone_model(model)
self.assertLen(new_model.layers, 4)
def test_structured_io_cloning(self):
x = layers.Input((3,))
y = layers.Input((3,))
z1 = x + y
z2 = layers.Dense(5)(z1)
inputs = dict(x=x, y=y)
outputs = dict(z1=z1, z2=z2)
model0 = models.Model(inputs, outputs)
model = clone_model(model0)
tree.assert_same_structure(model.input, inputs)
tree.assert_same_structure(model.output, outputs)
model = clone_model(model0, input_tensors=inputs)
tree.assert_same_structure(model.input, inputs)
tree.assert_same_structure(model.output, outputs)
with self.assertRaisesRegex(
ValueError,
"`input_tensors` must have the same structure as model.input",
):
model = clone_model(model0, input_tensors=(x, y))
def test_call_fn(self):
model = get_mlp_functional_model(shared_layers=False)
def call_function(layer, *args, **kwargs):
out = layer(*args, **kwargs)
if isinstance(layer, layers.Dense):
out = layers.Dropout(0.5)(out)
return out
new_model = clone_model(
model,
clone_function=lambda x: x, # Reuse the same layers.
call_function=call_function,
)
self.assertLen(model.layers, 3)
self.assertLen(new_model.layers, 5)
self.assertIsInstance(new_model.layers[2], layers.Dropout)
self.assertIsInstance(new_model.layers[4], layers.Dropout)
ref_input = np.random.random((2, 3))
self.assert_models_equal(model, new_model, ref_input)
def test_recursive(self):
model = get_nested_functional_model()
def call_function(layer, *args, **kwargs):
out = layer(*args, **kwargs)
if isinstance(layer, layers.Dense):
out = layers.Dropout(0.5)(out)
return out
new_model = clone_model(
model,
clone_function=lambda x: x, # Reuse the same layers.
call_function=call_function,
recursive=True,
)
self.assertLen(model._flatten_layers(), 8)
self.assertLen(new_model._flatten_layers(), 12)
self.assertIsInstance(new_model.layers[3].layers[2], layers.Dropout)
self.assertIsInstance(new_model.layers[3].layers[4], layers.Dropout)
ref_input = np.random.random((2, 4))
self.assert_models_equal(model, new_model, ref_input)
# Sequential.
def clone_function(layer):
layer = layer.__class__.from_config(layer.get_config())
layer.flag = True
return layer
model = get_nested_sequential_model()
new_model = clone_model(
model,
clone_function=clone_function,
recursive=True,
)
ref_input = np.random.random((2, 3))
model(ref_input) # Maybe needed to build the model
new_model(ref_input) # Maybe needed to build the model
new_model.set_weights(model.get_weights())
self.assert_models_equal(model, new_model, ref_input)
for l1, l2 in zip(model._flatten_layers(), new_model._flatten_layers()):
if isinstance(l2, layers.Dense):
self.assertFalse(hasattr(l1, "flag"))
self.assertTrue(hasattr(l2, "flag"))
def test_compiled_model_cloning(self):
model = models.Sequential()
model.add(layers.Input((3,)))
model.add(layers.Dense(5, activation="relu"))
model.add(layers.Dense(1, activation="sigmoid"))
model.compile(optimizer="adam", loss="binary_crossentropy")
cloned_model = clone_model(model)
self.assertEqual(model.compiled, cloned_model.compiled)
| CloneModelTest |
python | django-extensions__django-extensions | django_extensions/management/commands/runjobs.py | {
"start": 280,
"end": 3518
} | class ____(BaseCommand):
help = "Runs scheduled maintenance jobs."
when_options = [
"minutely",
"quarter_hourly",
"hourly",
"daily",
"weekly",
"monthly",
"yearly",
]
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
"when", nargs="?", help="options: %s" % ", ".join(self.when_options)
)
parser.add_argument(
"--list",
"-l",
action="store_true",
dest="list_jobs",
default=False,
help="List all jobs with their description",
)
def usage_msg(self):
print("%s Please specify: %s" % (self.help, ", ".join(self.when_options)))
def runjobs(self, when, options):
verbosity = options["verbosity"]
jobs = get_jobs(when, only_scheduled=True)
for app_name, job_name in sorted(jobs.keys()):
job = jobs[(app_name, job_name)]
if verbosity > 1:
logger.info("Executing %s job: %s (app: %s)", when, job_name, app_name)
try:
job().execute()
except Exception:
logger.exception(
"ERROR OCCURED IN JOB: %s (APP: %s)", job_name, app_name
)
def runjobs_by_signals(self, when, options):
"""Run jobs from the signals"""
# Thanks for Ian Holsman for the idea and code
from django_extensions.management import signals
from django.conf import settings
verbosity = options["verbosity"]
for app_name in settings.INSTALLED_APPS:
try:
__import__(app_name + ".management", "", "", [""])
except ImportError:
pass
for app in (
app.models_module for app in apps.get_app_configs() if app.models_module
):
if verbosity > 1:
app_name = ".".join(app.__name__.rsplit(".")[:-1])
print("Sending %s job signal for: %s" % (when, app_name))
if when == "minutely":
signals.run_minutely_jobs.send(sender=app, app=app)
elif when == "quarter_hourly":
signals.run_quarter_hourly_jobs.send(sender=app, app=app)
elif when == "hourly":
signals.run_hourly_jobs.send(sender=app, app=app)
elif when == "daily":
signals.run_daily_jobs.send(sender=app, app=app)
elif when == "weekly":
signals.run_weekly_jobs.send(sender=app, app=app)
elif when == "monthly":
signals.run_monthly_jobs.send(sender=app, app=app)
elif when == "yearly":
signals.run_yearly_jobs.send(sender=app, app=app)
@signalcommand
def handle(self, *args, **options):
when = options["when"]
setup_logger(logger, self.stdout)
if options["list_jobs"]:
print_jobs(when, only_scheduled=True, show_when=True, show_appname=True)
elif when in self.when_options:
self.runjobs(when, options)
self.runjobs_by_signals(when, options)
else:
self.usage_msg()
| Command |
python | Textualize__textual | examples/five_by_five.py | {
"start": 4392,
"end": 9471
} | class ____(Screen):
"""Main 5x5 game grid screen."""
SIZE: Final = 5
"""The size of the game grid. Clue's in the name really."""
BINDINGS = [
Binding("n", "new_game", "New Game"),
Binding("question_mark", "app.push_screen('help')", "Help", key_display="?"),
Binding("q", "app.quit", "Quit"),
Binding("up,w,k", "navigate(-1,0)", "Move Up", False),
Binding("down,s,j", "navigate(1,0)", "Move Down", False),
Binding("left,a,h", "navigate(0,-1)", "Move Left", False),
Binding("right,d,l", "navigate(0,1)", "Move Right", False),
Binding("space", "move", "Toggle", False),
]
"""The bindings for the main game grid."""
@property
def filled_cells(self) -> DOMQuery[GameCell]:
"""DOMQuery[GameCell]: The collection of cells that are currently turned on."""
return cast(DOMQuery[GameCell], self.query("GameCell.filled"))
@property
def filled_count(self) -> int:
"""int: The number of cells that are currently filled."""
return len(self.filled_cells)
@property
def all_filled(self) -> bool:
"""bool: Are all the cells filled?"""
return self.filled_count == self.SIZE * self.SIZE
def game_playable(self, playable: bool) -> None:
"""Mark the game as playable, or not.
Args:
playable (bool): Should the game currently be playable?
"""
self.query_one(GameGrid).disabled = not playable
def cell(self, row: int, col: int) -> GameCell:
"""Get the cell at a given location.
Args:
row (int): The row of the cell to get.
col (int): The column of the cell to get.
Returns:
GameCell: The cell at that location.
"""
return self.query_one(f"#{GameCell.at(row,col)}", GameCell)
def compose(self) -> ComposeResult:
"""Compose the game screen.
Returns:
ComposeResult: The result of composing the game screen.
"""
yield GameHeader()
yield GameGrid()
yield Footer()
yield WinnerMessage()
def toggle_cell(self, row: int, col: int) -> None:
"""Toggle an individual cell, but only if it's in bounds.
If the row and column would place the cell out of bounds for the
game grid, this function call is a no-op. That is, it's safe to call
it with an invalid cell coordinate.
Args:
row (int): The row of the cell to toggle.
col (int): The column of the cell to toggle.
"""
if 0 <= row <= (self.SIZE - 1) and 0 <= col <= (self.SIZE - 1):
self.cell(row, col).toggle_class("filled")
_PATTERN: Final = (-1, 1, 0, 0, 0)
def toggle_cells(self, cell: GameCell) -> None:
"""Toggle a 5x5 pattern around the given cell.
Args:
cell (GameCell): The cell to toggle the cells around.
"""
for row, col in zip(self._PATTERN, reversed(self._PATTERN)):
self.toggle_cell(cell.row + row, cell.col + col)
self.query_one(GameHeader).filled = self.filled_count
def make_move_on(self, cell: GameCell) -> None:
"""Make a move on the given cell.
All relevant cells around the given cell are toggled as per the
game's rules.
Args:
cell (GameCell): The cell to make a move on
"""
self.toggle_cells(cell)
self.query_one(GameHeader).moves += 1
if self.all_filled:
self.query_one(WinnerMessage).show(self.query_one(GameHeader).moves)
self.game_playable(False)
def on_button_pressed(self, event: GameCell.Pressed) -> None:
"""React to a press of a button on the game grid.
Args:
event (GameCell.Pressed): The event to react to.
"""
self.make_move_on(cast(GameCell, event.button))
def action_new_game(self) -> None:
"""Start a new game."""
self.query_one(GameHeader).moves = 0
self.filled_cells.remove_class("filled")
self.query_one(WinnerMessage).hide()
middle = self.cell(self.SIZE // 2, self.SIZE // 2)
self.toggle_cells(middle)
self.set_focus(middle)
self.game_playable(True)
def action_navigate(self, row: int, col: int) -> None:
"""Navigate to a new cell by the given offsets.
Args:
row (int): The row of the cell to navigate to.
col (int): The column of the cell to navigate to.
"""
if isinstance(self.focused, GameCell):
self.set_focus(
self.cell(
(self.focused.row + row) % self.SIZE,
(self.focused.col + col) % self.SIZE,
)
)
def action_move(self) -> None:
"""Make a move on the current cell."""
if isinstance(self.focused, GameCell):
self.focused.press()
def on_mount(self) -> None:
"""Get the game started when we first mount."""
self.action_new_game()
| Game |
python | mlflow__mlflow | tests/langgraph/sample_code/langgraph_prebuilt.py | {
"start": 308,
"end": 1618
} | class ____(ChatOpenAI, extra="allow"):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._responses = itertools.cycle(
[
AIMessage(
content="",
tool_calls=[ToolCall(name="get_weather", args={"city": "sf"}, id="123")],
usage_metadata={"input_tokens": 5, "output_tokens": 10, "total_tokens": 15},
),
AIMessage(
content="The weather in San Francisco is always sunny!",
usage_metadata={"input_tokens": 10, "output_tokens": 20, "total_tokens": 30},
),
]
)
def _generate(self, *args, **kwargs):
return ChatResult(generations=[ChatGeneration(message=next(self._responses))])
async def _agenerate(self, *args, **kwargs):
return ChatResult(generations=[ChatGeneration(message=next(self._responses))])
@tool
def get_weather(city: Literal["nyc", "sf"]):
"""Use this to get weather information."""
if city == "nyc":
return "It might be cloudy in nyc"
elif city == "sf":
return "It's always sunny in sf"
llm = FakeOpenAI()
tools = [get_weather]
graph = create_react_agent(llm, tools)
mlflow.models.set_model(graph)
| FakeOpenAI |
python | getsentry__sentry | src/sentry/integrations/repository/issue_alert.py | {
"start": 1596,
"end": 1698
} | class ____(NotificationMessageValidationError):
pass
| NewIssueAlertNotificationMessageValidationError |
python | openai__openai-python | src/openai/resources/chat/completions/completions.py | {
"start": 81258,
"end": 160117
} | class ____(AsyncAPIResource):
@cached_property
def messages(self) -> AsyncMessages:
return AsyncMessages(self._client)
@cached_property
def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncCompletionsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncCompletionsWithStreamingResponse(self)
async def parse(
self,
*,
messages: Iterable[ChatCompletionMessageParam],
model: Union[str, ChatModel],
audio: Optional[ChatCompletionAudioParam] | Omit = omit,
response_format: type[ResponseFormatT] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
function_call: completion_create_params.FunctionCall | Omit = omit,
functions: Iterable[completion_create_params.Function] | Omit = omit,
logit_bias: Optional[Dict[str, int]] | Omit = omit,
logprobs: Optional[bool] | Omit = omit,
max_completion_tokens: Optional[int] | Omit = omit,
max_tokens: Optional[int] | Omit = omit,
metadata: Optional[Metadata] | Omit = omit,
modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
n: Optional[int] | Omit = omit,
parallel_tool_calls: bool | Omit = omit,
prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
presence_penalty: Optional[float] | Omit = omit,
prompt_cache_key: str | Omit = omit,
prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
safety_identifier: str | Omit = omit,
seed: Optional[int] | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
store: Optional[bool] | Omit = omit,
stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
temperature: Optional[float] | Omit = omit,
tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
top_logprobs: Optional[int] | Omit = omit,
top_p: Optional[float] | Omit = omit,
user: str | Omit = omit,
verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ParsedChatCompletion[ResponseFormatT]:
"""Wrapper over the `client.chat.completions.create()` method that provides richer integrations with Python specific types
& returns a `ParsedChatCompletion` object, which is a subclass of the standard `ChatCompletion` class.
You can pass a pydantic model to this method and it will automatically convert the model
into a JSON schema, send it to the API and parse the response content back into the given model.
This method will also automatically parse `function` tool calls if:
- You use the `openai.pydantic_function_tool()` helper method
- You mark your tool schema with `"strict": True`
Example usage:
```py
from pydantic import BaseModel
from openai import AsyncOpenAI
class Step(BaseModel):
explanation: str
output: str
class MathResponse(BaseModel):
steps: List[Step]
final_answer: str
client = AsyncOpenAI()
completion = await client.chat.completions.parse(
model="gpt-4o-2024-08-06",
messages=[
{"role": "system", "content": "You are a helpful math tutor."},
{"role": "user", "content": "solve 8x + 31 = 2"},
],
response_format=MathResponse,
)
message = completion.choices[0].message
if message.parsed:
print(message.parsed.steps)
print("answer: ", message.parsed.final_answer)
```
"""
_validate_input_tools(tools)
extra_headers = {
"X-Stainless-Helper-Method": "chat.completions.parse",
**(extra_headers or {}),
}
def parser(raw_completion: ChatCompletion) -> ParsedChatCompletion[ResponseFormatT]:
return _parse_chat_completion(
response_format=response_format,
chat_completion=raw_completion,
input_tools=tools,
)
return await self._post(
"/chat/completions",
body=await async_maybe_transform(
{
"messages": messages,
"model": model,
"audio": audio,
"frequency_penalty": frequency_penalty,
"function_call": function_call,
"functions": functions,
"logit_bias": logit_bias,
"logprobs": logprobs,
"max_completion_tokens": max_completion_tokens,
"max_tokens": max_tokens,
"metadata": metadata,
"modalities": modalities,
"n": n,
"parallel_tool_calls": parallel_tool_calls,
"prediction": prediction,
"presence_penalty": presence_penalty,
"prompt_cache_key": prompt_cache_key,
"prompt_cache_retention": prompt_cache_retention,
"reasoning_effort": reasoning_effort,
"response_format": _type_to_response_format(response_format),
"safety_identifier": safety_identifier,
"seed": seed,
"service_tier": service_tier,
"store": store,
"stop": stop,
"stream": False,
"stream_options": stream_options,
"temperature": temperature,
"tool_choice": tool_choice,
"tools": tools,
"top_logprobs": top_logprobs,
"top_p": top_p,
"user": user,
"verbosity": verbosity,
"web_search_options": web_search_options,
},
completion_create_params.CompletionCreateParams,
),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
post_parser=parser,
),
# we turn the `ChatCompletion` instance into a `ParsedChatCompletion`
# in the `parser` function above
cast_to=cast(Type[ParsedChatCompletion[ResponseFormatT]], ChatCompletion),
stream=False,
)
@overload
async def create(
self,
*,
messages: Iterable[ChatCompletionMessageParam],
model: Union[str, ChatModel],
audio: Optional[ChatCompletionAudioParam] | Omit = omit,
frequency_penalty: Optional[float] | Omit = omit,
function_call: completion_create_params.FunctionCall | Omit = omit,
functions: Iterable[completion_create_params.Function] | Omit = omit,
logit_bias: Optional[Dict[str, int]] | Omit = omit,
logprobs: Optional[bool] | Omit = omit,
max_completion_tokens: Optional[int] | Omit = omit,
max_tokens: Optional[int] | Omit = omit,
metadata: Optional[Metadata] | Omit = omit,
modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
n: Optional[int] | Omit = omit,
parallel_tool_calls: bool | Omit = omit,
prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
presence_penalty: Optional[float] | Omit = omit,
prompt_cache_key: str | Omit = omit,
prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
response_format: completion_create_params.ResponseFormat | Omit = omit,
safety_identifier: str | Omit = omit,
seed: Optional[int] | Omit = omit,
service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
store: Optional[bool] | Omit = omit,
stream: Optional[Literal[False]] | Omit = omit,
stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
temperature: Optional[float] | Omit = omit,
tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
top_logprobs: Optional[int] | Omit = omit,
top_p: Optional[float] | Omit = omit,
user: str | Omit = omit,
verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ChatCompletion:
"""
**Starting a new project?** We recommend trying
[Responses](https://platform.openai.com/docs/api-reference/responses) to take
advantage of the latest OpenAI platform features. Compare
[Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
---
Creates a model response for the given chat conversation. Learn more in the
[text generation](https://platform.openai.com/docs/guides/text-generation),
[vision](https://platform.openai.com/docs/guides/vision), and
[audio](https://platform.openai.com/docs/guides/audio) guides.
Parameter support can differ depending on the model used to generate the
response, particularly for newer reasoning models. Parameters that are only
supported for reasoning models are noted below. For the current state of
unsupported parameters in reasoning models,
[refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
Args:
messages: A list of messages comprising the conversation so far. Depending on the
[model](https://platform.openai.com/docs/models) you use, different message
types (modalities) are supported, like
[text](https://platform.openai.com/docs/guides/text-generation),
[images](https://platform.openai.com/docs/guides/vision), and
[audio](https://platform.openai.com/docs/guides/audio).
model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
wide range of models with different capabilities, performance characteristics,
and price points. Refer to the
[model guide](https://platform.openai.com/docs/models) to browse and compare
available models.
audio: Parameters for audio output. Required when audio output is requested with
`modalities: ["audio"]`.
[Learn more](https://platform.openai.com/docs/guides/audio).
frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
existing frequency in the text so far, decreasing the model's likelihood to
repeat the same line verbatim.
function_call: Deprecated in favor of `tool_choice`.
Controls which (if any) function is called by the model.
`none` means the model will not call a function and instead generates a message.
`auto` means the model can pick between generating a message or calling a
function.
Specifying a particular function via `{"name": "my_function"}` forces the model
to call that function.
`none` is the default when no functions are present. `auto` is the default if
functions are present.
functions: Deprecated in favor of `tools`.
A list of functions the model may generate JSON inputs for.
logit_bias: Modify the likelihood of specified tokens appearing in the completion.
Accepts a JSON object that maps tokens (specified by their token ID in the
tokenizer) to an associated bias value from -100 to 100. Mathematically, the
bias is added to the logits generated by the model prior to sampling. The exact
effect will vary per model, but values between -1 and 1 should decrease or
increase likelihood of selection; values like -100 or 100 should result in a ban
or exclusive selection of the relevant token.
logprobs: Whether to return log probabilities of the output tokens or not. If true,
returns the log probabilities of each output token returned in the `content` of
`message`.
max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
including visible output tokens and
[reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
completion. This value can be used to control
[costs](https://openai.com/api/pricing/) for text generated via API.
This value is now deprecated in favor of `max_completion_tokens`, and is not
compatible with
[o-series models](https://platform.openai.com/docs/guides/reasoning).
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
modalities: Output types that you would like the model to generate. Most models are capable
of generating text, which is the default:
`["text"]`
The `gpt-4o-audio-preview` model can also be used to
[generate audio](https://platform.openai.com/docs/guides/audio). To request that
this model generate both text and audio responses, you can use:
`["text", "audio"]`
n: How many chat completion choices to generate for each input message. Note that
you will be charged based on the number of generated tokens across all of the
choices. Keep `n` as `1` to minimize costs.
parallel_tool_calls: Whether to enable
[parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
during tool use.
prediction: Static predicted output content, such as the content of a text file that is
being regenerated.
presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
whether they appear in the text so far, increasing the model's likelihood to
talk about new topics.
prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
hit rates. Replaces the `user` field.
[Learn more](https://platform.openai.com/docs/guides/prompt-caching).
prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
prompt caching, which keeps cached prefixes active for longer, up to a maximum
of 24 hours.
[Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
reasoning_effort: Constrains effort on reasoning for
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
reasoning effort can result in faster responses and fewer tokens used on
reasoning in a response.
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
calls are supported for all reasoning values in gpt-5.1.
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not
support `none`.
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
response_format: An object specifying the format that the model must output.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
Outputs which ensures the model will match your supplied JSON schema. Learn more
in the
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
Setting to `{ "type": "json_object" }` enables the older JSON mode, which
ensures the message the model generates is valid JSON. Using `json_schema` is
preferred for models that support it.
safety_identifier: A stable identifier used to help detect users of your application that may be
violating OpenAI's usage policies. The IDs should be a string that uniquely
identifies each user. We recommend hashing their username or email address, in
order to avoid sending us any identifying information.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
seed: This feature is in Beta. If specified, our system will make a best effort to
sample deterministically, such that repeated requests with the same `seed` and
parameters should return the same result. Determinism is not guaranteed, and you
should refer to the `system_fingerprint` response parameter to monitor changes
in the backend.
service_tier: Specifies the processing type used for serving the request.
- If set to 'auto', then the request will be processed with the service tier
configured in the Project settings. Unless otherwise configured, the Project
will use 'default'.
- If set to 'default', then the request will be processed with the standard
pricing and performance for the selected model.
- If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
'[priority](https://openai.com/api-priority-processing/)', then the request
will be processed with the corresponding service tier.
- When not set, the default behavior is 'auto'.
When the `service_tier` parameter is set, the response body will include the
`service_tier` value based on the processing mode actually used to serve the
request. This response value may be different from the value set in the
parameter.
stop: Not supported with latest reasoning models `o3` and `o4-mini`.
Up to 4 sequences where the API will stop generating further tokens. The
returned text will not contain the stop sequence.
store: Whether or not to store the output of this chat completion request for use in
our [model distillation](https://platform.openai.com/docs/guides/distillation)
or [evals](https://platform.openai.com/docs/guides/evals) products.
Supports text and image inputs. Note: image inputs over 8MB will be dropped.
stream: If set to true, the model response data will be streamed to the client as it is
generated using
[server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
See the
[Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
for more information, along with the
[streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
guide for more information on how to handle the streaming events.
stream_options: Options for streaming response. Only set this when you set `stream: true`.
temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
make the output more random, while lower values like 0.2 will make it more
focused and deterministic. We generally recommend altering this or `top_p` but
not both.
tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
not call any tool and instead generates a message. `auto` means the model can
pick between generating a message or calling one or more tools. `required` means
the model must call one or more tools. Specifying a particular tool via
`{"type": "function", "function": {"name": "my_function"}}` forces the model to
call that tool.
`none` is the default when no tools are present. `auto` is the default if tools
are present.
tools: A list of tools the model may call. You can provide either
[custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
or [function tools](https://platform.openai.com/docs/guides/function-calling).
top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
return at each token position, each with an associated log probability.
`logprobs` must be set to `true` if this parameter is used.
top_p: An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
means only the tokens comprising the top 10% probability mass are considered.
We generally recommend altering this or `temperature` but not both.
user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
`prompt_cache_key` instead to maintain caching optimizations. A stable
identifier for your end-users. Used to boost cache hit rates by better bucketing
similar requests and to help OpenAI detect and prevent abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
verbosity: Constrains the verbosity of the model's response. Lower values will result in
more concise responses, while higher values will result in more verbose
responses. Currently supported values are `low`, `medium`, and `high`.
web_search_options: This tool searches the web for relevant results to use in a response. Learn more
about the
[web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
...
    @overload
    async def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        stream: Literal[True],
        audio: Optional[ChatCompletionAudioParam] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        function_call: completion_create_params.FunctionCall | Omit = omit,
        functions: Iterable[completion_create_params.Function] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[bool] | Omit = omit,
        max_completion_tokens: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        parallel_tool_calls: bool | Omit = omit,
        prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        prompt_cache_key: str | Omit = omit,
        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
        reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
        response_format: completion_create_params.ResponseFormat | Omit = omit,
        safety_identifier: str | Omit = omit,
        seed: Optional[int] | Omit = omit,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        store: Optional[bool] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
        tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
        top_logprobs: Optional[int] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
        web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncStream[ChatCompletionChunk]:
        """Streaming overload: with `stream=True` this returns an
        `AsyncStream[ChatCompletionChunk]` yielding chunks as they are generated.

        **Starting a new project?** We recommend trying
        [Responses](https://platform.openai.com/docs/api-reference/responses) to take
        advantage of the latest OpenAI platform features. Compare
        [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).

        ---

        Creates a model response for the given chat conversation. Learn more in the
        [text generation](https://platform.openai.com/docs/guides/text-generation),
        [vision](https://platform.openai.com/docs/guides/vision), and
        [audio](https://platform.openai.com/docs/guides/audio) guides.

        Parameter support can differ depending on the model used to generate the
        response, particularly for newer reasoning models. Parameters that are only
        supported for reasoning models are noted below. For the current state of
        unsupported parameters in reasoning models,
        [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).

        Args:
          messages: A list of messages comprising the conversation so far. Depending on the
              [model](https://platform.openai.com/docs/models) you use, different message
              types (modalities) are supported, like
              [text](https://platform.openai.com/docs/guides/text-generation),
              [images](https://platform.openai.com/docs/guides/vision), and
              [audio](https://platform.openai.com/docs/guides/audio).

          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
              wide range of models with different capabilities, performance characteristics,
              and price points. Refer to the
              [model guide](https://platform.openai.com/docs/models) to browse and compare
              available models.

          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
              See the
              [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
              for more information, along with the
              [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
              guide for more information on how to handle the streaming events.

          audio: Parameters for audio output. Required when audio output is requested with
              `modalities: ["audio"]`.
              [Learn more](https://platform.openai.com/docs/guides/audio).

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

          function_call: Deprecated in favor of `tool_choice`.

              Controls which (if any) function is called by the model.

              `none` means the model will not call a function and instead generates a message.

              `auto` means the model can pick between generating a message or calling a
              function.

              Specifying a particular function via `{"name": "my_function"}` forces the model
              to call that function.

              `none` is the default when no functions are present. `auto` is the default if
              functions are present.

          functions: Deprecated in favor of `tools`.

              A list of functions the model may generate JSON inputs for.

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the
              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
              bias is added to the logits generated by the model prior to sampling. The exact
              effect will vary per model, but values between -1 and 1 should decrease or
              increase likelihood of selection; values like -100 or 100 should result in a ban
              or exclusive selection of the relevant token.

          logprobs: Whether to return log probabilities of the output tokens or not. If true,
              returns the log probabilities of each output token returned in the `content` of
              `message`.

          max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
              including visible output tokens and
              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
              completion. This value can be used to control
              [costs](https://openai.com/api/pricing/) for text generated via API.

              This value is now deprecated in favor of `max_completion_tokens`, and is not
              compatible with
              [o-series models](https://platform.openai.com/docs/guides/reasoning).

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          modalities: Output types that you would like the model to generate. Most models are capable
              of generating text, which is the default:

              `["text"]`

              The `gpt-4o-audio-preview` model can also be used to
              [generate audio](https://platform.openai.com/docs/guides/audio). To request that
              this model generate both text and audio responses, you can use:

              `["text", "audio"]`

          n: How many chat completion choices to generate for each input message. Note that
              you will be charged based on the number of generated tokens across all of the
              choices. Keep `n` as `1` to minimize costs.

          parallel_tool_calls: Whether to enable
              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
              during tool use.

          prediction: Static predicted output content, such as the content of a text file that is
              being regenerated.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

          prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
              hit rates. Replaces the `user` field.
              [Learn more](https://platform.openai.com/docs/guides/prompt-caching).

          prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
              prompt caching, which keeps cached prefixes active for longer, up to a maximum
              of 24 hours.
              [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).

          reasoning_effort: Constrains effort on reasoning for
              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
              reasoning effort can result in faster responses and fewer tokens used on
              reasoning in a response.

              - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
                reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
                calls are supported for all reasoning values in gpt-5.1.
              - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
                support `none`.
              - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.

          response_format: An object specifying the format that the model must output.

              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
              Outputs which ensures the model will match your supplied JSON schema. Learn more
              in the
              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

              Setting to `{ "type": "json_object" }` enables the older JSON mode, which
              ensures the message the model generates is valid JSON. Using `json_schema` is
              preferred for models that support it.

          safety_identifier: A stable identifier used to help detect users of your application that may be
              violating OpenAI's usage policies. The IDs should be a string that uniquely
              identifies each user. We recommend hashing their username or email address, in
              order to avoid sending us any identifying information.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).

          seed: This feature is in Beta. If specified, our system will make a best effort to
              sample deterministically, such that repeated requests with the same `seed` and
              parameters should return the same result. Determinism is not guaranteed, and you
              should refer to the `system_fingerprint` response parameter to monitor changes
              in the backend.

          service_tier: Specifies the processing type used for serving the request.

              - If set to 'auto', then the request will be processed with the service tier
                configured in the Project settings. Unless otherwise configured, the Project
                will use 'default'.
              - If set to 'default', then the request will be processed with the standard
                pricing and performance for the selected model.
              - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
                '[priority](https://openai.com/api-priority-processing/)', then the request
                will be processed with the corresponding service tier.
              - When not set, the default behavior is 'auto'.

              When the `service_tier` parameter is set, the response body will include the
              `service_tier` value based on the processing mode actually used to serve the
              request. This response value may be different from the value set in the
              parameter.

          stop: Not supported with latest reasoning models `o3` and `o4-mini`.

              Up to 4 sequences where the API will stop generating further tokens. The
              returned text will not contain the stop sequence.

          store: Whether or not to store the output of this chat completion request for use in
              our [model distillation](https://platform.openai.com/docs/guides/distillation)
              or [evals](https://platform.openai.com/docs/guides/evals) products.

              Supports text and image inputs. Note: image inputs over 8MB will be dropped.

          stream_options: Options for streaming response. Only set this when you set `stream: true`.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic. We generally recommend altering this or `top_p` but
              not both.

          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
              not call any tool and instead generates a message. `auto` means the model can
              pick between generating a message or calling one or more tools. `required` means
              the model must call one or more tools. Specifying a particular tool via
              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
              call that tool.

              `none` is the default when no tools are present. `auto` is the default if tools
              are present.

          tools: A list of tools the model may call. You can provide either
              [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
              or [function tools](https://platform.openai.com/docs/guides/function-calling).

          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
              return at each token position, each with an associated log probability.
              `logprobs` must be set to `true` if this parameter is used.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
              `prompt_cache_key` instead to maintain caching optimizations. A stable
              identifier for your end-users. Used to boost cache hit rates by better bucketing
              similar requests and to help OpenAI detect and prevent abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).

          verbosity: Constrains the verbosity of the model's response. Lower values will result in
              more concise responses, while higher values will result in more verbose
              responses. Currently supported values are `low`, `medium`, and `high`.

          web_search_options: This tool searches the web for relevant results to use in a response. Learn more
              about the
              [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
    @overload
    async def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        stream: bool,
        audio: Optional[ChatCompletionAudioParam] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        function_call: completion_create_params.FunctionCall | Omit = omit,
        functions: Iterable[completion_create_params.Function] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[bool] | Omit = omit,
        max_completion_tokens: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        parallel_tool_calls: bool | Omit = omit,
        prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        prompt_cache_key: str | Omit = omit,
        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
        reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
        response_format: completion_create_params.ResponseFormat | Omit = omit,
        safety_identifier: str | Omit = omit,
        seed: Optional[int] | Omit = omit,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        store: Optional[bool] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
        tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
        top_logprobs: Optional[int] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
        web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatCompletion | AsyncStream[ChatCompletionChunk]:
        """Dynamic-`stream` overload: when `stream` is only known at runtime, the
        result is a `ChatCompletion` or an `AsyncStream[ChatCompletionChunk]`.

        **Starting a new project?** We recommend trying
        [Responses](https://platform.openai.com/docs/api-reference/responses) to take
        advantage of the latest OpenAI platform features. Compare
        [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).

        ---

        Creates a model response for the given chat conversation. Learn more in the
        [text generation](https://platform.openai.com/docs/guides/text-generation),
        [vision](https://platform.openai.com/docs/guides/vision), and
        [audio](https://platform.openai.com/docs/guides/audio) guides.

        Parameter support can differ depending on the model used to generate the
        response, particularly for newer reasoning models. Parameters that are only
        supported for reasoning models are noted below. For the current state of
        unsupported parameters in reasoning models,
        [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).

        Args:
          messages: A list of messages comprising the conversation so far. Depending on the
              [model](https://platform.openai.com/docs/models) you use, different message
              types (modalities) are supported, like
              [text](https://platform.openai.com/docs/guides/text-generation),
              [images](https://platform.openai.com/docs/guides/vision), and
              [audio](https://platform.openai.com/docs/guides/audio).

          model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
              wide range of models with different capabilities, performance characteristics,
              and price points. Refer to the
              [model guide](https://platform.openai.com/docs/models) to browse and compare
              available models.

          stream: If set to true, the model response data will be streamed to the client as it is
              generated using
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
              See the
              [Streaming section below](https://platform.openai.com/docs/api-reference/chat/streaming)
              for more information, along with the
              [streaming responses](https://platform.openai.com/docs/guides/streaming-responses)
              guide for more information on how to handle the streaming events.

          audio: Parameters for audio output. Required when audio output is requested with
              `modalities: ["audio"]`.
              [Learn more](https://platform.openai.com/docs/guides/audio).

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

          function_call: Deprecated in favor of `tool_choice`.

              Controls which (if any) function is called by the model.

              `none` means the model will not call a function and instead generates a message.

              `auto` means the model can pick between generating a message or calling a
              function.

              Specifying a particular function via `{"name": "my_function"}` forces the model
              to call that function.

              `none` is the default when no functions are present. `auto` is the default if
              functions are present.

          functions: Deprecated in favor of `tools`.

              A list of functions the model may generate JSON inputs for.

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the
              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
              bias is added to the logits generated by the model prior to sampling. The exact
              effect will vary per model, but values between -1 and 1 should decrease or
              increase likelihood of selection; values like -100 or 100 should result in a ban
              or exclusive selection of the relevant token.

          logprobs: Whether to return log probabilities of the output tokens or not. If true,
              returns the log probabilities of each output token returned in the `content` of
              `message`.

          max_completion_tokens: An upper bound for the number of tokens that can be generated for a completion,
              including visible output tokens and
              [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
              completion. This value can be used to control
              [costs](https://openai.com/api/pricing/) for text generated via API.

              This value is now deprecated in favor of `max_completion_tokens`, and is not
              compatible with
              [o-series models](https://platform.openai.com/docs/guides/reasoning).

          metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
              for storing additional information about the object in a structured format, and
              querying for objects via API or the dashboard.

              Keys are strings with a maximum length of 64 characters. Values are strings with
              a maximum length of 512 characters.

          modalities: Output types that you would like the model to generate. Most models are capable
              of generating text, which is the default:

              `["text"]`

              The `gpt-4o-audio-preview` model can also be used to
              [generate audio](https://platform.openai.com/docs/guides/audio). To request that
              this model generate both text and audio responses, you can use:

              `["text", "audio"]`

          n: How many chat completion choices to generate for each input message. Note that
              you will be charged based on the number of generated tokens across all of the
              choices. Keep `n` as `1` to minimize costs.

          parallel_tool_calls: Whether to enable
              [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
              during tool use.

          prediction: Static predicted output content, such as the content of a text file that is
              being regenerated.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

          prompt_cache_key: Used by OpenAI to cache responses for similar requests to optimize your cache
              hit rates. Replaces the `user` field.
              [Learn more](https://platform.openai.com/docs/guides/prompt-caching).

          prompt_cache_retention: The retention policy for the prompt cache. Set to `24h` to enable extended
              prompt caching, which keeps cached prefixes active for longer, up to a maximum
              of 24 hours.
              [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).

          reasoning_effort: Constrains effort on reasoning for
              [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
              supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
              reasoning effort can result in faster responses and fewer tokens used on
              reasoning in a response.

              - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
                reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
                calls are supported for all reasoning values in gpt-5.1.
              - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
                support `none`.
              - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.

          response_format: An object specifying the format that the model must output.

              Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
              Outputs which ensures the model will match your supplied JSON schema. Learn more
              in the
              [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).

              Setting to `{ "type": "json_object" }` enables the older JSON mode, which
              ensures the message the model generates is valid JSON. Using `json_schema` is
              preferred for models that support it.

          safety_identifier: A stable identifier used to help detect users of your application that may be
              violating OpenAI's usage policies. The IDs should be a string that uniquely
              identifies each user. We recommend hashing their username or email address, in
              order to avoid sending us any identifying information.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).

          seed: This feature is in Beta. If specified, our system will make a best effort to
              sample deterministically, such that repeated requests with the same `seed` and
              parameters should return the same result. Determinism is not guaranteed, and you
              should refer to the `system_fingerprint` response parameter to monitor changes
              in the backend.

          service_tier: Specifies the processing type used for serving the request.

              - If set to 'auto', then the request will be processed with the service tier
                configured in the Project settings. Unless otherwise configured, the Project
                will use 'default'.
              - If set to 'default', then the request will be processed with the standard
                pricing and performance for the selected model.
              - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
                '[priority](https://openai.com/api-priority-processing/)', then the request
                will be processed with the corresponding service tier.
              - When not set, the default behavior is 'auto'.

              When the `service_tier` parameter is set, the response body will include the
              `service_tier` value based on the processing mode actually used to serve the
              request. This response value may be different from the value set in the
              parameter.

          stop: Not supported with latest reasoning models `o3` and `o4-mini`.

              Up to 4 sequences where the API will stop generating further tokens. The
              returned text will not contain the stop sequence.

          store: Whether or not to store the output of this chat completion request for use in
              our [model distillation](https://platform.openai.com/docs/guides/distillation)
              or [evals](https://platform.openai.com/docs/guides/evals) products.

              Supports text and image inputs. Note: image inputs over 8MB will be dropped.

          stream_options: Options for streaming response. Only set this when you set `stream: true`.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic. We generally recommend altering this or `top_p` but
              not both.

          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
              not call any tool and instead generates a message. `auto` means the model can
              pick between generating a message or calling one or more tools. `required` means
              the model must call one or more tools. Specifying a particular tool via
              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
              call that tool.

              `none` is the default when no tools are present. `auto` is the default if tools
              are present.

          tools: A list of tools the model may call. You can provide either
              [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
              or [function tools](https://platform.openai.com/docs/guides/function-calling).

          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
              return at each token position, each with an associated log probability.
              `logprobs` must be set to `true` if this parameter is used.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
              `prompt_cache_key` instead to maintain caching optimizations. A stable
              identifier for your end-users. Used to boost cache hit rates by better bucketing
              similar requests and to help OpenAI detect and prevent abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).

          verbosity: Constrains the verbosity of the model's response. Lower values will result in
              more concise responses, while higher values will result in more verbose
              responses. Currently supported values are `low`, `medium`, and `high`.

          web_search_options: This tool searches the web for relevant results to use in a response. Learn more
              about the
              [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...
    @required_args(["messages", "model"], ["messages", "model", "stream"])
    async def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        audio: Optional[ChatCompletionAudioParam] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        function_call: completion_create_params.FunctionCall | Omit = omit,
        functions: Iterable[completion_create_params.Function] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[bool] | Omit = omit,
        max_completion_tokens: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        parallel_tool_calls: bool | Omit = omit,
        prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        prompt_cache_key: str | Omit = omit,
        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
        reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
        response_format: completion_create_params.ResponseFormat | Omit = omit,
        safety_identifier: str | Omit = omit,
        seed: Optional[int] | Omit = omit,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        store: Optional[bool] | Omit = omit,
        stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
        tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
        top_logprobs: Optional[int] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
        web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> ChatCompletion | AsyncStream[ChatCompletionChunk]:
        """Implementation behind the typed `create` overloads above.

        POSTs to `/chat/completions`. See the preceding `@overload` signatures for
        full per-parameter documentation. Returns a `ChatCompletion` when `stream`
        is omitted/False, or an `AsyncStream[ChatCompletionChunk]` when
        `stream=True`.
        """
        # Reject malformed `response_format` values client-side before making a request.
        validate_response_format(response_format)
        return await self._post(
            "/chat/completions",
            # Omitted params (the `omit` sentinel) are dropped from the request body
            # by the transform, so only explicitly-passed fields are serialized.
            body=await async_maybe_transform(
                {
                    "messages": messages,
                    "model": model,
                    "audio": audio,
                    "frequency_penalty": frequency_penalty,
                    "function_call": function_call,
                    "functions": functions,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_completion_tokens": max_completion_tokens,
                    "max_tokens": max_tokens,
                    "metadata": metadata,
                    "modalities": modalities,
                    "n": n,
                    "parallel_tool_calls": parallel_tool_calls,
                    "prediction": prediction,
                    "presence_penalty": presence_penalty,
                    "prompt_cache_key": prompt_cache_key,
                    "prompt_cache_retention": prompt_cache_retention,
                    "reasoning_effort": reasoning_effort,
                    "response_format": response_format,
                    "safety_identifier": safety_identifier,
                    "seed": seed,
                    "service_tier": service_tier,
                    "stop": stop,
                    "store": store,
                    "stream": stream,
                    "stream_options": stream_options,
                    "temperature": temperature,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "top_logprobs": top_logprobs,
                    "top_p": top_p,
                    "user": user,
                    "verbosity": verbosity,
                    "web_search_options": web_search_options,
                },
                # Select the params schema matching the streaming mode so the
                # transform serializes against the correct TypedDict.
                completion_create_params.CompletionCreateParamsStreaming
                if stream
                else completion_create_params.CompletionCreateParamsNonStreaming,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatCompletion,
            # Normalize `stream` to a strict bool for the transport layer.
            stream=stream or False,
            stream_cls=AsyncStream[ChatCompletionChunk],
        )
async def retrieve(
self,
completion_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ChatCompletion:
"""Get a stored chat completion.
Only Chat Completions that have been created with
the `store` parameter set to `true` will be returned.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not completion_id:
raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
return await self._get(
f"/chat/completions/{completion_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ChatCompletion,
)
async def update(
self,
completion_id: str,
*,
metadata: Optional[Metadata],
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ChatCompletion:
"""Modify a stored chat completion.
Only Chat Completions that have been created
with the `store` parameter set to `true` can be modified. Currently, the only
supported modification is to update the `metadata` field.
Args:
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not completion_id:
raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
return await self._post(
f"/chat/completions/{completion_id}",
body=await async_maybe_transform({"metadata": metadata}, completion_update_params.CompletionUpdateParams),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ChatCompletion,
)
def list(
self,
*,
after: str | Omit = omit,
limit: int | Omit = omit,
metadata: Optional[Metadata] | Omit = omit,
model: str | Omit = omit,
order: Literal["asc", "desc"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncPaginator[ChatCompletion, AsyncCursorPage[ChatCompletion]]:
"""List stored Chat Completions.
Only Chat Completions that have been stored with
the `store` parameter set to `true` will be returned.
Args:
after: Identifier for the last chat completion from the previous pagination request.
limit: Number of Chat Completions to retrieve.
metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful
for storing additional information about the object in a structured format, and
querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
model: The model used to generate the Chat Completions.
order: Sort order for Chat Completions by timestamp. Use `asc` for ascending order or
`desc` for descending order. Defaults to `asc`.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._get_api_list(
"/chat/completions",
page=AsyncCursorPage[ChatCompletion],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
"metadata": metadata,
"model": model,
"order": order,
},
completion_list_params.CompletionListParams,
),
),
model=ChatCompletion,
)
async def delete(
self,
completion_id: str,
*,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ChatCompletionDeleted:
"""Delete a stored chat completion.
Only Chat Completions that have been created
with the `store` parameter set to `true` can be deleted.
Args:
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not completion_id:
raise ValueError(f"Expected a non-empty value for `completion_id` but received {completion_id!r}")
return await self._delete(
f"/chat/completions/{completion_id}",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=ChatCompletionDeleted,
)
    def stream(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        audio: Optional[ChatCompletionAudioParam] | Omit = omit,
        response_format: completion_create_params.ResponseFormat | type[ResponseFormatT] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        function_call: completion_create_params.FunctionCall | Omit = omit,
        functions: Iterable[completion_create_params.Function] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[bool] | Omit = omit,
        max_completion_tokens: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        metadata: Optional[Metadata] | Omit = omit,
        modalities: Optional[List[Literal["text", "audio"]]] | Omit = omit,
        n: Optional[int] | Omit = omit,
        parallel_tool_calls: bool | Omit = omit,
        prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        prompt_cache_key: str | Omit = omit,
        prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit,
        reasoning_effort: Optional[ReasoningEffort] | Omit = omit,
        safety_identifier: str | Omit = omit,
        seed: Optional[int] | Omit = omit,
        service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        store: Optional[bool] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        tool_choice: ChatCompletionToolChoiceOptionParam | Omit = omit,
        tools: Iterable[ChatCompletionToolUnionParam] | Omit = omit,
        top_logprobs: Optional[int] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        verbosity: Optional[Literal["low", "medium", "high"]] | Omit = omit,
        web_search_options: completion_create_params.WebSearchOptions | Omit = omit,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> AsyncChatCompletionStreamManager[ResponseFormatT]:
        """Wrapper over the `client.chat.completions.create(stream=True)` method that provides a more granular event API
        and automatic accumulation of each delta.

        This also supports all of the parsing utilities that `.parse()` does.

        Unlike `.create(stream=True)`, the `.stream()` method requires usage within a context manager to prevent accidental leakage of the response:

        ```py
        async with client.chat.completions.stream(
            model="gpt-4o-2024-08-06",
            messages=[...],
        ) as stream:
            async for event in stream:
                if event.type == "content.delta":
                    print(event.delta, flush=True, end="")
        ```

        When the context manager is entered, an `AsyncChatCompletionStream` instance is returned which, like `.create(stream=True)` is an async iterator. The full list of events that are yielded by the iterator are outlined in [these docs](https://github.com/openai/openai-python/blob/main/helpers.md#chat-completions-events).

        When the context manager exits, the response will be closed, however the `stream` instance is still available outside
        the context manager.
        """
        # Reject tool definitions the streaming helper cannot work with before any request is made.
        _validate_input_tools(tools)
        # Tag the request so the backend can attribute it to this helper method.
        # Caller-supplied headers are spread last, so they win on key collisions.
        extra_headers = {
            "X-Stainless-Helper-Method": "chat.completions.stream",
            **(extra_headers or {}),
        }
        # NOTE(review): `self.create(...)` is deliberately not awaited here — the coroutine is
        # handed to the stream manager, which presumably awaits it when the `async with` block
        # is entered; confirm against AsyncChatCompletionStreamManager. `stream=True` is forced,
        # and a class-based `response_format` is converted to its wire representation while the
        # original object is kept below for parsing.
        api_request = self.create(
            messages=messages,
            model=model,
            audio=audio,
            stream=True,
            response_format=_type_to_response_format(response_format),
            frequency_penalty=frequency_penalty,
            function_call=function_call,
            functions=functions,
            logit_bias=logit_bias,
            logprobs=logprobs,
            max_completion_tokens=max_completion_tokens,
            max_tokens=max_tokens,
            metadata=metadata,
            modalities=modalities,
            n=n,
            parallel_tool_calls=parallel_tool_calls,
            prediction=prediction,
            presence_penalty=presence_penalty,
            prompt_cache_key=prompt_cache_key,
            prompt_cache_retention=prompt_cache_retention,
            reasoning_effort=reasoning_effort,
            safety_identifier=safety_identifier,
            seed=seed,
            service_tier=service_tier,
            stop=stop,
            store=store,
            stream_options=stream_options,
            temperature=temperature,
            tool_choice=tool_choice,
            tools=tools,
            top_logprobs=top_logprobs,
            top_p=top_p,
            user=user,
            verbosity=verbosity,
            web_search_options=web_search_options,
            extra_headers=extra_headers,
            extra_query=extra_query,
            extra_body=extra_body,
            timeout=timeout,
        )
        # The original `response_format` (possibly a model class) and `tools` are passed through
        # so the manager can parse structured outputs and tool calls from the stream.
        return AsyncChatCompletionStreamManager(
            api_request,
            response_format=response_format,
            input_tools=tools,
        )
| AsyncCompletions |
python | kamyu104__LeetCode-Solutions | Python/longest-common-prefix.py | {
"start": 71,
"end": 527
} | class ____(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
if not strs:
return ""
for i in xrange(len(strs[0])):
for string in strs[1:]:
if i >= len(string) or string[i] != strs[0][i]:
return strs[0][:i]
return strs[0]
# Time: O(n * k), k is the length of the common prefix
# Space: O(k)
| Solution |
python | walkccc__LeetCode | solutions/1370. Increasing Decreasing String/1370.py | {
"start": 0,
"end": 312
} | class ____:
def sortString(self, s: str) -> str:
ans = []
count = collections.Counter(s)
while count:
for chars in string.ascii_lowercase, reversed(string.ascii_lowercase):
ans += [c for c in chars if c in count]
count -= dict.fromkeys(count, 1)
return ''.join(ans)
| Solution |
python | psf__black | tests/data/cases/raw_docstring.py | {
"start": 127,
"end": 170
} | class ____:
R"""Raw"""
# output
| UpperCaseR |
python | doocs__leetcode | solution/1400-1499/1431.Kids With the Greatest Number of Candies/Solution.py | {
"start": 0,
"end": 191
} | class ____:
def kidsWithCandies(self, candies: List[int], extraCandies: int) -> List[bool]:
mx = max(candies)
return [candy + extraCandies >= mx for candy in candies]
| Solution |
python | numpy__numpy | benchmarks/benchmarks/bench_function_base.py | {
"start": 1790,
"end": 2606
} | class ____(Benchmark):
def setup(self):
self.e = np.arange(10000, dtype=np.float32)
self.o = np.arange(10001, dtype=np.float32)
self.tall = np.random.random((10000, 20))
self.wide = np.random.random((20, 10000))
def time_even(self):
np.median(self.e)
def time_odd(self):
np.median(self.o)
def time_even_inplace(self):
np.median(self.e, overwrite_input=True)
def time_odd_inplace(self):
np.median(self.o, overwrite_input=True)
def time_even_small(self):
np.median(self.e[:500], overwrite_input=True)
def time_odd_small(self):
np.median(self.o[:500], overwrite_input=True)
def time_tall(self):
np.median(self.tall, axis=-1)
def time_wide(self):
np.median(self.wide, axis=0)
| Median |
python | boto__boto3 | tests/functional/test_s3.py | {
"start": 850,
"end": 1904
} | class ____(unittest.TestCase):
def test_transfer_methods_injected_to_client(self):
session = boto3.session.Session(region_name='us-west-2')
client = session.client('s3')
assert hasattr(client, 'upload_file')
assert hasattr(client, 'download_file')
assert hasattr(client, 'copy')
def test_bucket_resource_has_load_method(self):
session = boto3.session.Session(region_name='us-west-2')
bucket = session.resource('s3').Bucket('fakebucket')
assert hasattr(bucket, 'load')
def test_transfer_methods_injected_to_bucket(self):
bucket = boto3.resource('s3').Bucket('my_bucket')
assert hasattr(bucket, 'upload_file')
assert hasattr(bucket, 'download_file')
assert hasattr(bucket, 'copy')
def test_transfer_methods_injected_to_object(self):
obj = boto3.resource('s3').Object('my_bucket', 'my_key')
assert hasattr(obj, 'upload_file')
assert hasattr(obj, 'download_file')
assert hasattr(obj, 'copy')
| TestS3MethodInjection |
python | tensorflow__tensorflow | tensorflow/python/data/ops/dataset_ops.py | {
"start": 186831,
"end": 188104
} | class ____(composite_tensor.CompositeTensor):
def __init__(self, variant_tensor, element_spec, dataset_shape):
self._variant_tensor = variant_tensor
self._element_spec = element_spec
self._dataset_shape = dataset_shape
@property
def _type_spec(self):
return DatasetSpec(self._element_spec, self._dataset_shape)
@tf_export("data.experimental.from_variant")
def from_variant(variant, structure):
"""Constructs a dataset from the given variant and (nested) structure.
Args:
variant: A scalar `tf.variant` tensor representing a dataset.
structure: A (nested) structure of `tf.TypeSpec` objects representing the
structure of each element in the dataset.
Returns:
A `tf.data.Dataset` instance.
"""
return _VariantDataset(variant, structure) # pylint: disable=protected-access
@tf_export("data.experimental.to_variant")
def to_variant(dataset: DatasetV2):
"""Returns a variant representing the given dataset.
Args:
dataset: A `tf.data.Dataset`.
Returns:
A scalar `tf.variant` tensor representing the given dataset.
"""
return dataset._variant_tensor # pylint: disable=protected-access
@tf_export(
"data.DatasetSpec",
v1=["data.DatasetSpec", "data.experimental.DatasetStructure"])
| _NestedVariant |
python | ray-project__ray | python/ray/data/tests/test_json.py | {
"start": 18726,
"end": 21058
} | class ____:
@pytest.mark.parametrize(
"data",
[{"a": []}, {"a": [1]}, {"a": [1, 2, 3]}],
ids=["empty", "single", "multiple"],
)
@pytest.mark.parametrize(
"compression,filename",
[("gzip", "test.json.gz"), ("infer", "test.json")], # infer = default
)
def test_read_stream(
self,
data,
tmp_path,
compression,
filename,
target_max_block_size_infinite_or_default,
):
# Setup test file.
df = pd.DataFrame(data)
path = os.path.join(tmp_path, filename)
df.to_json(path, orient="records", lines=True, compression=compression)
# Setup datasource.
local_filesystem = fs.LocalFileSystem()
source = PandasJSONDatasource(
path, target_output_size_bytes=1, filesystem=local_filesystem
)
# Read stream.
block_builder = PandasBlockBuilder()
with source._open_input_source(local_filesystem, path) as f:
for block in source._read_stream(f, path):
block_builder.add_block(block)
block = block_builder.build()
# Verify.
assert rows_same(block, df)
def test_read_stream_with_target_output_size_bytes(
self, tmp_path, target_max_block_size_infinite_or_default
):
# Setup test file. It contains 16 lines, each line is 8 MiB.
df = pd.DataFrame({"data": ["a" * 8 * 1024 * 1024] * 16})
path = os.path.join(tmp_path, "test.json")
df.to_json(path, orient="records", lines=True)
# Setup datasource. It should read 32 MiB (4 lines) per output.
local_filesystem = fs.LocalFileSystem()
source = PandasJSONDatasource(
path,
target_output_size_bytes=32 * 1024 * 1024,
filesystem=local_filesystem,
)
# Read stream.
block_builder = PandasBlockBuilder()
with source._open_input_source(local_filesystem, path) as f:
for block in source._read_stream(f, path):
assert len(block) == 4
block_builder.add_block(block)
block = block_builder.build()
# Verify.
assert rows_same(block, df)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| TestPandasJSONDatasource |
python | pytorch__pytorch | test/test_utils.py | {
"start": 23147,
"end": 23278
} | class ____(TestCase):
def test_import_hipify(self):
from torch.utils.hipify import hipify_python # noqa: F401
| TestHipify |
python | kamyu104__LeetCode-Solutions | Python/insert-into-a-sorted-circular-linked-list.py | {
"start": 29,
"end": 134
} | class ____(object):
def __init__(self, val, next):
self.val = val
self.next = next
| Node |
python | bottlepy__bottle | test/test_environ.py | {
"start": 19717,
"end": 31884
} | class ____(unittest.TestCase):
def test_constructor_body(self):
self.assertEqual('',
BaseResponse('').body)
self.assertEqual('YAY',
BaseResponse('YAY').body)
def test_constructor_status(self):
self.assertEqual(200,
BaseResponse('YAY', 200).status_code)
self.assertEqual('200 OK',
BaseResponse('YAY', 200).status_line)
self.assertEqual('200 YAY',
BaseResponse('YAY', '200 YAY').status_line)
self.assertEqual('200 YAY',
BaseResponse('YAY', '200 YAY').status_line)
def test_constructor_headerlist(self):
from functools import partial
make_res = partial(BaseResponse, '', 200)
self.assertEqual('yay', make_res(x_test='yay')['x-test'])
def test_wsgi_header_values(self):
def cmp(app, wire):
rs = BaseResponse()
rs.set_header('x-test', app)
result = [v for (h, v) in rs.headerlist if h.lower()=='x-test'][0]
self.assertEqual(wire, result)
cmp(1, touni('1', 'latin1'))
cmp('öäü', 'öäü'.encode('utf8').decode('latin1'))
# Dropped byte header support in Python 3:
#cmp(tob('äöü'), 'äöü'.encode('utf8').decode('latin1'))
def test_set_status(self):
rs = BaseResponse()
rs.status = 200
self.assertEqual(rs.status, rs.status_line)
self.assertEqual(rs.status_code, 200)
self.assertEqual(rs.status_line, '200 OK')
rs.status = 999
self.assertEqual(rs.status, rs.status_line)
self.assertEqual(rs.status_code, 999)
self.assertEqual(rs.status_line, '999 Unknown')
rs.status = 404
self.assertEqual(rs.status, rs.status_line)
self.assertEqual(rs.status_code, 404)
self.assertEqual(rs.status_line, '404 Not Found')
def test(): rs.status = -200
self.assertRaises(ValueError, test)
self.assertEqual(rs.status, rs.status_line) # last value
self.assertEqual(rs.status_code, 404) # last value
self.assertEqual(rs.status_line, '404 Not Found') # last value
def test(): rs.status = 5
self.assertRaises(ValueError, test)
self.assertEqual(rs.status, rs.status_line) # last value
self.assertEqual(rs.status_code, 404) # last value
self.assertEqual(rs.status_line, '404 Not Found') # last value
rs.status = '999 Who knows?' # Illegal, but acceptable three digit code
self.assertEqual(rs.status, rs.status_line)
self.assertEqual(rs.status_code, 999)
self.assertEqual(rs.status_line, '999 Who knows?')
rs.status = 555 # Strange code
self.assertEqual(rs.status, rs.status_line)
self.assertEqual(rs.status_code, 555)
self.assertEqual(rs.status_line, '555 Unknown')
rs.status = '404 Brain not Found' # Custom reason
self.assertEqual(rs.status, rs.status_line)
self.assertEqual(rs.status_code, 404)
self.assertEqual(rs.status_line, '404 Brain not Found')
def test(): rs.status = '5 Illegal Code'
self.assertRaises(ValueError, test)
self.assertEqual(rs.status, rs.status_line) # last value
self.assertEqual(rs.status_code, 404) # last value
self.assertEqual(rs.status_line, '404 Brain not Found') # last value
def test(): rs.status = '-99 Illegal Code'
self.assertRaises(ValueError, test)
self.assertEqual(rs.status, rs.status_line) # last value
self.assertEqual(rs.status_code, 404) # last value
self.assertEqual(rs.status_line, '404 Brain not Found') # last value
def test(): rs.status = '1000 Illegal Code'
self.assertRaises(ValueError, test)
self.assertEqual(rs.status, rs.status_line) # last value
self.assertEqual(rs.status_code, 404) # last value
self.assertEqual(rs.status_line, '404 Brain not Found') # last value
def test(): rs.status = '555' # No reason
self.assertRaises(ValueError, test)
self.assertEqual(rs.status, rs.status_line) # last value
self.assertEqual(rs.status_code, 404) # last value
self.assertEqual(rs.status_line, '404 Brain not Found') # last value
# Unicode in status line (thanks RFC7230 :/)
rs.status = '400 Non-ASÎÎ'
self.assertEqual(rs.status, rs.status_line)
self.assertEqual(rs.status_code, 400)
wire = rs._wsgi_status_line().encode('latin1')
self.assertEqual(rs.status, wire.decode('utf8'))
def test_content_type(self):
rs = BaseResponse()
rs.content_type = 'test/some'
self.assertEqual('test/some', rs.headers.get('Content-Type'))
def test_charset(self):
rs = BaseResponse()
self.assertEqual(rs.charset, 'UTF-8')
rs.content_type = 'text/html; charset=latin9'
self.assertEqual(rs.charset, 'latin9')
rs.content_type = 'text/html'
self.assertEqual(rs.charset, 'UTF-8')
def test_set_cookie(self):
r = BaseResponse()
r.set_cookie('name1', 'value', max_age=5)
r.set_cookie('name2', 'value 2', path='/foo')
cookies = [value for name, value in r.headerlist
if name.title() == 'Set-Cookie']
cookies.sort()
self.assertEqual(cookies[0], 'name1=value; Max-Age=5')
self.assertEqual(cookies[1], 'name2="value 2"; Path=/foo')
def test_set_cookie_value_long_string(self):
r = BaseResponse()
self.assertRaises(ValueError, r.set_cookie, name='test', value='x' * 4097)
def test_set_cookie_name_long_string(self):
r = BaseResponse()
self.assertRaises(ValueError, r.set_cookie, name='x' * 4097, value='simple_value')
def test_set_cookie_maxage(self):
import datetime
r = BaseResponse()
r.set_cookie('name1', 'value', max_age=5)
r.set_cookie('name2', 'value', max_age=datetime.timedelta(days=1))
cookies = sorted([value for name, value in r.headerlist
if name.title() == 'Set-Cookie'])
self.assertEqual(cookies[0], 'name1=value; Max-Age=5')
self.assertEqual(cookies[1], 'name2=value; Max-Age=86400')
def test_set_cookie_expires(self):
import datetime
r = BaseResponse()
r.set_cookie('name1', 'value', expires=42)
r.set_cookie('name2', 'value', expires=datetime.datetime(1970,1,1,0,0,43))
cookies = sorted([value for name, value in r.headerlist
if name.title() == 'Set-Cookie'])
self.assertEqual(cookies[0], 'name1=value; expires=Thu, 01 Jan 1970 00:00:42 GMT')
self.assertEqual(cookies[1], 'name2=value; expires=Thu, 01 Jan 1970 00:00:43 GMT')
def test_set_cookie_secure(self):
r = BaseResponse()
r.set_cookie('name1', 'value', secure=True)
r.set_cookie('name2', 'value', secure=False)
cookies = sorted([value for name, value in r.headerlist
if name.title() == 'Set-Cookie'])
self.assertEqual(cookies[0].lower(), 'name1=value; secure')
self.assertEqual(cookies[1], 'name2=value')
def test_set_cookie_httponly(self):
if sys.version_info < (2,6,0):
return
r = BaseResponse()
r.set_cookie('name1', 'value', httponly=True)
r.set_cookie('name2', 'value', httponly=False)
cookies = sorted([value for name, value in r.headerlist
if name.title() == 'Set-Cookie'])
self.assertEqual('name1=value; httponly', cookies[0].lower())
self.assertEqual('name2=value', cookies[1])
def test_set_cookie_samesite(self):
r = BaseResponse()
r.set_cookie('name1', 'value', same_site="lax")
r.set_cookie('name2', 'value', same_site="strict")
try:
r.set_cookie('name3', 'value', same_site='invalid')
self.fail("Should raise CookieError")
except CookieError:
pass
cookies = sorted([value for name, value in r.headerlist
if name.title() == 'Set-Cookie'])
self.assertEqual('name1=value; samesite=lax', cookies[0].lower())
self.assertEqual('name2=value; samesite=strict', cookies[1].lower())
def test_clone_cookie(self):
r = BaseResponse()
r.set_cookie('name1', 'value', same_site="strict")
r2 = r.copy(BaseResponse)
cookies = sorted([value for name, value in r2.headerlist
if name.title() == 'Set-Cookie'])
self.assertEqual('name1=value; samesite=strict', cookies[0].lower())
def test_delete_cookie(self):
response = BaseResponse()
response.set_cookie('name', 'value')
response.delete_cookie('name')
cookies = [value for name, value in response.headerlist
if name.title() == 'Set-Cookie']
self.assertTrue('Max-Age=-1' in cookies[0])
def test_set_header(self):
response = BaseResponse()
response['x-test'] = 'foo'
headers = [value for name, value in response.headerlist
if name.title() == 'X-Test']
self.assertEqual(['foo'], headers)
self.assertEqual('foo', response['x-test'])
response['X-Test'] = 'bar'
headers = [value for name, value in response.headerlist
if name.title() == 'X-Test']
self.assertEqual(['bar'], headers)
self.assertEqual('bar', response['x-test'])
def test_append_header(self):
response = BaseResponse()
response.set_header('x-test', 'foo')
headers = [value for name, value in response.headerlist
if name.title() == 'X-Test']
self.assertEqual(['foo'], headers)
self.assertEqual('foo', response['x-test'])
response.add_header('X-Test', 'bar')
headers = [value for name, value in response.headerlist
if name.title() == 'X-Test']
self.assertEqual(['foo', 'bar'], headers)
self.assertEqual('bar', response['x-test'])
def test_delete_header(self):
response = BaseResponse()
response['x-test'] = 'foo'
self.assertEqual('foo', response['x-test'])
del response['X-tESt']
self.assertRaises(KeyError, lambda: response['x-test'])
def test_non_string_header(self):
response = BaseResponse()
response['x-test'] = 5
self.assertEqual('5', response['x-test'])
response['x-test'] = None
self.assertEqual('', response['x-test'])
response['x-test'] = touni('瓶')
self.assertEqual(touni('瓶'), response['x-test'])
def test_prevent_control_characters_in_headers(self):
masks = '{}test', 'test{}', 'te{}st'
tests = '\n', '\r', '\n\r', '\0'
# Test HeaderDict
apis = 'append', 'replace', '__setitem__', 'setdefault'
for api, mask, test in itertools.product(apis, masks, tests):
hd = bottle.HeaderDict()
func = getattr(hd, api)
value = mask.replace("{}", test)
self.assertRaises(ValueError, func, value, "test-value")
self.assertRaises(ValueError, func, "test-name", value)
# Test functions on BaseResponse
apis = 'add_header', 'set_header', '__setitem__'
for api, mask, test in itertools.product(apis, masks, tests):
rs = bottle.BaseResponse()
func = getattr(rs, api)
value = mask.replace("{}", test)
self.assertRaises(ValueError, func, value, "test-value")
self.assertRaises(ValueError, func, "test-name", value)
def test_expires_header(self):
import datetime
from bottle import UTC
response = BaseResponse()
now = datetime.datetime.now(UTC)
response.expires = now
def seconds(a, b):
td = max(a,b) - min(a,b)
return td.days*360*24 + td.seconds
self.assertEqual(0, seconds(response.expires, now))
now2 = datetime.datetime.fromtimestamp(
parse_date(response.headers['Expires']), tz=UTC)
self.assertEqual(0, seconds(now, now2))
| TestResponse |
python | doocs__leetcode | solution/0800-0899/0831.Masking Personal Information/Solution.py | {
"start": 0,
"end": 331
} | class ____:
def maskPII(self, s: str) -> str:
if s[0].isalpha():
s = s.lower()
return s[0] + '*****' + s[s.find('@') - 1 :]
s = ''.join(c for c in s if c.isdigit())
cnt = len(s) - 10
suf = '***-***-' + s[-4:]
return suf if cnt == 0 else f'+{"*" * cnt}-{suf}'
| Solution |
python | run-llama__llama_index | llama-index-core/llama_index/core/agent/workflow/workflow_events.py | {
"start": 544,
"end": 650
} | class ____(Event):
"""LLM input."""
input: list[ChatMessage]
current_agent_name: str
| AgentInput |
python | eth-brownie__brownie | brownie/test/managers/base.py | {
"start": 427,
"end": 10517
} | class ____:
"""
Brownie plugin base hooks.
Pytest hooks in this class are used in every testing mode.
"""
def __init__(self, config, project):
_apply_given_wrapper()
self.config = config
# required when brownie project is in a subfolder of another project
config._rootpath = Path(project._path)
self.project = project
self.project_path = project._path
self.results = {}
self.node_map = {}
self.isolated = {}
self.skip = {}
self.contracts = {
k: v["bytecodeSha1"] for k, v in project._build.items() if v.get("bytecode")
}
glob = list(
self.project_path.joinpath(self.project._structure["tests"]).glob("**/conftest.py")
)
key_func = compose(self._path, attrgetter("parent"))
self.conf_hashes = dict(zip(map(key_func, glob), map(_get_ast_hash, glob)))
try:
with self.project._build_path.joinpath("tests.json").open() as fp:
hashes = ujson_load(fp)
except (FileNotFoundError, JSONDecodeError):
hashes = {"tests": {}, "contracts": {}, "tx": {}}
self.tests = {
k: v
for k, v in hashes["tests"].items()
if self.project_path.joinpath(k).exists() and self._get_hash(k) == v["sha1"]
}
if changed_contracts := {
k
for k, v in hashes["contracts"].items()
if k not in self.contracts or v != self.contracts[k]
}:
for txhash, coverage_eval in hashes["tx"].items():
if not changed_contracts.intersection(coverage_eval.keys()):
coverage._add_cached_transaction(txhash, coverage_eval)
self.tests = {
k: v
for k, v in self.tests.items()
if v["isolated"] is not False and not changed_contracts.intersection(v["isolated"])
}
else:
for txhash, coverage_eval in hashes["tx"].items():
coverage._add_cached_transaction(txhash, coverage_eval)
def _reduce_path_strings(self, text):
# convert absolute path strings to relative ones, prior to outputting to console
base_path = f"{Path(brownie.__file__).parent.as_posix()}"
project_path = f"{self.project_path.as_posix()}/"
text = text.replace(base_path, "brownie")
text = text.replace(project_path, "")
return text
def _path(self, path):
return self.project_path.joinpath(path).relative_to(self.project_path).as_posix()
def _test_id(self, nodeid):
path, test_id = nodeid.split("::", maxsplit=1)
return self._path(path), test_id
def _get_hash(self, path):
hash_ = _get_ast_hash(self.project_path.joinpath(path))
for confpath in filter(lambda k: k in path, sorted(self.conf_hashes)):
hash_ += self.conf_hashes[confpath]
return sha1(hash_.encode()).hexdigest()
def pytest_configure(self, config):
config.addinivalue_line(
"markers", "require_network: only run test when a specific network is active"
)
config.addinivalue_line(
"markers", "skip_coverage: skips a test when coverage evaluation is active"
)
config.addinivalue_line(
"markers", "no_call_coverage: do not evaluate coverage for calls made during a test"
)
for key in ("coverage", "always_transact"):
CONFIG.argv[key] = config.getoption("--coverage")
CONFIG.argv["cli"] = "test"
CONFIG.argv["gas"] = config.getoption("--gas")
CONFIG.argv["revert"] = config.getoption("--revert-tb")
CONFIG.argv["update"] = config.getoption("--update")
CONFIG.argv["network"] = None
if config.getoption("--network"):
CONFIG.argv["network"] = config.getoption("--network")[0]
def _make_nodemap(self, ids):
self.node_map.clear()
for item in ids:
path, test = self._test_id(item)
self.node_map.setdefault(path, []).append(test)
def pytest_sessionstart(self):
    """
    Called after the `Session` object has been created and before performing
    collection and entering the run test loop.

    * Replaces the default hypothesis reporter with a one that applies source
      highlights and increased vertical space between results. The effect is
      seen in output for `hypothesis.errors.MultipleFailures` and while the
      `-v` flag is active.
    * Removes `PytestAssertRewriteWarning` warnings from the terminalreporter.
      This prevents warnings that "the `brownie` library was already imported and
      so related assertions cannot be rewritten". The warning is not relevant
      for end users who are performing tests with brownie, not on brownie,
      so we suppress it to avoid confusion.

    Removal of pytest warnings must be handled in this hook because session
    information is passed between xdist workers and master prior to test execution.
    """

    def _hypothesis_reporter(text):
        # NOTE: this closure reads `reporter`, which is only bound further
        # down in this method - after the closure is defined, but before
        # hypothesis can ever invoke it.
        text = self._reduce_path_strings(text)
        if next((i for i in ("Falsifying", "Trying", "Traceback") if text.startswith(i)), None):
            # add vertical space ahead of each new hypothesis result section
            print("")

        highlight = reporter._tw._highlight
        # lines already containing an ANSI escape are passed through as-is;
        # all other lines go through pytest's source highlighter
        text = "".join(
            f"{i}\n" if i.lstrip().startswith("\x1b") else highlight(i)
            for i in text.split("\n")
        )
        end = "\n" if text.startswith("Traceback") else ""
        print(text, end=end)

    # install the highlighting reporter for both hypothesis entry points
    hypothesis.reporting.reporter.default = _hypothesis_reporter
    hypothesis.extra.pytestplugin.default_reporter = _hypothesis_reporter

    reporter = self.config.pluginmanager.get_plugin("terminalreporter")
    # drop PytestAssertRewriteWarning entries from the collected warnings,
    # then restore the remainder unless warnings output is disabled
    warnings = reporter.stats.pop("warnings", [])
    warnings = [i for i in warnings if "PytestAssertRewriteWarning" not in i.message]
    if warnings and not self.config.getoption("--disable-warnings"):
        reporter.stats["warnings"] = warnings
def pytest_report_teststatus(self, report):
    """
    Return result-category, shortletter and verbose word for status reporting.

    With the `--update` flag, modifies the outcome of already-run skipped
    tests so that the final report shows accurate pass/fail information.

    Arguments
    ---------
    report : _pytest.reports.BaseReport
        Report object for the current test.
    """
    if report.when == "setup":
        # remember whether setup skipped this test; the teardown branch
        # below reads this flag for the same nodeid
        self.skip[report.nodeid] = report.skipped
        return ("error", "E", "ERROR") if report.failed else ("", "", "")
    if report.when == "teardown":
        if report.failed:
            return "error", "E", "ERROR"
        elif self.skip[report.nodeid]:
            # test was skipped during setup - substitute the outcome stored
            # in self.results so the final report shows pass/fail info
            path, test_id = self._test_id(report.nodeid)
            idx = self.node_map[path].index(test_id)
            report.outcome = convert_outcome(self.results[path][idx])
            report.longrepr = (path, None, "Skipped")  # File path, line no., reason
            return "skipped", "s", "SKIPPED"
        return "", "", ""
    # call phase: honor expected-failure markers before the generic outcome
    if hasattr(report, "wasxfail"):
        if report.skipped:
            return "xfailed", "x", "XFAIL"
        elif report.passed:
            return "xpassed", "X", "XPASS"
    return report.outcome, convert_outcome(report.outcome), report.outcome.upper()
def pytest_runtest_makereport(self, item):
    """
    Return a _pytest.runner.TestReport object for the given pytest.Item and
    _pytest.runner.CallInfo.

    Applies source highlighting to hypothesis output that is not related to
    `hypothesis.errors.MultipleFailures`.

    Arguments
    ---------
    item : pytest.Item
        Object representing the currently active test
    """
    if not hasattr(item, "hypothesis_report_information"):
        # no hypothesis report attached to this item - nothing to reformat
        return
    highlight = self.config.pluginmanager.get_plugin("terminalreporter")._tw._highlight
    # flatten the report entries into individual lines, shorten absolute
    # paths, then highlight every line not already carrying ANSI escapes
    item.hypothesis_report_information = [
        i if i.lstrip().startswith("\x1b") else highlight(i).rstrip("\n")
        for i in map(
            self._reduce_path_strings,
            concat(i.split("\n") for i in item.hypothesis_report_information),
        )
    ]
def pytest_terminal_summary(self, terminalreporter):
    """
    Add a section to terminal summary reporting.

    * When the `--disable-warnings` flag is active, removes all raised warnings
      prior to outputting the final console report.
    * When `--coverage` is active, outputs the result to stdout and saves the
      final report json.

    Arguments
    ---------
    terminalreporter : `_pytest.terminal.TerminalReporter`
        The internal terminal reporter object
    """
    if self.config.getoption("--disable-warnings"):
        # discard any collected warnings so they never reach the report
        terminalreporter.stats.pop("warnings", None)

    if CONFIG.argv["coverage"]:
        terminalreporter.section("Coverage")
        merged_eval = coverage.get_merged_coverage_eval()
        # echo the coverage report to the console
        for report_line in output._build_coverage_output(merged_eval):
            terminalreporter.write_line(report_line)
        # persist the report as `reports/coverage.json`
        reports_dir = self.project_path.joinpath(self.project._structure["reports"])
        output._save_coverage_report(self.project._build, merged_eval, reports_dir)
def pytest_unconfigure(self):
    """
    Called before test process is exited.

    Closes every loaded project without raising, so shutdown always completes.
    """
    for active_project in brownie.project.get_loaded_projects():
        active_project.close(raises=False)
def pytest_keyboard_interrupt(self):
    """Called on KeyboardInterrupt during the test session."""
    # record the interrupt in global CLI state; presumably inspected by
    # other hooks/teardown logic - confirm against the rest of the plugin
    CONFIG.argv["interrupt"] = True
| PytestBrownieBase |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/ddl.py | {
"start": 23514,
"end": 28423
} | class ____(DialectKWArgs, _TableViaSelect):
"""Represent a CREATE VIEW statement.
This creates a new view based on a particular SELECT statement. The schema
of the view is based on the columns of the SELECT statement, and the data
present in the view is derived from the rows represented by the
SELECT. A non-materialized view will evaluate the SELECT statement
dynamicaly as it is queried, whereas a materialized view represents a
snapshot of the SELECT statement at a particular point in time and
typically needs to be refreshed manually using database-specific commands.
The example below illustrates basic use of :class:`.CreateView`; given a
:class:`.Select` and optional :class:`.MetaData`, the
:class:`.CreateView` may be invoked directly via
:meth:`.Connection.execute` or indirectly via :meth:`.MetaData.create_all`;
the :attr:`.CreateView.table` attribute provides a :class:`.Table`
object with which to generate new queries::
from sqlalchemy import select
from sqlalchemy.sql.ddl import CreateView
# instantiate CreateView given a select() and optional MetaData
create_view = CreateView(
select(users.c.id, users.c.name).where(users.c.status == "active"),
"active_users_view",
metadata=some_metadata,
)
# a Table object is available immediately via the .table attribute
new_statement = select(create_view.table)
# to emit CREATE VIEW, either invoke CreateView directly...
with engine.begin() as conn:
conn.execute(create_view)
# or alternatively, invoke metadata.create_all()
some_metdata.create_all(engine)
# drop is performed in the usual way, via drop_all
# or table.drop() (will emit DROP VIEW)
some_metdata.drop_all(engine)
For detailed background on :class:`.CreateView` see
:ref:`metadata_create_view`.
.. versionadded:: 2.1
:param selectable: :class:`_sql.Select`
The SELECT statement defining the view.
:param view_name: table name as a string. Combine with the optional
:paramref:`.CreateView.schema` parameter to indicate a
schema-qualified table name.
:param metadata: :class:`_schema.MetaData`, optional
If provided, the :class:`_schema.Table` object available via the
:attr:`.table` attribute will be associated with this
:class:`.MetaData`. Otherwise, a new, empty :class:`.MetaData`
is created.
:param schema: str, optional schema or owner name.
:param temporary: bool, default False.
If True, render ``TEMPORARY``
:param or_replace: bool, default False.
If True, render ``OR REPLACE`` to replace an existing view if it
exists. Supported by PostgreSQL, MySQL, MariaDB, and Oracle.
Not supported by SQLite or SQL Server.
.. versionadded:: 2.1
:param materialized: bool, default False.
If True, render ``MATERIALIZED`` to create a materialized view.
Materialized views store the query results physically and can be
refreshed periodically. Not supported by all database backends.
.. versionadded:: 2.1
:param dialect_kw: Additional keyword arguments are dialect-specific and
are passed as keyword arguments to the dialect's compiler.
.. note::
For SQLite, the ``sqlite_if_not_exists`` boolean parameter
is supported to render ``CREATE VIEW IF NOT EXISTS``.
.. versionadded:: 2.1
.. seealso::
:ref:`metadata_create_view` - in :ref:`metadata_toplevel`
:class:`.CreateTableAs` - for creating a table from a SELECT statement
"""
__visit_name__ = "create_view"
inherit_cache = False
table: Table
""":class:`.Table` object representing the view that this
:class:`.CreateView` would generate when executed."""
materialized: bool
"""Boolean flag indicating if this is a materialized view."""
or_replace: bool
"""Boolean flag indicating if OR REPLACE should be used."""
def __init__(
self,
selectable: SelectBase,
view_name: str,
*,
metadata: Optional["MetaData"] = None,
schema: Optional[str] = None,
temporary: bool = False,
or_replace: bool = False,
materialized: bool = False,
**dialect_kwargs: Any,
):
self._validate_dialect_kwargs(dialect_kwargs)
super().__init__(
selectable=selectable,
name=view_name,
metadata=metadata,
schema=schema,
temporary=temporary,
if_not_exists=False,
)
self.materialized = materialized
self.or_replace = or_replace
self.table._dropper_ddl = DropView(
self.table, materialized=materialized
)
| CreateView |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_numeric.py | {
"start": 106536,
"end": 108845
} | class ____(TestCase):
def test_broadcast_in_args(self):
# gh-5881
arrs = [
np.empty((6, 7)),
np.empty((5, 6, 1)),
np.empty((7,)),
np.empty((5, 1, 7)),
]
mits = [
np.broadcast(*arrs),
np.broadcast(np.broadcast(*arrs[:0]), np.broadcast(*arrs[0:])),
np.broadcast(np.broadcast(*arrs[:1]), np.broadcast(*arrs[1:])),
np.broadcast(np.broadcast(*arrs[:2]), np.broadcast(*arrs[2:])),
np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1]),
]
for mit in mits:
assert_equal(mit.shape, (5, 6, 7))
assert_equal(mit.ndim, 3)
assert_equal(mit.nd, 3)
assert_equal(mit.numiter, 4)
for a, ia in zip(arrs, mit.iters):
assert_(a is ia.base)
def test_broadcast_single_arg(self):
# gh-6899
arrs = [np.empty((5, 6, 7))]
mit = np.broadcast(*arrs)
assert_equal(mit.shape, (5, 6, 7))
assert_equal(mit.ndim, 3)
assert_equal(mit.nd, 3)
assert_equal(mit.numiter, 1)
assert_(arrs[0] is mit.iters[0].base)
def test_number_of_arguments(self):
arr = np.empty((5,))
for j in range(35):
arrs = [arr] * j
if j > 32:
assert_raises(ValueError, np.broadcast, *arrs)
else:
mit = np.broadcast(*arrs)
assert_equal(mit.numiter, j)
def test_broadcast_error_kwargs(self):
# gh-13455
arrs = [np.empty((5, 6, 7))]
mit = np.broadcast(*arrs)
mit2 = np.broadcast(*arrs, **{}) # noqa: PIE804
assert_equal(mit.shape, mit2.shape)
assert_equal(mit.ndim, mit2.ndim)
assert_equal(mit.nd, mit2.nd)
assert_equal(mit.numiter, mit2.numiter)
assert_(mit.iters[0].base is mit2.iters[0].base)
assert_raises(ValueError, np.broadcast, 1, **{"x": 1}) # noqa: PIE804
@skip(reason="error messages do not match.")
def test_shape_mismatch_error_message(self):
with assert_raises(
ValueError,
match=r"arg 0 with shape \(1, 3\) and arg 2 with shape \(2,\)",
):
np.broadcast([[1, 2, 3]], [[4], [5]], [6, 7])
| TestBroadcast |
python | psf__black | tests/data/cases/preview_long_strings__regression.py | {
"start": 23733,
"end": 23999
} | class ____:
def foo():
XXXXXXXXXXXX.append((
"xxx_xxxxxxxxxx(xxxxx={}, xxxx={}, xxxxx, xxxx_xxxx_xxxxxxxxxx={})".format(
xxxxx, xxxx, xxxx_xxxx_xxxxxxxxxx
),
my_var,
my_other_var,
))
| A |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataproc.py | {
"start": 18895,
"end": 19545
} | class ____(DataprocTestBase):
@classmethod
def setup_class(cls):
if AIRFLOW_V_3_0_PLUS:
cls.extra_links_expected_calls = [
call.ti.xcom_push(key="conf", value=DATAPROC_JOB_CONF_EXPECTED),
call.hook().wait_for_job(job_id=TEST_JOB_ID, region=GCP_REGION, project_id=GCP_PROJECT),
]
else:
cls.extra_links_expected_calls = [
call.ti.xcom_push(key="conf", value=DATAPROC_JOB_CONF_EXPECTED, execution_date=None),
call.hook().wait_for_job(job_id=TEST_JOB_ID, region=GCP_REGION, project_id=GCP_PROJECT),
]
| DataprocJobTestBase |
python | huggingface__transformers | tests/models/layoutxlm/test_processing_layoutxlm.py | {
"start": 5326,
"end": 21526
} | class ____(unittest.TestCase):
@cached_property
def get_images(self):
# we verify our implementation on 2 document images from the DocVQA dataset
from datasets import load_dataset
ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
return ds[0]["image"].convert("RGB"), ds[1]["image"].convert("RGB")
@cached_property
def get_tokenizers(self):
slow_tokenizer = LayoutXLMTokenizer.from_pretrained("microsoft/layoutxlm-base")
fast_tokenizer = LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base")
return [slow_tokenizer, fast_tokenizer]
@slow
def test_processor_case_1(self):
# case 1: document image classification (training, inference) + token classification (inference), apply_ocr = True
image_processor = LayoutLMv2ImageProcessor()
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutXLMProcessor(image_processor=image_processor, tokenizer=tokenizer)
# not batched
input_feat_extract = image_processor(images[0], return_tensors="pt")
input_processor = processor(images[0], return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify image
self.assertAlmostEqual(
input_feat_extract["pixel_values"].sum(), input_processor["image"].sum(), delta=1e-2
)
# verify input_ids
# this was obtained with Tesseract 4.1.1
expected_decoding = "<s> 11:14 to 11:39 a.m 11:39 to 11:44 a.m. 11:44 a.m. to 12:25 p.m. 12:25 to 12:58 p.m. 12:58 to 4:00 p.m. 2:00 to 5:00 p.m. Coffee Break Coffee will be served for men and women in the lobby adjacent to exhibit area. Please move into exhibit area. (Exhibits Open) TRRF GENERAL SESSION (PART |) Presiding: Lee A. Waller TRRF Vice President “Introductory Remarks” Lee A. Waller, TRRF Vice Presi- dent Individual Interviews with TRRF Public Board Members and Sci- entific Advisory Council Mem- bers Conducted by TRRF Treasurer Philip G. Kuehn to get answers which the public refrigerated warehousing industry is looking for. Plus questions from the floor. Dr. Emil M. Mrak, University of Cal- ifornia, Chairman, TRRF Board; Sam R. Cecil, University of Georgia College of Agriculture; Dr. Stanley Charm, Tufts University School of Medicine; Dr. Robert H. Cotton, ITT Continental Baking Company; Dr. Owen Fennema, University of Wis- consin; Dr. Robert E. Hardenburg, USDA. Questions and Answers Exhibits Open Capt. Jack Stoney Room TRRF Scientific Advisory Council Meeting Ballroom Foyer</s>" # fmt: skip
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# batched
input_feat_extract = image_processor(images, return_tensors="pt")
input_processor = processor(images, padding=True, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify images
self.assertAlmostEqual(
input_feat_extract["pixel_values"].sum(), input_processor["image"].sum(), delta=1e-2
)
# verify input_ids
# this was obtained with Tesseract 4.1.1
expected_decoding = "<s> 7 ITC Limited REPORT AND ACCOUNTS 2013 ITC’s Brands: An Asset for the Nation The consumer needs and aspirations they fulfil, the benefit they generate for millions across ITC’s value chains, the future-ready capabilities that support them, and the value that they create for the country, have made ITC’s brands national assets, adding to India’s competitiveness. It is ITC’s aspiration to be the No 1 FMCG player in the country, driven by its new FMCG businesses. A recent Nielsen report has highlighted that ITC's new FMCG businesses are the fastest growing among the top consumer goods companies operating in India. ITC takes justifiable pride that, along with generating economic value, these celebrated Indian brands also drive the creation of larger societal capital through the virtuous cycle of sustainable and inclusive growth. DI WILLS * ; LOVE DELIGHTFULLY SOFT SKIN? aia Ans Source: https://www.industrydocuments.ucsf.edu/docs/snbx0223</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>" # fmt: skip
decoding = processor.decode(input_processor.input_ids[1].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
@slow
def test_processor_case_2(self):
# case 2: document image classification (training, inference) + token classification (inference), apply_ocr=False
image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutXLMProcessor(image_processor=image_processor, tokenizer=tokenizer)
# not batched
words = ["hello", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
input_processor = processor(images[0], words, boxes=boxes, return_tensors="pt")
# verify keys
expected_keys = ["input_ids", "bbox", "attention_mask", "image"]
actual_keys = list(input_processor.keys())
for key in expected_keys:
self.assertIn(key, actual_keys)
# verify input_ids
expected_decoding = "<s> hello world</s>"
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# batched
words = [["hello", "world"], ["my", "name", "is", "niels"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
input_processor = processor(images, words, boxes=boxes, padding=True, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "<s> hello world</s><pad><pad>"
decoding = processor.decode(input_processor.input_ids[0].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify bbox
expected_bbox = [
[0, 0, 0, 0],
[3, 2, 5, 1],
[6, 7, 4, 2],
[3, 9, 2, 4],
[1, 1, 2, 3],
[1, 1, 2, 3],
[1000, 1000, 1000, 1000],
]
self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)
@slow
def test_processor_case_3(self):
# case 3: token classification (training), apply_ocr=False
image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutXLMProcessor(image_processor=image_processor, tokenizer=tokenizer)
# not batched
words = ["weirdly", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
word_labels = [1, 2]
input_processor = processor(images[0], words, boxes=boxes, word_labels=word_labels, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "labels"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "<s> weirdly world</s>"
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify labels
expected_labels = [-100, 1, -100, 2, -100]
self.assertListEqual(input_processor.labels.squeeze().tolist(), expected_labels)
# batched
words = [["hello", "world"], ["my", "name", "is", "niels"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
word_labels = [[1, 2], [6, 3, 10, 2]]
input_processor = processor(
images, words, boxes=boxes, word_labels=word_labels, padding=True, return_tensors="pt"
)
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids", "labels"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "<s> my name is niels</s>"
decoding = processor.decode(input_processor.input_ids[1].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify bbox
expected_bbox = [
[0, 0, 0, 0],
[3, 2, 5, 1],
[6, 7, 4, 2],
[3, 9, 2, 4],
[1, 1, 2, 3],
[1, 1, 2, 3],
[1000, 1000, 1000, 1000],
]
self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)
# verify labels
expected_labels = [-100, 6, 3, 10, 2, -100, -100]
self.assertListEqual(input_processor.labels[1].tolist(), expected_labels)
@slow
def test_processor_case_4(self):
# case 4: visual question answering (inference), apply_ocr=True
image_processor = LayoutLMv2ImageProcessor()
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutXLMProcessor(image_processor=image_processor, tokenizer=tokenizer)
# not batched
question = "What's his name?"
input_processor = processor(images[0], question, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
# this was obtained with Tesseract 4.1.1
expected_decoding = "<s> What's his name?</s></s> 11:14 to 11:39 a.m 11:39 to 11:44 a.m. 11:44 a.m. to 12:25 p.m. 12:25 to 12:58 p.m. 12:58 to 4:00 p.m. 2:00 to 5:00 p.m. Coffee Break Coffee will be served for men and women in the lobby adjacent to exhibit area. Please move into exhibit area. (Exhibits Open) TRRF GENERAL SESSION (PART |) Presiding: Lee A. Waller TRRF Vice President “Introductory Remarks” Lee A. Waller, TRRF Vice Presi- dent Individual Interviews with TRRF Public Board Members and Sci- entific Advisory Council Mem- bers Conducted by TRRF Treasurer Philip G. Kuehn to get answers which the public refrigerated warehousing industry is looking for. Plus questions from the floor. Dr. Emil M. Mrak, University of Cal- ifornia, Chairman, TRRF Board; Sam R. Cecil, University of Georgia College of Agriculture; Dr. Stanley Charm, Tufts University School of Medicine; Dr. Robert H. Cotton, ITT Continental Baking Company; Dr. Owen Fennema, University of Wis- consin; Dr. Robert E. Hardenburg, USDA. Questions and Answers Exhibits Open Capt. Jack Stoney Room TRRF Scientific Advisory Council Meeting Ballroom Foyer</s>" # fmt: skip
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# batched
questions = ["How old is he?", "what's the time"]
input_processor = processor(
images, questions, padding="max_length", max_length=20, truncation=True, return_tensors="pt"
)
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
# this was obtained with Tesseract 4.1.1
expected_decoding = "<s> what's the time</s></s> 7 ITC Limited REPORT AND ACCOUNTS 2013</s>"
decoding = processor.decode(input_processor.input_ids[1].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify bbox
expected_bbox = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [1000, 1000, 1000, 1000], [1000, 1000, 1000, 1000], [0, 45, 67, 80], [72, 56, 109, 67], [72, 56, 109, 67], [116, 56, 189, 67], [198, 59, 253, 66], [257, 59, 285, 66], [289, 59, 365, 66], [289, 59, 365, 66], [289, 59, 365, 66], [289, 59, 365, 66], [372, 59, 407, 66], [1000, 1000, 1000, 1000]] # fmt: skip
self.assertListEqual(input_processor.bbox[1].tolist(), expected_bbox)
@slow
def test_processor_case_5(self):
# case 5: visual question answering (inference), apply_ocr=False
image_processor = LayoutLMv2ImageProcessor(apply_ocr=False)
tokenizers = self.get_tokenizers
images = self.get_images
for tokenizer in tokenizers:
processor = LayoutXLMProcessor(image_processor=image_processor, tokenizer=tokenizer)
# not batched
question = "What's his name?"
words = ["hello", "world"]
boxes = [[1, 2, 3, 4], [5, 6, 7, 8]]
input_processor = processor(images[0], question, words, boxes, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "<s> What's his name?</s></s> hello world</s>"
decoding = processor.decode(input_processor.input_ids.squeeze().tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# batched
questions = ["How old is he?", "what's the time"]
words = [["hello", "world"], ["my", "name", "is", "niels"]]
boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[3, 2, 5, 1], [6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3]]]
input_processor = processor(images, questions, words, boxes, padding=True, return_tensors="pt")
# verify keys
expected_keys = ["attention_mask", "bbox", "image", "input_ids"]
actual_keys = sorted(input_processor.keys())
self.assertListEqual(actual_keys, expected_keys)
# verify input_ids
expected_decoding = "<s> How old is he?</s></s> hello world</s><pad><pad>"
decoding = processor.decode(input_processor.input_ids[0].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
expected_decoding = "<s> what's the time</s></s> my name is niels</s>"
decoding = processor.decode(input_processor.input_ids[1].tolist())
self.assertSequenceEqual(decoding, expected_decoding)
# verify bbox
expected_bbox = [[6, 7, 4, 2], [3, 9, 2, 4], [1, 1, 2, 3], [1, 1, 2, 3], [1000, 1000, 1000, 1000]]
self.assertListEqual(input_processor.bbox[1].tolist()[-5:], expected_bbox)
| LayoutXLMProcessorIntegrationTests |
python | astropy__astropy | astropy/cosmology/_src/tests/io/test_yaml.py | {
"start": 1648,
"end": 5852
} | class ____(ToFromTestMixinBase):
"""
Tests for a Cosmology[To/From]Format with ``format="yaml"``.
This class will not be directly called by :mod:`pytest` since its name does
not begin with ``Test``. To activate the contained tests this class must
be inherited in a subclass. Subclasses must define a :func:`pytest.fixture`
``cosmo`` that returns/yields an instance of a |Cosmology|.
See ``TestCosmologyToFromFormat`` or ``TestCosmology`` for examples.
"""
@pytest.fixture
def xfail_if_not_registered_with_yaml(self, cosmo_cls):
"""
YAML I/O only works on registered classes. So the thing to check is
if this class is registered. If not, :func:`pytest.xfail` this test.
Some of the tests define custom cosmologies. They are not registered.
"""
if cosmo_cls not in AstropyDumper.yaml_representers:
pytest.xfail(
f"Cosmologies of type {cosmo_cls} are not registered with YAML."
)
# ===============================================================
def test_to_yaml(self, cosmo_cls, to_format, xfail_if_not_registered_with_yaml):
"""Test cosmology -> YAML."""
yml = to_format("yaml")
assert isinstance(yml, str) # test type
assert yml.startswith("!" + ".".join(cosmo_cls.__module__.split(".")[:2]))
# e.g. "astropy.cosmology" for built-in cosmologies, or "__main__" for the test
# SubCosmology class defined in ``astropy.cosmology._src.tests.test_core``.
def test_from_yaml_default(
self, cosmo, to_format, from_format, xfail_if_not_registered_with_yaml
):
"""Test cosmology -> YAML -> cosmology."""
yml = to_format("yaml")
got = from_format(yml, format="yaml") # (cannot autoidentify)
assert got.name == cosmo.name
assert got.meta == cosmo.meta
# it won't error if everything matches up
got = from_format(yml, format="yaml")
assert got == cosmo
assert got.meta == cosmo.meta
# auto-identify test moved because it doesn't work.
# see test_from_yaml_autoidentify
def test_from_yaml_autoidentify(
self, cosmo, to_format, from_format, xfail_if_not_registered_with_yaml
):
"""As a non-path string, it does NOT auto-identifies 'format'.
TODO! this says there should be different types of I/O registries.
not just hacking object conversion on top of file I/O.
"""
assert self.can_autodentify("yaml") is False
# Showing the specific error. The str is interpreted as a file location
# but is too long a file name.
yml = to_format("yaml")
with pytest.raises((FileNotFoundError, OSError)): # OSError in Windows
from_format(yml)
# # TODO! this is a challenging test to write. It's also unlikely to happen.
# def test_fromformat_subclass_partial_info_yaml(self, cosmo):
# """
# Test writing from an instance and reading from that class.
# This works with missing information.
# """
# -----------------------------------------------------
@pytest.mark.parametrize("format", [True, False, None])
def test_is_equivalent_to_yaml(
self, cosmo, to_format, format, xfail_if_not_registered_with_yaml
):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
This test checks that Cosmology equivalency can be extended to any
Python object that can be converted to a Cosmology -- in this case
a YAML string. YAML can't be identified without "format" specified.
"""
obj = to_format("yaml")
assert not isinstance(obj, Cosmology)
is_equiv = cosmo.is_equivalent(obj, format=format)
assert is_equiv is False
def test_is_equivalent_to_yaml_specify_format(
self, cosmo, to_format, xfail_if_not_registered_with_yaml
):
"""Test :meth:`astropy.cosmology.Cosmology.is_equivalent`.
Same as ``test_is_equivalent_to_yaml`` but with ``format="yaml"``.
"""
assert cosmo.is_equivalent(to_format("yaml"), format="yaml") is True
| ToFromYAMLTestMixin |
python | doocs__leetcode | solution/1800-1899/1855.Maximum Distance Between a Pair of Values/Solution2.py | {
"start": 0,
"end": 318
} | class ____:
def maxDistance(self, nums1: List[int], nums2: List[int]) -> int:
m, n = len(nums1), len(nums2)
ans = i = j = 0
while i < m:
while j < n and nums1[i] <= nums2[j]:
j += 1
ans = max(ans, j - i - 1)
i += 1
return ans
| Solution |
python | getsentry__sentry | src/sentry/sentry_apps/api/serializers/platform_external_issue.py | {
"start": 540,
"end": 1038
} | class ____(Serializer):
def serialize(
self,
obj: PlatformExternalIssue,
attrs: Mapping[str, Any],
user: User | AnonymousUser | RpcUser,
**kwargs: Any,
) -> PlatformExternalIssueSerializerResponse:
return {
"id": str(obj.id),
"issueId": str(obj.group_id),
"serviceType": obj.service_type,
"displayName": obj.display_name,
"webUrl": obj.web_url,
}
| PlatformExternalIssueSerializer |
python | getsentry__sentry | src/sentry/integrations/api/endpoints/organization_integration_serverless_functions.py | {
"start": 887,
"end": 3162
} | class ____(RegionOrganizationIntegrationBaseEndpoint):
owner = ApiOwner.INTEGRATIONS
publish_status = {
"GET": ApiPublishStatus.UNKNOWN,
"POST": ApiPublishStatus.UNKNOWN,
}
def get(
self,
request: Request,
organization: Organization,
integration_id: int,
**kwds: Any,
) -> Response:
"""
Get the list of repository project path configs in an integration
"""
integration = self.get_integration(organization.id, integration_id)
install = integration.get_installation(organization_id=organization.id)
if not isinstance(install, ServerlessMixin):
return self.respond({"detail": "Serverless not supported"}, status=400)
try:
serverless_functions = install.get_serverless_functions()
except IntegrationError as e:
return self.respond({"detail": str(e)}, status=400)
return self.respond(serverless_functions)
def post(
self,
request: Request,
organization: Organization,
integration_id: int,
**kwds: Any,
) -> Response:
integration = self.get_integration(organization.id, integration_id)
install = integration.get_installation(organization_id=organization.id)
if not isinstance(install, ServerlessMixin):
return self.respond({"detail": "Serverless not supported"}, status=400)
serializer = ServerlessActionSerializer(data=request.data, context={"install": install})
if not serializer.is_valid():
return self.respond(serializer.errors, status=400)
data = serializer.validated_data
action = data["action"]
target = data["target"]
try:
resp = None
if action == "enable":
resp = install.enable_function(target)
elif action == "disable":
resp = install.disable_function(target)
elif action == "updateVersion":
resp = install.update_function_to_latest_version(target)
return self.respond(resp)
except IntegrationError as e:
return self.respond({"detail": str(e)}, status=400)
| OrganizationIntegrationServerlessFunctionsEndpoint |
python | scipy__scipy | scipy/sparse/linalg/tests/test_onenormest.py | {
"start": 288,
"end": 1069
} | class ____(scipy.sparse.linalg.LinearOperator):
"""
This is purely for onenormest testing.
"""
def __init__(self, A, B):
if A.ndim != 2 or B.ndim != 2:
raise ValueError('expected ndarrays representing matrices')
if A.shape[1] != B.shape[0]:
raise ValueError('incompatible shapes')
self.A = A
self.B = B
self.ndim = 2
self.shape = (A.shape[0], B.shape[1])
def _matvec(self, x):
return np.dot(self.A, np.dot(self.B, x))
def _rmatvec(self, x):
return np.dot(np.dot(x, self.A), self.B)
def _matmat(self, X):
return np.dot(self.A, np.dot(self.B, X))
@property
def T(self):
return MatrixProductOperator(self.B.T, self.A.T)
| MatrixProductOperator |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/mariadb.py | {
"start": 965,
"end": 1111
} | class ____(sqltypes.TypeEngine[str]):
"""INET6 column type for MariaDB
.. versionadded:: 2.0.37
"""
__visit_name__ = "INET6"
| INET6 |
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 249293,
"end": 249492
} | class ____(unittest.TestCase):
def test_tcp_keepalive(self):
self.assertTrue(socket.TCP_KEEPALIVE)
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
| TestMacOSTCPFlags |
python | huggingface__transformers | tests/models/dinov2_with_registers/test_modeling_dinov2_with_registers.py | {
"start": 11301,
"end": 12840
} | class ____(unittest.TestCase):
@cached_property
def default_image_processor(self):
return (
AutoImageProcessor.from_pretrained("facebook/dinov2-with-registers-base")
if is_vision_available()
else None
)
@slow
def test_inference_no_head(self):
model = Dinov2WithRegistersModel.from_pretrained("facebook/dinov2-with-registers-base").to(torch_device)
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the last hidden states
# in DINOv2 with Registers, the seq length equals the number of patches + 1 + num_register_tokens (we add 1 for the [CLS] token)
num_patches = (image_processor.crop_size["height"] // model.config.patch_size) ** 2
expected_seq_length = num_patches + 1 + model.config.num_register_tokens
expected_shape = torch.Size((1, expected_seq_length, model.config.hidden_size))
self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor(
[[-0.4636, -1.4582, -0.0274], [-1.4738, -0.8858, 0.3002], [0.0714, -0.2407, -1.5940]],
device=torch_device,
)
torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@require_torch
| Dinov2WithRegistersModelIntegrationTest |
python | huggingface__transformers | src/transformers/models/fsmt/configuration_fsmt.py | {
"start": 781,
"end": 1225
} | class ____(PreTrainedConfig):
r"""
Configuration class for FSMT's decoder specific things. note: this is a private helper class
"""
model_type = "fsmt_decoder"
def __init__(self, vocab_size=0, bos_token_id=0, is_encoder_decoder=True, **kwargs):
super().__init__(**kwargs)
self.vocab_size = vocab_size
self.bos_token_id = bos_token_id
self.is_encoder_decoder = is_encoder_decoder
| DecoderConfig |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_test.py | {
"start": 5017,
"end": 9975
} | class ____(test.TestCase):
@classmethod
def setUpClass(cls): # pylint: disable=g-missing-super-call
cls._gpu_available = test_util.is_gpu_available()
def _testSparseSparse(self, transpose_a, transpose_b, adjoint_a, adjoint_b):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
dense_shape_a = [5, 13, 7] if transpose_a or adjoint_a else [5, 7, 13]
dense_shape_b = [5, 15, 13] if transpose_b or adjoint_b else [5, 13, 15]
dtypes_to_test = [np.float32, np.complex64]
for dtype in dtypes_to_test:
a_mats = sparsify((np.random.randn(*dense_shape_a) +
1.j * np.random.randn(*dense_shape_a))).astype(dtype)
b_mats = sparsify((np.random.randn(*dense_shape_b) +
1.j * np.random.randn(*dense_shape_b))).astype(dtype)
a_sm = sparse_csr_matrix_ops.CSRSparseMatrix(a_mats)
b_sm = sparse_csr_matrix_ops.CSRSparseMatrix(b_mats)
c_dense = test_util.matmul_without_tf32(
a_mats,
b_mats,
transpose_a=transpose_a,
transpose_b=transpose_b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b)
c_sm = sparse_csr_matrix_ops.matmul(
a_sm,
b_sm,
transpose_a=transpose_a,
transpose_b=transpose_b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b)
self.assertIsInstance(c_sm, sparse_csr_matrix_ops.CSRSparseMatrix)
c_sm_dense = c_sm.to_dense()
c_dense, c_sm_dense = self.evaluate([c_dense, c_sm_dense])
self.assertAllClose(c_dense, c_sm_dense)
@test_util.run_in_graph_and_eager_modes
def testSparseSparse(self):
for (t_a, t_b, adj_a, adj_b) in itertools.product(*(([False, True],) * 4)):
if (t_a and adj_a) or (t_b and adj_b):
continue
self._testSparseSparse(t_a, t_b, adj_a, adj_b)
def _testSparseDense(self, transpose_a, transpose_b, adjoint_a, adjoint_b):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
dense_shape_a = [5, 13, 7] if transpose_a or adjoint_a else [5, 7, 13]
dense_shape_b = [5, 15, 13] if transpose_b or adjoint_b else [5, 13, 15]
dtypes_to_test = [np.float32, np.complex64]
for dtype in dtypes_to_test:
a_mats = sparsify((np.random.randn(*dense_shape_a) +
1.j * np.random.randn(*dense_shape_a))).astype(dtype)
b_mats = (np.random.randn(*dense_shape_b) +
1.j * np.random.randn(*dense_shape_b)).astype(dtype)
a_sm = sparse_csr_matrix_ops.CSRSparseMatrix(a_mats)
c_dense = test_util.matmul_without_tf32(
a_mats,
b_mats,
transpose_a=transpose_a,
transpose_b=transpose_b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b)
c_sm_dense = sparse_csr_matrix_ops.matmul(
a_sm,
b_mats,
transpose_a=transpose_a,
transpose_b=transpose_b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b)
c_dense, c_sm_dense = self.evaluate([c_dense, c_sm_dense])
self.assertAllClose(c_dense, c_sm_dense)
@test_util.run_in_graph_and_eager_modes
def testSparseDense(self):
for (t_a, t_b, adj_a, adj_b) in itertools.product(*(([False, True],) * 4)):
if (t_a and adj_a) or (t_b and adj_b):
continue
self._testSparseDense(t_a, t_b, adj_a, adj_b)
def _testDenseSparse(self, transpose_a, transpose_b, adjoint_a, adjoint_b):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
dense_shape_a = [5, 13, 7] if transpose_a or adjoint_a else [5, 7, 13]
dense_shape_b = [5, 15, 13] if transpose_b or adjoint_b else [5, 13, 15]
dtypes_to_test = [np.float32, np.complex64]
for dtype in dtypes_to_test:
a_mats = (np.random.randn(*dense_shape_a) +
1.j * np.random.randn(*dense_shape_a)).astype(dtype)
b_mats = sparsify((np.random.randn(*dense_shape_b) +
1.j * np.random.randn(*dense_shape_b))).astype(dtype)
b_sm = sparse_csr_matrix_ops.CSRSparseMatrix(b_mats)
c_dense = test_util.matmul_without_tf32(
a_mats,
b_mats,
transpose_a=transpose_a,
transpose_b=transpose_b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b)
c_sm_dense = sparse_csr_matrix_ops.matmul(
a_mats,
b_sm,
transpose_a=transpose_a,
transpose_b=transpose_b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b)
c_dense, c_sm_dense = self.evaluate([c_dense, c_sm_dense])
self.assertAllClose(c_dense, c_sm_dense)
@test_util.run_in_graph_and_eager_modes
def testDenseSparse(self):
for (t_a, t_b, adj_a, adj_b) in itertools.product(*(([False, True],) * 4)):
if (t_a and adj_a) or (t_b and adj_b):
continue
self._testDenseSparse(t_a, t_b, adj_a, adj_b)
if __name__ == "__main__":
test.main()
| SparseMatrixMatmulTest |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-you/llama_index/llms/you/base.py | {
"start": 1535,
"end": 3956
} | class ____(CustomLLM):
"""
Wrapper around You.com's conversational Smart and Research APIs.
Each API endpoint is designed to generate conversational
responses to a variety of query types, including inline citations
and web results when relevant.
Smart Mode:
- Quick, reliable answers for a variety of questions
- Cites the entire web page URL
Research Mode:
- In-depth answers with extensive citations for a variety of questions
- Cites the specific web page snippet relevant to the claim
To connect to the You.com api requires an API key which
you can get at https://api.you.com.
For more information, check out the documentations at
https://documentation.you.com/api-reference/.
Args:
mode: You.com conversational endpoints. Choose from "smart" or "research"
ydc_api_key: You.com API key, if `YDC_API_KEY` is not set in the environment
"""
mode: Literal["smart", "research"] = Field(
"smart",
description='You.com conversational endpoints. Choose from "smart" or "research"',
)
ydc_api_key: Optional[str] = Field(
None,
description="You.com API key, if `YDC_API_KEY` is not set in the envrioment",
)
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
model_name=f"you.com-{self.mode}",
is_chat_model=True,
is_function_calling_model=False,
)
@llm_completion_callback()
def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
response = _request(
self.endpoint,
api_key=self._api_key,
query=prompt,
)
return CompletionResponse(text=response["answer"], raw=response)
@llm_completion_callback()
def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
response = _request_stream(
self.endpoint,
api_key=self._api_key,
query=prompt,
)
completion = ""
for token in response:
completion += token
yield CompletionResponse(text=completion, delta=token)
@property
def endpoint(self) -> str:
if self.mode == "smart":
return SMART_ENDPOINT
return RESEARCH_ENDPOINT
@property
def _api_key(self) -> str:
return self.ydc_api_key or os.environ["YDC_API_KEY"]
| You |
python | cython__cython | docs/examples/userguide/language_basics/override.py | {
"start": 165,
"end": 244
} | class ____(B): # NOTE: no cclass decorator
def foo(self):
print("C")
| C |
python | apache__airflow | providers/common/sql/tests/unit/common/sql/operators/test_sql.py | {
"start": 2545,
"end": 2654
} | class ____:
def get_records(self):
return
def _get_mock_db_hook():
return MockHook()
| MockHook |
python | django__django | django/contrib/auth/backends.py | {
"start": 9371,
"end": 12845
} | class ____(ModelBackend):
"""
This backend is to be used in conjunction with the ``RemoteUserMiddleware``
found in the middleware module of this package, and is used when the server
is handling authentication outside of Django.
By default, the ``authenticate`` method creates ``User`` objects for
usernames that don't already exist in the database. Subclasses can disable
this behavior by setting the ``create_unknown_user`` attribute to
``False``.
"""
# Create a User object if not already in the database?
create_unknown_user = True
def authenticate(self, request, remote_user):
"""
The username passed as ``remote_user`` is considered trusted. Return
the ``User`` object with the given username. Create a new ``User``
object if ``create_unknown_user`` is ``True``.
Return None if ``create_unknown_user`` is ``False`` and a ``User``
object with the given username is not found in the database.
"""
if not remote_user:
return
created = False
user = None
username = self.clean_username(remote_user)
# Note that this could be accomplished in one try-except clause, but
# instead we use get_or_create when creating unknown users since it has
# built-in safeguards for multiple threads.
if self.create_unknown_user:
user, created = UserModel._default_manager.get_or_create(
**{UserModel.USERNAME_FIELD: username}
)
else:
try:
user = UserModel._default_manager.get_by_natural_key(username)
except UserModel.DoesNotExist:
pass
user = self.configure_user(request, user, created=created)
return user if self.user_can_authenticate(user) else None
async def aauthenticate(self, request, remote_user):
"""See authenticate()."""
if not remote_user:
return
created = False
user = None
username = self.clean_username(remote_user)
# Note that this could be accomplished in one try-except clause, but
# instead we use get_or_create when creating unknown users since it has
# built-in safeguards for multiple threads.
if self.create_unknown_user:
user, created = await UserModel._default_manager.aget_or_create(
**{UserModel.USERNAME_FIELD: username}
)
else:
try:
user = await UserModel._default_manager.aget_by_natural_key(username)
except UserModel.DoesNotExist:
pass
user = await self.aconfigure_user(request, user, created=created)
return user if self.user_can_authenticate(user) else None
def clean_username(self, username):
"""
Perform any cleaning on the "username" prior to using it to get or
create the user object. Return the cleaned username.
By default, return the username unchanged.
"""
return username
def configure_user(self, request, user, created=True):
"""
Configure a user and return the updated user.
By default, return the user unmodified.
"""
return user
async def aconfigure_user(self, request, user, created=True):
"""See configure_user()"""
return await sync_to_async(self.configure_user)(request, user, created)
| RemoteUserBackend |
python | getsentry__sentry | src/sentry/migrations/0974_hc_json_field.py | {
"start": 244,
"end": 2057
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = True
dependencies = [
("sentry", "0973_safe_del_dashboardwidgetsnapshot"),
]
operations = [
migrations.SeparateDatabaseAndState(
database_operations=[
mod.to_jsonb("sentry_controloutbox", "payload"),
mod.to_jsonb("sentry_regionoutbox", "payload"),
],
state_operations=[
migrations.AlterField(
model_name="controloutbox",
name="payload",
field=models.JSONField(null=True),
),
migrations.AlterField(
model_name="regionoutbox",
name="payload",
field=models.JSONField(null=True),
),
],
)
]
| Migration |
python | numba__numba | numba/tests/test_extending_types.py | {
"start": 734,
"end": 4990
} | class ____(unittest.TestCase):
def setUp(self):
class Dummy(object):
def __init__(self, value):
self.value = value
class DummyType(types.Type):
def __init__(self):
super(DummyType, self).__init__(name='Dummy')
dummy_type = DummyType()
@register_model(DummyType)
class DummyModel(models.StructModel):
def __init__(self, dmm, fe_type):
members = [
('value', types.intp),
]
models.StructModel.__init__(self, dmm, fe_type, members)
make_attribute_wrapper(DummyType, 'value', 'value')
@type_callable(Dummy)
def type_dummy(context):
def typer(value):
return dummy_type
return typer
@lower_builtin(Dummy, types.intp)
def impl_dummy(context, builder, sig, args):
typ = sig.return_type
[value] = args
dummy = cgutils.create_struct_proxy(typ)(context, builder)
dummy.value = value
return dummy._getvalue()
@typeof_impl.register(Dummy)
def typeof_dummy(val, c):
return DummyType()
# Store attributes
self.Dummy = Dummy
self.DummyType = DummyType
def _add_float_overload(self, mock_float_inst):
@overload(mock_float_inst)
def dummy_to_float(x):
if isinstance(x, self.DummyType):
def codegen(x):
return float(x.value)
return codegen
else:
raise NumbaTypeError('cannot type float({})'.format(x))
def test_overload_float(self):
mock_float = gen_mock_float()
self._add_float_overload(mock_float)
Dummy = self.Dummy
@njit
def foo(x):
return mock_float(Dummy(x))
self.assertEqual(foo(123), float(123))
def test_overload_float_error_msg(self):
mock_float = gen_mock_float()
self._add_float_overload(mock_float)
@njit
def foo(x):
return mock_float(x)
with self.assertRaises(TypingError) as raises:
foo(1j)
self.assertIn("cannot type float(complex128)",
str(raises.exception))
def test_unboxing(self):
"""A test for the unboxing logic on unknown type
"""
Dummy = self.Dummy
@njit
def foo(x):
# pass a dummy object into another function
bar(Dummy(x))
# make sure a cpython wrapper is created
@njit(no_cpython_wrapper=False)
def bar(dummy_obj):
pass
foo(123)
with self.assertRaises(TypeError) as raises:
bar(Dummy(123))
self.assertIn("can't unbox Dummy type", str(raises.exception))
def test_boxing(self):
"""A test for the boxing logic on unknown type
"""
Dummy = self.Dummy
@njit
def foo(x):
return Dummy(x)
with self.assertRaises(TypeError) as raises:
foo(123)
self.assertIn("cannot convert native Dummy to Python object",
str(raises.exception))
def test_issue5565_literal_getitem(self):
# the following test is adapted from
# https://github.com/numba/numba/issues/5565
Dummy, DummyType = self.Dummy, self.DummyType
MAGIC_NUMBER = 12321
@overload(operator.getitem)
def dummy_getitem_ovld(self, idx):
if not isinstance(self, DummyType):
return None
# suppose we can only support idx as literal argument
if isinstance(idx, types.StringLiteral):
def dummy_getitem_impl(self, idx):
return MAGIC_NUMBER
return dummy_getitem_impl
if isinstance(idx, types.UnicodeType):
def dummy_getitem_impl(self, idx):
return literally(idx)
return dummy_getitem_impl
return None
@njit
def test_impl(x, y):
return Dummy(x)[y]
var = 'abc'
self.assertEqual(test_impl(1, var), MAGIC_NUMBER)
| TestExtTypDummy |
python | ray-project__ray | python/ray/autoscaler/v2/tests/test_node_provider.py | {
"start": 10917,
"end": 29381
} | class ____(unittest.TestCase):
def setUp(self):
raycluster_cr = get_basic_ray_cr()
# Remove fake TPU and GPU worker groups from CR since podlist1 only
# contains small-group.
raycluster_cr["spec"]["workerGroupSpecs"][1]["replicas"] = 0
raycluster_cr["spec"]["workerGroupSpecs"][2]["replicas"] = 0
self.mock_client = MockKubernetesHttpApiClient(
_get_test_yaml("podlist1.yaml"), raycluster_cr
)
self.provider = KubeRayProvider(
cluster_name="test",
provider_config={
"namespace": "default",
"head_node_type": "headgroup",
},
k8s_api_client=self.mock_client,
)
def test_get_nodes(self):
nodes = self.provider.get_non_terminated()
errors = self.provider.poll_errors()
assert len(nodes) == 2
assert len(errors) == 0
assert sorted(nodes) == sorted(
{
"raycluster-autoscaler-head-8zsc8": CloudInstance(
cloud_instance_id="raycluster-autoscaler-head-8zsc8",
node_kind=NodeKind.HEAD,
node_type="headgroup",
is_running=True,
), # up-to-date status because the Ray container is in running status
"raycluster-autoscaler-worker-small-group-dkz2r": CloudInstance(
cloud_instance_id="raycluster-autoscaler-worker-small-group-dkz2r",
node_kind=NodeKind.WORKER,
node_type="small-group",
is_running=False,
), # waiting status, because Ray container's state is pending.
}
)
def test_launch_node(self):
launch_request = {"small-group": 1}
self.provider.launch(shape=launch_request, request_id="launch-1")
patches = self.mock_client.get_patches(
f"rayclusters/{self.provider._cluster_name}"
)
assert len(patches) == 1
assert patches[0] == {
"op": "replace",
"path": "/spec/workerGroupSpecs/0/replicas",
"value": 2, # 1 + 1
}
def test_terminate_node(self):
self.provider.terminate(
ids=["raycluster-autoscaler-worker-small-group-dkz2r"], request_id="term-1"
)
patches = self.mock_client.get_patches(
f"rayclusters/{self.provider._cluster_name}"
)
assert len(patches) == 2
assert patches == [
{
"op": "replace",
"path": "/spec/workerGroupSpecs/0/replicas",
"value": 0,
},
{
"op": "replace",
"path": "/spec/workerGroupSpecs/0/scaleStrategy",
"value": {
"workersToDelete": [
"raycluster-autoscaler-worker-small-group-dkz2r"
]
},
},
]
def test_pending_deletes(self):
# Modify the cr.yaml to have a pending delete.
self.mock_client._ray_cluster["spec"]["workerGroupSpecs"][0][
"scaleStrategy"
] = {"workersToDelete": ["raycluster-autoscaler-worker-small-group-dkz2r"]}
self.mock_client._ray_cluster["spec"]["workerGroupSpecs"][0]["replicas"] = 0
# Launching new nodes should fail.
self.provider.launch(shape={"small-group": 1}, request_id="launch-1")
errors = self.provider.poll_errors()
assert errors[0].node_type == "small-group"
assert errors[0].request_id == "launch-1"
assert "There are workers to be deleted" in str(errors[0]), errors[0]
# Terminating new nodes should fail.
self.provider.terminate(
ids=["raycluster-autoscaler-worker-small-group-dkz2r"], request_id="term-1"
)
errors = self.provider.poll_errors()
assert (
errors[0].cloud_instance_id
== "raycluster-autoscaler-worker-small-group-dkz2r"
)
assert errors[0].request_id == "term-1"
assert "There are workers to be deleted" in str(errors[0]), errors[0]
# Remove the pod from the pod list.
self.mock_client._pod_list["items"] = [
pod
for pod in self.mock_client._pod_list["items"]
if pod["metadata"]["name"]
!= "raycluster-autoscaler-worker-small-group-dkz2r"
]
# Launch OK now, and we should also clears the pending delete.
self.provider.launch(shape={"small-group": 1}, request_id="launch-2")
errors = self.provider.poll_errors()
assert len(errors) == 0
patches = self.mock_client.get_patches(
f"rayclusters/{self.provider._cluster_name}"
)
assert len(patches) == 2
assert patches == [
{
"op": "replace",
"path": "/spec/workerGroupSpecs/0/replicas",
"value": 1,
},
{
"op": "replace",
"path": "/spec/workerGroupSpecs/0/scaleStrategy",
"value": {"workersToDelete": []},
},
]
def test_increase_min_replicas_to_scale_up(self):
# Simulate the case where users manually increase the `minReplicas` field
# from 0 to $num_pods. KubeRay will create $num_pods worker Pods to meet the new
# `minReplicas`, even though the `replicas` field is still 0.
small_group = "small-group"
num_pods = 0
assert (
self.mock_client._ray_cluster["spec"]["workerGroupSpecs"][0]["groupName"]
== small_group
)
for pod in self.mock_client._pod_list["items"]:
if pod["metadata"]["labels"]["ray.io/group"] == small_group:
num_pods += 1
assert num_pods > 0
self.mock_client._ray_cluster["spec"]["workerGroupSpecs"][0]["replicas"] = 0
self.mock_client._ray_cluster["spec"]["workerGroupSpecs"][0][
"minReplicas"
] = num_pods
# Launching a new node and `replicas` should be
# `max(replicas, minReplicas) + 1`.
self.provider.launch(shape={small_group: 1}, request_id="launch-1")
patches = self.mock_client.get_patches(
f"rayclusters/{self.provider._cluster_name}"
)
assert len(patches) == 1
assert patches[0] == {
"op": "replace",
"path": "/spec/workerGroupSpecs/0/replicas",
"value": num_pods + 1,
}
def test_inconsistent_pods_raycr_scale_up(self):
"""
Test the case where the cluster state has not yet reached the desired state.
Specifically, the replicas field in the RayCluster CR does not match the actual
number of Pods.
"""
# Check the assumptions of the test
small_group = "small-group"
num_pods = 0
for pod in self.mock_client._pod_list["items"]:
if pod["metadata"]["labels"]["ray.io/group"] == small_group:
num_pods += 1
assert (
self.mock_client._ray_cluster["spec"]["workerGroupSpecs"][0]["groupName"]
== small_group
)
desired_replicas = num_pods + 1
self.mock_client._ray_cluster["spec"]["workerGroupSpecs"][0][
"replicas"
] = desired_replicas
# Launch a new node. The replicas field should be incremented by 1, even though
# the cluster state has not yet reached the goal state.
launch_request = {"small-group": 1}
self.provider.launch(shape=launch_request, request_id="launch-1")
patches = self.mock_client.get_patches(
f"rayclusters/{self.provider._cluster_name}"
)
assert len(patches) == 1
assert patches[0] == {
"op": "replace",
"path": "/spec/workerGroupSpecs/0/replicas",
"value": desired_replicas + 1,
}
def test_inconsistent_pods_raycr_scale_down(self):
"""
Test the case where the cluster state has not yet reached the desired state.
Specifically, the replicas field in the RayCluster CR does not match the actual
number of Pods.
"""
# Check the assumptions of the test
small_group = "small-group"
num_pods = 0
pod_to_delete = None
for pod in self.mock_client._pod_list["items"]:
if pod["metadata"]["labels"]["ray.io/group"] == small_group:
num_pods += 1
pod_to_delete = pod["metadata"]["name"]
assert pod_to_delete is not None
assert (
self.mock_client._ray_cluster["spec"]["workerGroupSpecs"][0]["groupName"]
== small_group
)
desired_replicas = num_pods + 1
self.mock_client._ray_cluster["spec"]["workerGroupSpecs"][0][
"replicas"
] = desired_replicas
# Terminate a node. The replicas field should be decremented by 1, even though
# the cluster state has not yet reached the goal state.
self.provider.terminate(ids=[pod_to_delete], request_id="term-1")
patches = self.mock_client.get_patches(
f"rayclusters/{self.provider._cluster_name}"
)
assert len(patches) == 2
assert patches == [
{
"op": "replace",
"path": "/spec/workerGroupSpecs/0/replicas",
"value": desired_replicas - 1,
},
{
"op": "replace",
"path": "/spec/workerGroupSpecs/0/scaleStrategy",
"value": {
"workersToDelete": [
pod_to_delete,
]
},
},
]
def test_decrease_cr_replicas_below_observed_then_scale_down(self):
"""
If a user/operator decreases the CR's replicas below the observed number of
Pods without specifying workersToDelete, scaling down should base the
new desired on observed (floor), decrement by one, and add the pod to
workersToDelete.
"""
# Prepare a RayCluster CR with replicas set to 0 for the small-group
# while the pod list contains multiple small-group pods.
raycluster_cr = get_basic_ray_cr()
mock_client = MockKubernetesHttpApiClient(
_get_test_yaml("podlist2.yaml"), raycluster_cr
)
small_group = "small-group"
pod_names = []
for pod in mock_client._pod_list["items"]:
if pod["metadata"]["labels"]["ray.io/group"] == small_group:
pod_names.append(pod["metadata"]["name"])
assert len(pod_names) >= 2
# Decrease CR replicas below observed without workersToDelete.
assert raycluster_cr["spec"]["workerGroupSpecs"][0]["groupName"] == small_group
raycluster_cr["spec"]["workerGroupSpecs"][0]["replicas"] = 0
provider = KubeRayProvider(
cluster_name="test",
provider_config={
"namespace": "default",
"head_node_type": "headgroup",
},
k8s_api_client=mock_client,
)
# Terminate a single observed pod.
pod_to_delete = pod_names[0]
provider.terminate(ids=[pod_to_delete], request_id="term-decrease")
# Expected: replicas becomes observed-1; workersToDelete contains the pod.
patches = mock_client.get_patches(f"rayclusters/{provider._cluster_name}")
assert len(patches) == 2
assert patches == [
{
"op": "replace",
"path": "/spec/workerGroupSpecs/0/replicas",
"value": len(pod_names) - 1,
},
{
"op": "replace",
"path": "/spec/workerGroupSpecs/0/scaleStrategy",
"value": {
"workersToDelete": [
pod_to_delete,
]
},
},
]
def test_scale_down_multiple_pods_of_node_type(self):
"""
Test the case where multiple pods of the same node type are scaled
down on one autoscaler iteration. This test verifies that the provider
properly handles multiple pod deletions and counting workers_to_delete.
"""
# Setup provider with multiple worker pods in podlist. We use podlist2
# here because podlist1 only contains one running worker.
raycluster_cr = get_basic_ray_cr()
raycluster_cr["spec"]["workerGroupSpecs"][0]["replicas"] = 2
mock_client = MockKubernetesHttpApiClient(
_get_test_yaml("podlist2.yaml"), raycluster_cr
)
provider = KubeRayProvider(
cluster_name="test",
provider_config={
"namespace": "default",
"head_node_type": "headgroup",
},
k8s_api_client=mock_client,
)
# Identify all pods in the target group
small_group = "small-group"
pod_names = []
for pod in mock_client._pod_list["items"]:
if pod["metadata"]["labels"]["ray.io/group"] == small_group:
pod_names.append(pod["metadata"]["name"])
# Terminate all pods in the group
provider._sync_with_api_server()
cur_instance_ids = set(provider.instances.keys())
pods_to_terminate = [name for name in pod_names if name in cur_instance_ids]
assert (
len(pods_to_terminate) > 1
), "Expected multiple pods to terminate in the target group."
provider.terminate(ids=pods_to_terminate, request_id="term-2")
# Check the patches applied to the RayCluster resource
patches = mock_client.get_patches(f"rayclusters/{provider._cluster_name}")
assert len(patches) == 2
assert patches == [
{
"op": "replace",
"path": "/spec/workerGroupSpecs/0/replicas",
"value": 0,
},
{
"op": "replace",
"path": "/spec/workerGroupSpecs/0/scaleStrategy",
"value": {
"workersToDelete": pods_to_terminate,
},
},
]
def test_worker_to_delete_info(self):
"""
Validate _get_workers_delete_info correctly returns the worker groups with pending
deletes, worker groups with finished deletes, and the set of workers to delete.
"""
# Create a RayCluster CR and set replicas to 0 to simulate the case where the autoscaler
# patches the RayCluster with `replicas: 0`, but alive Pods still exist in workersToDelete.
raycluster_cr = get_basic_ray_cr()
raycluster_cr["spec"]["workerGroupSpecs"][0]["replicas"] = 0
mock_client = MockKubernetesHttpApiClient(
_get_test_yaml("podlist2.yaml"), raycluster_cr
)
# Add some workers to workersToDelete.
small_group = "small-group"
pod_names = []
for pod in mock_client._pod_list["items"]:
if pod["metadata"]["labels"]["ray.io/group"] == small_group:
pod_names.append(pod["metadata"]["name"])
raycluster_cr["spec"]["workerGroupSpecs"][0]["scaleStrategy"] = {
"workersToDelete": pod_names,
}
(
pending_deletes,
finished_deletes,
workers_to_delete,
) = KubeRayProvider._get_workers_delete_info(raycluster_cr, set(pod_names))
# Validate _get_workers_delete_info populates sets as expected.
assert pending_deletes == {"small-group"}
assert finished_deletes == set()
assert workers_to_delete == {pod_names[0], pod_names[1]}
def test_scale_down_with_multi_host_group(self):
"""
Test the case where a worker group has numOfHosts > 1.
This ensures that the KubeRay provider accounts for multi-host replicas
during scale down and properly updates the workersToDelete field.
"""
# Setup mock RayCluster CR with numOfHosts: 2 and replicas: 1,
# resulting in 2 workers total.
raycluster_cr = get_basic_ray_cr()
raycluster_cr["spec"]["workerGroupSpecs"][0]["replicas"] = 2
mock_client = MockKubernetesHttpApiClient(
_get_test_yaml("podlist2.yaml"), raycluster_cr
)
provider = KubeRayProvider(
cluster_name="test",
provider_config={
"namespace": "default",
"head_node_type": "headgroup",
},
k8s_api_client=mock_client,
)
# Identify all pods in the multi-host group
pod_names = []
for pod in mock_client._pod_list["items"]:
if pod["metadata"]["labels"]["ray.io/group"] == "tpu-group":
pod_names.append(pod["metadata"]["name"])
# Expect 2 pods since replicas=1 and numOfHosts=2
assert len(pod_names) == 2, "Expected 2 pods in the multi-host group."
# Sync provider state and mark all pods for deletion
provider._sync_with_api_server()
cur_instance_ids = set(provider.instances.keys())
pods_to_terminate = [name for name in pod_names if name in cur_instance_ids]
assert (
len(pods_to_terminate) == 2
), "Expected all multi-host pods to be tracked by the provider."
# Terminate all pods in the group
provider.terminate(ids=pods_to_terminate, request_id="term-multi")
# Check that scale request successfully created
patches = mock_client.get_patches(f"rayclusters/{provider._cluster_name}")
assert len(patches) == 2
assert patches == [
{
"op": "replace",
"path": "/spec/workerGroupSpecs/2/replicas",
"value": 0,
},
{
"op": "replace",
"path": "/spec/workerGroupSpecs/2/scaleStrategy",
"value": {
"workersToDelete": pods_to_terminate,
},
},
]
if __name__ == "__main__":
if os.environ.get("PARALLEL_CI"):
sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
else:
sys.exit(pytest.main(["-sv", __file__]))
| KubeRayProviderIntegrationTest |
python | getsentry__sentry | src/sentry/metrics/logging.py | {
"start": 103,
"end": 1983
} | class ____(MetricsBackend):
def incr(
self,
key: str,
instance: str | None = None,
tags: Tags | None = None,
amount: float | int = 1,
sample_rate: float = 1,
unit: str | None = None,
stacklevel: int = 0,
) -> None:
logger.debug("%r: %+g", key, amount, extra={"instance": instance, "tags": tags or {}})
def timing(
self,
key: str,
value: float,
instance: str | None = None,
tags: Tags | None = None,
sample_rate: float = 1,
stacklevel: int = 0,
) -> None:
logger.debug(
"%r: %g ms", key, value * 1000, extra={"instance": instance, "tags": tags or {}}
)
def gauge(
self,
key: str,
value: float,
instance: str | None = None,
tags: Tags | None = None,
sample_rate: float = 1,
unit: str | None = None,
stacklevel: int = 0,
) -> None:
logger.debug("%r: %+g", key, value, extra={"instance": instance, "tags": tags or {}})
def distribution(
self,
key: str,
value: float,
instance: str | None = None,
tags: Tags | None = None,
sample_rate: float = 1,
unit: str | None = None,
stacklevel: int = 0,
) -> None:
logger.debug("%r: %+g", key, value, extra={"instance": instance, "tags": tags or {}})
def event(
self,
title: str,
message: str,
alert_type: str | None = None,
aggregation_key: str | None = None,
source_type_name: str | None = None,
priority: str | None = None,
instance: str | None = None,
tags: Tags | None = None,
stacklevel: int = 0,
) -> None:
logger.debug("%r: %+g", title, message, extra={"instance": instance, "tags": tags or {}})
| LoggingBackend |
python | scikit-learn__scikit-learn | sklearn/utils/_tags.py | {
"start": 6126,
"end": 9817
} | class ____:
"""Tags for the estimator.
See :ref:`estimator_tags` for more information.
Parameters
----------
estimator_type : str or None
The type of the estimator. Can be one of:
- "classifier"
- "regressor"
- "transformer"
- "clusterer"
- "outlier_detector"
- "density_estimator"
target_tags : :class:`TargetTags`
The target(y) tags.
transformer_tags : :class:`TransformerTags` or None
The transformer tags.
classifier_tags : :class:`ClassifierTags` or None
The classifier tags.
regressor_tags : :class:`RegressorTags` or None
The regressor tags.
array_api_support : bool, default=False
Whether the estimator supports Array API compatible inputs.
no_validation : bool, default=False
Whether the estimator skips input-validation. This is only meant for
stateless and dummy transformers!
non_deterministic : bool, default=False
Whether the estimator is not deterministic given a fixed ``random_state``.
requires_fit : bool, default=True
Whether the estimator requires to be fitted before calling one of
`transform`, `predict`, `predict_proba`, or `decision_function`.
_skip_test : bool, default=False
Whether to skip common tests entirely. Don't use this unless
you have a *very good* reason.
input_tags : :class:`InputTags`
The input data(X) tags.
"""
estimator_type: str | None
target_tags: TargetTags
transformer_tags: TransformerTags | None = None
classifier_tags: ClassifierTags | None = None
regressor_tags: RegressorTags | None = None
array_api_support: bool = False
no_validation: bool = False
non_deterministic: bool = False
requires_fit: bool = True
_skip_test: bool = False
input_tags: InputTags = field(default_factory=InputTags)
def get_tags(estimator) -> Tags:
"""Get estimator tags.
:class:`~sklearn.BaseEstimator` provides the estimator tags machinery.
For scikit-learn built-in estimators, we should still rely on
`self.__sklearn_tags__()`. `get_tags(est)` should be used when we
are not sure where `est` comes from: typically
`get_tags(self.estimator)` where `self` is a meta-estimator, or in
the common checks.
.. versionadded:: 1.6
Parameters
----------
estimator : estimator object
The estimator from which to get the tag.
Returns
-------
tags : :class:`~.sklearn.utils.Tags`
The estimator tags.
"""
try:
tags = estimator.__sklearn_tags__()
except AttributeError as exc:
if "object has no attribute '__sklearn_tags__'" in str(exc):
# Happens when `__sklearn_tags__` is implemented by calling
# `super().__sklearn_tags__()` but there is no `__sklearn_tags__`
# method in the base class. Typically happens when only inheriting
# from Mixins.
raise AttributeError(
f"The following error was raised: {exc}. It seems that "
"there are no classes that implement `__sklearn_tags__` "
"in the MRO and/or all classes in the MRO call "
"`super().__sklearn_tags__()`. Make sure to inherit from "
"`BaseEstimator` which implements `__sklearn_tags__` (or "
"alternatively define `__sklearn_tags__` but we don't recommend "
"this approach). Note that `BaseEstimator` needs to be on the "
"right side of other Mixins in the inheritance order."
)
else:
raise
return tags
| Tags |
python | ansible__ansible | lib/ansible/utils/context_objects.py | {
"start": 1589,
"end": 1907
} | class ____(Singleton, ABCMeta):
"""
Combine ABCMeta based classes with Singleton based classes
Combine Singleton and ABCMeta so we have a metaclass that unambiguously knows which can override
the other. Useful for making new types of containers which are also Singletons.
"""
pass
| _ABCSingleton |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/random/random_ops_test.py | {
"start": 10161,
"end": 15773
} | class ____(RandomOpTestCommon):
def _Sampler(self, num, minv, maxv, dtype, use_gpu, seed=None):
def func():
with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
rng = random_ops.random_uniform(
[num], minval=minv, maxval=maxv, dtype=dtype, seed=seed)
ret = np.empty([10, num])
for i in range(10):
ret[i, :] = self.evaluate(rng)
return ret
return func
def testRange(self):
for dt in get_float_types() + [dtypes.int32, dtypes.int64]:
sampler = self._Sampler(1000, minv=-2, maxv=8, dtype=dt, use_gpu=True)
x = sampler()
self.assertTrue(-2 <= np.min(x))
self.assertTrue(np.max(x) < 8)
# Asserts that different trials (1000 samples per trial) is unlikely
# to see the same sequence of values. Will catch buggy
# implementations which uses the same random number seed.
def testDistinct(self):
for dt in get_float_types() + [dtypes.int32, dtypes.int64]:
maxv = 1.0 if dt.is_floating else 1 << 30
sampler = self._Sampler(1000, minv=0, maxv=maxv, dtype=dt, use_gpu=True)
x = sampler()
y = sampler()
count = (x == y).sum()
count_limit = 10
if dt == dtypes.float16:
count_limit = 50
elif dt == dtypes.bfloat16:
count_limit = 90
if count >= count_limit:
print("x = ", x)
print("y = ", y)
print("count = ", count)
self.assertTrue(count < count_limit)
@test_util.run_deprecated_v1
def testUniformIntsWithInvalidShape(self):
for dtype in dtypes.int32, dtypes.int64:
with self.assertRaisesRegex(
ValueError, "minval must be a scalar; got a tensor of shape"):
random_ops.random_uniform(
[1000], minval=[1, 2], maxval=3, dtype=dtype)
with self.assertRaisesRegex(
ValueError, "maxval must be a scalar; got a tensor of shape"):
random_ops.random_uniform(
[1000], minval=1, maxval=[2, 3], dtype=dtype)
# Check that uniform ints actually follow a uniform distribution.
@test_util.run_deprecated_v1
def testUniformInts(self):
minv = -2
maxv = 15
n = 100000
p = 1 / (maxv - minv)
# The counts should follow an (n, p) binomial distribution.
mean = p * n
std = np.sqrt(n * p * (1 - p))
for dt in dtypes.int32, dtypes.int64:
# Use a fixed seed here to make the test deterministic.
# Without the fixed seed, the 5 * std bound will (very rarely) fail.
sampler = self._Sampler(
n // 10, minv=minv, maxv=maxv, dtype=dt, use_gpu=True, seed=17)
x = sampler().ravel()
self.assertEqual(x.shape, (n,))
counts, _ = np.histogram(x, bins=maxv - minv)
self.assertEqual(counts.shape, (maxv - minv,))
self.assertEqual(counts.sum(), n)
error = np.abs(counts - mean)
self.assertLess(error.max(), 5 * std)
# Check that minval = maxval is fine iff we're producing no numbers
def testUniformIntsDegenerate(self):
for dt in dtypes.int32, dtypes.int64:
def sample(n):
return self._Sampler(n, minv=0, maxv=0, dtype=dt, use_gpu=True)()
self.assertEqual(sample(0).shape, (10, 0))
with self.assertRaisesOpError('Need minval < maxval, got 0 >= 0'):
sample(1)
# Checks that the CPU and GPU implementation returns the same results,
# given the same random seed
@test_util.run_deprecated_v1
def testCPUGPUMatch(self):
for dt in get_float_types() + [dtypes.int32, dtypes.int64]:
maxv = 1.0 if dt.is_floating else 17
results = {}
for use_gpu in False, True:
sampler = self._Sampler(
1000000, minv=0, maxv=maxv, dtype=dt, use_gpu=use_gpu, seed=12345)
results[use_gpu] = sampler()
self.assertAllEqual(results[False], results[True])
@test_util.run_deprecated_v1
def testSeed(self):
for dt in get_float_types() + [dtypes.int32, dtypes.int64]:
for seed in [345, 2**100, -2**100]:
sx = self._Sampler(1000, 0, 17, dtype=dt, use_gpu=True, seed=seed)
sy = self._Sampler(1000, 0, 17, dtype=dt, use_gpu=True, seed=seed)
self.assertAllEqual(sx(), sy())
@test_util.run_deprecated_v1
def testNoCSE(self):
shape = [2, 3, 4]
for dtype in dtypes.float16, dtypes.float32, dtypes.int32:
with self.session():
rnd1 = random_ops.random_uniform(shape, 0, 17, dtype=dtype)
rnd2 = random_ops.random_uniform(shape, 0, 17, dtype=dtype)
diff = (rnd2 - rnd1).eval()
self.assertTrue(np.linalg.norm(diff) > 0.1)
@test_util.run_deprecated_v1
def testSingleSessionNotConstant(self):
for use_gpu in [False, True]:
for dt in get_float_types() + [dtypes.int32, dtypes.int64]:
self._testSingleSessionNotConstant(
random_ops.random_uniform, 100, dt, 0, 17, use_gpu=use_gpu)
@test_util.run_deprecated_v1
def testSingleSessionOpSeedNotConstant(self):
for use_gpu in [False, True]:
for dt in get_float_types() + [dtypes.int32, dtypes.int64]:
self._testSingleSessionNotConstant(
random_ops.random_uniform,
100,
dt,
10,
20,
use_gpu=use_gpu,
op_seed=1345)
@test_util.run_deprecated_v1
def testSingleSessionGraphSeedNotConstant(self):
for use_gpu in [False, True]:
for dt in get_float_types() + [dtypes.int32, dtypes.int64]:
self._testSingleSessionNotConstant(
random_ops.random_uniform,
100,
dt,
20,
200,
use_gpu=use_gpu,
graph_seed=965)
| RandomUniformTest |
python | huggingface__transformers | src/transformers/models/llava_next_video/modeling_llava_next_video.py | {
"start": 7740,
"end": 13349
} | class ____(PreTrainedModel):
config: LlavaNextVideoConfig
base_model_prefix = "model"
input_modalities = ("image", "video", "text")
supports_gradient_checkpointing = True
_no_split_modules = ["LlamaDecoderLayer"]
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = True
_supports_flex_attn = True
_supports_attention_backend = True
@torch.no_grad()
def _init_weights(self, module):
std = getattr(self.config, "initializer_range", self.config.get_text_config().initializer_range)
if isinstance(module, nn.Linear):
init.normal_(module.weight, mean=0.0, std=std)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, LlavaNextVideoModel):
embed_std = 1 / math.sqrt(self.config.text_config.hidden_size)
init.normal_(module.image_newline, mean=0.0, std=embed_std)
def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size):
"""
Calculate the shape of the image patch grid after the preprocessing for images of any resolution.
Args:
image_size (`tuple`):
The size of the input image in the format (width, height).
grid_pinpoints (`List`):
A list containing possible resolutions. Each item in the list should be a tuple or list
of the form `(height, width)`.
patch_size (`int`):
The size of each image patch.
Returns:
tuple: The shape of the image patch grid in the format (width, height).
"""
if not isinstance(grid_pinpoints, list):
raise TypeError("grid_pinpoints should be a list of tuples or lists")
# ! VERY IMPORTANT if image_size is tensor, must convert to into tuple, otherwise it will cause wrong calculate
if not isinstance(image_size, (list, tuple)):
if not isinstance(image_size, (torch.Tensor, np.ndarray)):
raise TypeError(
f"image_size invalid type: {type(image_size)} not valid, should be either list, tuple, np.ndarray or tensor"
)
image_size = image_size.tolist()
height, width = select_best_resolution(image_size, grid_pinpoints)
return height // patch_size, width // patch_size
def image_size_to_num_patches(image_size, grid_pinpoints, patch_size: int):
"""
Calculate the number of patches after the preprocessing for images of any resolution.
Args:
image_size (`torch.LongTensor` or `np.ndarray` or `tuple[int, int]`):
The size of the input image in the format (height, width). ?
grid_pinpoints (`List`):
A list containing possible resolutions. Each item in the list should be a tuple or list
of the form `(height, width)`.
patch_size (`int`):
The size of each image patch.
Returns:
int: the number of patches
"""
if not isinstance(grid_pinpoints, list):
raise TypeError("grid_pinpoints should be a list of tuples or lists")
# ! VERY IMPORTANT if image_size is tensor, must convert to into tuple, otherwise it will cause wrong calculate
if not isinstance(image_size, (list, tuple)):
if not isinstance(image_size, (torch.Tensor, np.ndarray)):
raise TypeError(f"image_size invalid type {type(image_size)} with value {image_size}")
image_size = image_size.tolist()
best_resolution = select_best_resolution(image_size, grid_pinpoints)
height, width = best_resolution
num_patches = 0
# consider change to ceil(height/patch_size)*ceil(width/patch_size) + 1
for i in range(0, height, patch_size):
for j in range(0, width, patch_size):
num_patches += 1
# add the base patch
num_patches += 1
return num_patches
def unpad_image(tensor, original_size):
"""
Unpads a PyTorch tensor of a padded and resized image.
Args:
tensor (`torch.Tensor`):
The image tensor, assumed to be of shape (num_channels, height, width).
original_size (`tuple`):
The original size of the image (height, width).
Returns:
`torch.Tensor`: The unpadded image tensor.
"""
if not isinstance(original_size, (list, tuple)):
if not isinstance(original_size, (torch.Tensor, np.ndarray)):
raise TypeError(
f"image_size invalid type: {type(original_size)} not valid, should be either list, tuple, np.ndarray or tensor"
)
original_size = original_size.tolist()
original_height, original_width = original_size
current_height, current_width = tensor.shape[1:]
original_aspect_ratio = original_width / original_height
current_aspect_ratio = current_width / current_height
if original_aspect_ratio > current_aspect_ratio:
scale_factor = current_width / original_width
new_height = int(round(original_height * scale_factor, 7))
padding = (current_height - new_height) // 2
unpadded_tensor = tensor[:, padding : current_height - padding, :]
else:
scale_factor = current_height / original_height
new_width = int(round(original_width * scale_factor, 7))
padding = (current_width - new_width) // 2
unpadded_tensor = tensor[:, :, padding : current_width - padding]
return unpadded_tensor
@auto_docstring(
custom_intro="""
The Llava-Next model which consists of a vision backbone and a language model without language modeling head.
"""
)
| LlavaNextVideoPreTrainedModel |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_run.py | {
"start": 3833,
"end": 6086
} | class ____(GoogleCloudBaseOperator):
"""
Updates a job and wait for the operation to be completed. Pushes the updated job to xcom.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param job_name: Required. The name of the job to update.
:param job: Required. The job descriptor containing the new configuration of the job to update.
The name field will be replaced by job_name
:param gcp_conn_id: The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = ("project_id", "region", "gcp_conn_id", "impersonation_chain", "job_name")
def __init__(
self,
project_id: str,
region: str,
job_name: str,
job: dict | Job,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.job_name = job_name
self.job = job
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook: CloudRunHook = CloudRunHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
job = hook.update_job(
job_name=self.job_name, job=self.job, region=self.region, project_id=self.project_id
)
return Job.to_dict(job)
| CloudRunUpdateJobOperator |
python | django__django | tests/auth_tests/test_auth_backends.py | {
"start": 50557,
"end": 52696
} | class ____(TestCase):
backend = "auth_tests.test_auth_backends.CustomModelBackend"
other_backend = "auth_tests.test_auth_backends.OtherModelBackend"
username = "username"
password = "password"
def assertBackendInSession(self, backend):
request = HttpRequest()
request.session = self.client.session
self.assertEqual(request.session[BACKEND_SESSION_KEY], backend)
@override_settings(AUTHENTICATION_BACKENDS=[backend])
def test_backend_path_login_without_authenticate_single_backend(self):
user = User.objects.create_user(self.username, "email", self.password)
self.client._login(user)
self.assertBackendInSession(self.backend)
@override_settings(AUTHENTICATION_BACKENDS=[backend, other_backend])
def test_backend_path_login_without_authenticate_multiple_backends(self):
user = User.objects.create_user(self.username, "email", self.password)
expected_message = (
"You have multiple authentication backends configured and "
"therefore must provide the `backend` argument or set the "
"`backend` attribute on the user."
)
with self.assertRaisesMessage(ValueError, expected_message):
self.client._login(user)
def test_non_string_backend(self):
user = User.objects.create_user(self.username, "email", self.password)
expected_message = (
"backend must be a dotted import path string (got "
"<class 'django.contrib.auth.backends.ModelBackend'>)."
)
with self.assertRaisesMessage(TypeError, expected_message):
self.client._login(user, backend=ModelBackend)
@override_settings(AUTHENTICATION_BACKENDS=[backend, other_backend])
def test_backend_path_login_with_explicit_backends(self):
user = User.objects.create_user(self.username, "email", self.password)
self.client._login(user, self.other_backend)
self.assertBackendInSession(self.other_backend)
@override_settings(
AUTHENTICATION_BACKENDS=["django.contrib.auth.backends.AllowAllUsersModelBackend"]
)
| SelectingBackendTests |
python | pydata__xarray | xarray/tests/test_datatree.py | {
"start": 85904,
"end": 86416
} | class ____:
def __init__(self):
self.closed = False
def close(self):
if self.closed:
raise RuntimeError("already closed")
self.closed = True
@pytest.fixture
def tree_and_closers():
tree = DataTree.from_dict({"/child/grandchild": None})
closers = {
"/": Closer(),
"/child": Closer(),
"/child/grandchild": Closer(),
}
for path, closer in closers.items():
tree[path].set_close(closer.close)
return tree, closers
| Closer |
python | google__jax | jax/experimental/pallas/ops/tpu/paged_attention/quantization_utils.py | {
"start": 703,
"end": 2556
} | class ____(NamedTuple):
"""A tensor which has been quantized to int8 and its scales.
Attributes:
weight: Weight
scales: Scales
"""
weight: jnp.ndarray
scales: jnp.ndarray
def to_int8(x: jnp.ndarray, h: jnp.ndarray) -> jnp.ndarray:
"""Converts a float array to an int8 array with a scale.
Args:
x: Float array.
h: Quantization scale.
Returns:
Int8 array.
"""
return jnp.int8(jnp.rint(x * (MAX_INT8 / h)))
def from_int8(
x: jnp.ndarray, h: jnp.ndarray, dtype: jnp.dtype = jnp.bfloat16
) -> jnp.ndarray:
"""Converts an int8 array to a float array with a scale.
Args:
x: Int8 array.
h: Quantization scale.
dtype: Float dtype to convert to.
Returns:
Float array.
"""
return x.astype(dtype) * h / MAX_INT8
def get_quantization_scales(x: jnp.ndarray) -> jnp.ndarray:
"""Computes the quantization scales for a float array.
These are the maximum values of the trailing dimension.
Args:
x: Float array to quantize.
Returns:
Array of the same shape as input but with the trailing dimension reduced to
a size 1 absolute max value.
"""
return jnp.max(jnp.abs(x), axis=-1, keepdims=True)
def quantize_to_int8(
x: jnp.ndarray,
) -> QuantizedTensor:
"""Quantizes a float array to an int8 QuantizedTensor.
Args:
x: Float array to quantize.
Returns:
Int8 QuantizedTensor.
"""
x_scales = get_quantization_scales(x)
return QuantizedTensor(weight=to_int8(x, x_scales), scales=x_scales)
def unquantize_from_int8(
x: QuantizedTensor,
dtype: jnp.dtype = jnp.bfloat16,
) -> jnp.ndarray:
"""Unquantizes an int8 QuantizedTensor to a float array.
Args:
x: Int8 QuantizedTensor to unquantize.
dtype: Float dtype to unquantize to.
Returns:
Float array.
"""
return from_int8(x.weight, x.scales, dtype)
| QuantizedTensor |
python | pytorch__pytorch | torch/_dynamo/convert_frame.py | {
"start": 68221,
"end": 76285
} | class ____:
def __init__(
self,
compiler_fn: CompilerFn,
hooks: Hooks,
package: Optional[CompilePackage] = None,
) -> None:
self._torchdynamo_orig_backend = compiler_fn
self._inner_convert = convert_frame_assert(
compiler_fn, one_graph=False, package=package
)
self._hooks = hooks
@property
def _clone_with_backend(self) -> Callable[[WrapBackendDebug], ConvertFrame]:
return lambda backend: convert_frame(
backend,
self._hooks,
)
def __call__(
self,
frame: DynamoFrameType,
cache_entry: Optional[CacheEntry],
hooks: Hooks,
frame_state: dict[str, Union[int, FrameStateSizeEntry]],
skip: int = 0,
) -> ConvertFrameReturn:
input_codes.add(frame.f_code)
counters["frames"]["total"] += 1
try:
result = self._inner_convert(
frame, cache_entry, hooks, frame_state, skip=skip + 1
)
counters["frames"]["ok"] += 1
return result
except Exception as e:
# Do not allow errors to be suppressed if we're tracing a resume function prologue
if isinstance(e, ResumePrologueTracingError):
raise
error_on_graph_break = (
self._inner_convert._box.error_on_graph_break is not None
)
assert error_on_graph_break is not None
if self._inner_convert._box.error_on_graph_break:
# NOTE we _might_ have to wrap the current in a custom exception
# in order to correctly bubble up to the top-level compile wrapper in
# eval_frame.py. But re-raising seems to work for now because exceptions from tracing
# a nested call that results in a top-level frame compile will be handled by the caller
# as an observed exception - we don't expect that exception to be suppressed.
raise
# These two exception types are "soft" failure, in the sense that
# we know this is due to something we didn't implement all the
# way, scare the user less about it. That being said, if you
# are trying to understand why a graph break happened, it's still
# important to have this information, so offer it.
#
# NB: NotImplementedError used to be on this list, but actually
# it is impossible for it to reach here, as it is converted into
# InternalTorchDynamoError. This behavior seemed reasonable
# to me (ezyang, Aug 2023) so I kept it, but maybe at some point
# someone wanted these to also get suppressed. If so, you'll
# need to make these exceptions not get wrapped
# We intentionally don't want to suppress error here.
if isinstance(e, UncapturedHigherOrderOpError):
raise
soft_fail = isinstance(e, Unsupported)
code = frame.f_code
# This is a soft failure. In the sense, the code path reaches here
# when we do not support graph breaks on bytecodes like LOAD_ATTR,
# BUILD_SET etc. In such case, we can fallback to eager without
# scaring users.
if soft_fail and graph_break_log.isEnabledFor(logging.DEBUG):
# Log this message in the graph break. Also use the string
# "skip: " to tell that the whole frame is falling back to
# eager.
if hasattr(e, "compile_id") and hasattr(e, "real_stack"):
with compile_context(CompileContext(e.compile_id)): # type: ignore[attr-defined]
user_stack = e.real_stack
user_stack_formatted = "".join(
traceback.format_list(user_stack)
)
frame_info = exc.format_frame_info(code)
user_stack_trace = (
"Graph break: torch.compile cannot properly resume from this graph break, which results in a skip.\n"
f"torch.compile will skip tracing the frame {frame_info} and fall back to eager.\n"
"The graph break occurred in the following user code:\n"
f"{user_stack_formatted}"
)
torch._logging.trace_structured(
"artifact",
metadata_fn=lambda: {
"name": "dynamo_graph_break_reason",
"encoding": "string",
},
payload_fn=lambda: f"{user_stack_trace}\n{traceback.format_exc()}",
)
graph_break_log.debug(
user_stack_trace,
exc_info=True,
stack_info=config.verbose,
)
if not config.suppress_errors and not soft_fail:
raise
# Suppress the error. NB: It's very important to do the
# suppression logging HERE, where the actual suppression
# happens. Previously it was somewhere else and so it was
# possible to accidentally not log at all.
record_filename = getattr(e, "record_filename", None)
code = frame.f_code
error_msg = format_error_msg(e, code, record_filename, frame)
if soft_fail:
log.info(error_msg, exc_info=True)
else:
log.warning(error_msg, exc_info=True)
if isinstance(e, SkipCodeRecursiveException):
return ConvertFrameReturn(
frame_exec_strategy=FrameExecStrategy(
FrameAction.SKIP, FrameAction.SKIP
)
)
elif isinstance(e, RecompileLimitExceeded):
return ConvertFrameReturn(
frame_exec_strategy=FrameExecStrategy(
FrameAction.RUN_ONLY, FrameAction.RUN_ONLY
)
)
return ConvertFrameReturn()
def convert_frame(
compiler_fn: CompilerFn,
hooks: Hooks,
package: Optional[CompilePackage] = None,
) -> ConvertFrame:
"""Try to convert a frame into an FX graph, if error leave frame unmodified"""
return ConvertFrame(compiler_fn, hooks, package=package)
# TODO mlazos: add support for same args, or record them
def replay(filename: str) -> None:
from .backends.debugging import eager
original_replay_val = config.replay_record_enabled
config.replay_record_enabled = False
with open(filename, "rb") as in_file:
record = ExecutionRecord.load(in_file)
record.globals = dict(itertools.chain(record.globals.items(), globals().items()))
with decorators.error_on_graph_break(False):
try:
_compile(
record.code,
record.globals,
record.locals,
record.builtins,
record.closure,
compiler_fn=eager,
one_graph=False,
export=False,
export_constraints=None,
hooks=Hooks(),
cache_size=CacheSizeRelevantForFrame(0, 0),
cache_entry=None,
frame=None,
frame_state={},
compile_id=CompileId(frame_id=42, frame_compile_id=999),
)
finally:
config.replay_record_enabled = original_replay_val
def first_real_inst_idx(code: CodeType) -> int:
if sys.version_info < (3, 11):
return 0
for inst in dis.get_instructions(code):
if inst.opname == "RESUME":
return inst.offset // 2
raise RuntimeError("RESUME instruction not found in code")
| ConvertFrame |
python | sympy__sympy | sympy/codegen/fnodes.py | {
"start": 18470,
"end": 18605
} | class ____(FFunction):
""" Fortran complex conversion function. """
nargs = 2 # may be extended to (2, 3) at a later point
| cmplx |
python | openai__openai-python | src/openai/types/responses/response_output_text.py | {
"start": 482,
"end": 804
} | class ____(BaseModel):
file_id: str
"""The ID of the file."""
filename: str
"""The filename of the file cited."""
index: int
"""The index of the file in the list of files."""
type: Literal["file_citation"]
"""The type of the file citation. Always `file_citation`."""
| AnnotationFileCitation |
python | huggingface__transformers | src/transformers/models/clvp/modeling_clvp.py | {
"start": 28427,
"end": 34711
} | class ____(nn.Module):
"""
This class processes the log-mel spectrograms(extracted by the Feature Extractor) and text tokens(produced by the
tokenizer) as inputs for the decoder model.
First each log-mel spectrogram is processed into a single vector which captures valuable characteristics from each
of them, then the text tokens are converted into token embeddings and position embeddings are added afterwards.
Both of these vectors are concatenated and then passed to the decoder model.
The text tokens helps to incorporate the "text information" and the log-mel spectrogram is used to specify the
"voice characteristics" into the generated mel tokens.
"""
def __init__(self, config: ClvpConfig):
super().__init__()
self.text_config = config.text_config
self.decoder_config = config.decoder_config
self.text_token_embedding = nn.Embedding(self.text_config.vocab_size, self.decoder_config.hidden_size)
self.text_position_embedding = nn.Embedding(
self.decoder_config.max_text_tokens, self.decoder_config.hidden_size
)
self.mel_conv = nn.Conv1d(self.decoder_config.feature_size, self.decoder_config.hidden_size, kernel_size=1)
# define group norms to be used before each attention layer
num_groups = self.compute_groupnorm_groups(self.decoder_config.hidden_size)
self.group_norms = nn.ModuleList(
[
nn.GroupNorm(num_groups, self.decoder_config.hidden_size, eps=1e-5, affine=True)
for _ in range(self.decoder_config.num_mel_attn_blocks)
]
)
# define the attention layers
self.mel_attn_blocks = nn.ModuleList(
[ClvpSelfAttention(self.decoder_config) for _ in range(self.decoder_config.num_mel_attn_blocks)]
)
self.gradient_checkpointing = False
def compute_groupnorm_groups(self, channels: int, groups: int = 32):
"""
Calculates the value of `num_groups` for nn.GroupNorm. This logic is taken from the official tortoise
repository. link :
https://github.com/neonbjb/tortoise-tts/blob/4003544b6ff4b68c09856e04d3eff9da26d023c2/tortoise/models/arch_util.py#L26
"""
if channels <= 16:
groups = 8
elif channels <= 64:
groups = 16
while channels % groups != 0:
groups = int(groups / 2)
if groups <= 2:
raise ValueError(
f"Number of groups for the GroupNorm must be greater than 2, but it is {groups}."
f"Please consider using a different `hidden_size`"
)
return groups
def forward(
self,
input_features: torch.FloatTensor,
input_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
):
# process text
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
batch_size, seq_length = input_ids.size()
elif inputs_embeds is not None:
batch_size, seq_length = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
# construct attention mask if not given
if attention_mask is None:
attention_mask = torch.ones([batch_size, seq_length], dtype=torch.long, device=input_ids.device)
# We add bos and eos input_ids in the modeling file instead of the tokenizer file to keep the logic simple
# This logic is specific to ClvpConditioningEncoder and not used by other modules.
input_ids, attention_mask = _pad_extra_bos_eos_tokens(
input_ids,
attention_mask,
bos_token_id=self.text_config.bos_token_id,
eos_token_id=self.text_config.eos_token_id,
)
inputs_embeds = self.text_token_embedding(input_ids)
position_ids = attention_mask.cumsum(-1) - 1
position_embeds = self.text_position_embedding(position_ids)
text_embeds = inputs_embeds + position_embeds
if self.gradient_checkpointing and self.training:
# process each log-mel spectrogram into a single vector
mel_spec = torch.utils.checkpoint.checkpoint(self.mel_conv, input_features)
for i, mel_attn_block in enumerate(self.mel_attn_blocks):
residual_mel_spec = mel_spec.transpose(1, 2)
mel_spec = torch.utils.checkpoint.checkpoint(self.group_norms[i], mel_spec).transpose(1, 2)
mel_spec = torch.utils.checkpoint.checkpoint(mel_attn_block, mel_spec)[0] + residual_mel_spec
mel_spec = mel_spec.transpose(1, 2)
else:
# process each log-mel spectrogram into a single vector
mel_spec = self.mel_conv(input_features)
for i, mel_attn_block in enumerate(self.mel_attn_blocks):
residual_mel_spec = mel_spec.transpose(1, 2)
mel_spec = self.group_norms[i](mel_spec).transpose(1, 2)
mel_spec = mel_attn_block(mel_spec)[0] + residual_mel_spec
mel_spec = mel_spec.transpose(1, 2)
mel_spec = mel_spec[:, :, 0]
mel_spec = mel_spec.unsqueeze(1)
# repeat if there is either (1 text vs N audios) or (N texts vs 1 audio)
if text_embeds.shape[0] == 1 and mel_spec.shape[0] != 1:
text_embeds = text_embeds.repeat(mel_spec.shape[0], 1, 1)
elif text_embeds.shape[0] != 1 and mel_spec.shape[0] == 1:
mel_spec = mel_spec.repeat(text_embeds.shape[0], 1, 1)
# If there is N texts and M audios we will raise error since the number of text and audio must be same.
elif text_embeds.shape[0] != mel_spec.shape[0]:
raise ValueError(
f"The number of texts and number of audios must be same. "
f"Found {text_embeds.shape[0]} texts vs {mel_spec.shape[0]} audios"
)
return torch.concat([mel_spec, text_embeds], dim=1)
@auto_docstring
| ClvpConditioningEncoder |
python | python-excel__xlrd | tests/test_biffh.py | {
"start": 272,
"end": 575
} | class ____(unittest.TestCase):
def test_hex_char_dump(self):
sio = StringIO()
biffh.hex_char_dump(b"abc\0e\01", 0, 6, fout=sio)
s = sio.getvalue()
assert "61 62 63 00 65 01" in s, s
assert "abc~e?" in s, s
if __name__=='__main__':
unittest.main()
| TestHexDump |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/history.py | {
"start": 8066,
"end": 9441
} | class ____(History):
"""
:class:`.History` class that stores all strings in a file.
"""
def __init__(self, filename: _StrOrBytesPath) -> None:
self.filename = filename
super().__init__()
def load_history_strings(self) -> Iterable[str]:
strings: list[str] = []
lines: list[str] = []
def add() -> None:
if lines:
# Join and drop trailing newline.
string = "".join(lines)[:-1]
strings.append(string)
if os.path.exists(self.filename):
with open(self.filename, "rb") as f:
for line_bytes in f:
line = line_bytes.decode("utf-8", errors="replace")
if line.startswith("+"):
lines.append(line[1:])
else:
add()
lines = []
add()
# Reverse the order, because newest items have to go first.
return reversed(strings)
def store_string(self, string: str) -> None:
# Save to file.
with open(self.filename, "ab") as f:
def write(t: str) -> None:
f.write(t.encode("utf-8"))
write(f"\n# {datetime.datetime.now()}\n")
for line in string.split("\n"):
write(f"+{line}\n")
| FileHistory |
python | doocs__leetcode | solution/0300-0399/0384.Shuffle an Array/Solution.py | {
"start": 0,
"end": 580
} | class ____:
def __init__(self, nums: List[int]):
self.nums = nums
self.original = nums.copy()
def reset(self) -> List[int]:
self.nums = self.original.copy()
return self.nums
def shuffle(self) -> List[int]:
for i in range(len(self.nums)):
j = random.randrange(i, len(self.nums))
self.nums[i], self.nums[j] = self.nums[j], self.nums[i]
return self.nums
# Your Solution object will be instantiated and called as such:
# obj = Solution(nums)
# param_1 = obj.reset()
# param_2 = obj.shuffle()
| Solution |
python | joke2k__faker | tests/providers/test_currency.py | {
"start": 11521,
"end": 11946
} | class ____:
"""Test fr_FR currency provider"""
num_samples = 100
@classmethod
def setup_class(cls):
from faker.providers.currency.fr_FR import Provider as FrFrCurrencyProvider
cls.provider = FrFrCurrencyProvider
def test_pricetag(self, faker, num_samples):
for _ in range(num_samples):
pricetag = faker.pricetag()
assert isinstance(pricetag, str)
| TestFrFr |
python | openai__gym | gym/envs/mujoco/inverted_double_pendulum_v4.py | {
"start": 109,
"end": 9332
} | class ____(MujocoEnv, utils.EzPickle):
"""
### Description
This environment originates from control theory and builds on the cartpole
environment based on the work done by Barto, Sutton, and Anderson in
["Neuronlike adaptive elements that can solve difficult learning control problems"](https://ieeexplore.ieee.org/document/6313077),
powered by the Mujoco physics simulator - allowing for more complex experiments
(such as varying the effects of gravity or constraints). This environment involves a cart that can
moved linearly, with a pole fixed on it and a second pole fixed on the other end of the first one
(leaving the second pole as the only one with one free end). The cart can be pushed left or right,
and the goal is to balance the second pole on top of the first pole, which is in turn on top of the
cart, by applying continuous forces on the cart.
### Action Space
The agent take a 1-element vector for actions.
The action space is a continuous `(action)` in `[-1, 1]`, where `action` represents the
numerical force applied to the cart (with magnitude representing the amount of force and
sign representing the direction)
| Num | Action | Control Min | Control Max | Name (in corresponding XML file) | Joint | Unit |
|-----|---------------------------|-------------|-------------|----------------------------------|-------|-----------|
| 0 | Force applied on the cart | -1 | 1 | slider | slide | Force (N) |
### Observation Space
The state space consists of positional values of different body parts of the pendulum system,
followed by the velocities of those individual parts (their derivatives) with all the
positions ordered before all the velocities.
The observation is a `ndarray` with shape `(11,)` where the elements correspond to the following:
| Num | Observation | Min | Max | Name (in corresponding XML file) | Joint | Unit |
| --- | ----------------------------------------------------------------- | ---- | --- | -------------------------------- | ----- | ------------------------ |
| 0 | position of the cart along the linear surface | -Inf | Inf | slider | slide | position (m) |
| 1 | sine of the angle between the cart and the first pole | -Inf | Inf | sin(hinge) | hinge | unitless |
| 2 | sine of the angle between the two poles | -Inf | Inf | sin(hinge2) | hinge | unitless |
| 3 | cosine of the angle between the cart and the first pole | -Inf | Inf | cos(hinge) | hinge | unitless |
| 4 | cosine of the angle between the two poles | -Inf | Inf | cos(hinge2) | hinge | unitless |
| 5 | velocity of the cart | -Inf | Inf | slider | slide | velocity (m/s) |
| 6 | angular velocity of the angle between the cart and the first pole | -Inf | Inf | hinge | hinge | angular velocity (rad/s) |
| 7 | angular velocity of the angle between the two poles | -Inf | Inf | hinge2 | hinge | angular velocity (rad/s) |
| 8 | constraint force - 1 | -Inf | Inf | | | Force (N) |
| 9 | constraint force - 2 | -Inf | Inf | | | Force (N) |
| 10 | constraint force - 3 | -Inf | Inf | | | Force (N) |
There is physical contact between the robots and their environment - and Mujoco
attempts at getting realisitic physics simulations for the possible physical contact
dynamics by aiming for physical accuracy and computational efficiency.
There is one constraint force for contacts for each degree of freedom (3).
The approach and handling of constraints by Mujoco is unique to the simulator
and is based on their research. Once can find more information in their
[*documentation*](https://mujoco.readthedocs.io/en/latest/computation.html)
or in their paper
["Analytically-invertible dynamics with contacts and constraints: Theory and implementation in MuJoCo"](https://homes.cs.washington.edu/~todorov/papers/TodorovICRA14.pdf).
### Rewards
The reward consists of two parts:
- *alive_bonus*: The goal is to make the second inverted pendulum stand upright
(within a certain angle limit) as long as possible - as such a reward of +10 is awarded
for each timestep that the second pole is upright.
- *distance_penalty*: This reward is a measure of how far the *tip* of the second pendulum
(the only free end) moves, and it is calculated as
*0.01 * x<sup>2</sup> + (y - 2)<sup>2</sup>*, where *x* is the x-coordinate of the tip
and *y* is the y-coordinate of the tip of the second pole.
- *velocity_penalty*: A negative reward for penalising the agent if it moves too
fast *0.001 * v<sub>1</sub><sup>2</sup> + 0.005 * v<sub>2</sub> <sup>2</sup>*
The total reward returned is ***reward*** *=* *alive_bonus - distance_penalty - velocity_penalty*
### Starting State
All observations start in state
(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0) with a uniform noise in the range
of [-0.1, 0.1] added to the positional values (cart position and pole angles) and standard
normal force with a standard deviation of 0.1 added to the velocity values for stochasticity.
### Episode End
The episode ends when any of the following happens:
1.Truncation: The episode duration reaches 1000 timesteps.
2.Termination: Any of the state space values is no longer finite.
3.Termination: The y_coordinate of the tip of the second pole *is less than or equal* to 1. The maximum standing height of the system is 1.196 m when all the parts are perpendicularly vertical on top of each other).
### Arguments
No additional arguments are currently supported.
```
env = gym.make('InvertedDoublePendulum-v4')
```
There is no v3 for InvertedPendulum, unlike the robot environments where a v3 and
beyond take gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc.
### Version History
* v4: all mujoco environments now use the mujoco bindings in mujoco>=2.1.3
* v3: support for gym.make kwargs such as xml_file, ctrl_cost_weight, reset_noise_scale etc. rgb rendering comes from tracking camera (so agent does not run away from screen)
* v2: All continuous control environments now use mujoco_py >= 1.50
* v1: max_time_steps raised to 1000 for robot based tasks (including inverted pendulum)
* v0: Initial versions release (1.0.0)
"""
metadata = {
"render_modes": [
"human",
"rgb_array",
"depth_array",
],
"render_fps": 20,
}
def __init__(self, **kwargs):
observation_space = Box(low=-np.inf, high=np.inf, shape=(11,), dtype=np.float64)
MujocoEnv.__init__(
self,
"inverted_double_pendulum.xml",
5,
observation_space=observation_space,
**kwargs
)
utils.EzPickle.__init__(self, **kwargs)
def step(self, action):
self.do_simulation(action, self.frame_skip)
ob = self._get_obs()
x, _, y = self.data.site_xpos[0]
dist_penalty = 0.01 * x**2 + (y - 2) ** 2
v1, v2 = self.data.qvel[1:3]
vel_penalty = 1e-3 * v1**2 + 5e-3 * v2**2
alive_bonus = 10
r = alive_bonus - dist_penalty - vel_penalty
terminated = bool(y <= 1)
if self.render_mode == "human":
self.render()
return ob, r, terminated, False, {}
def _get_obs(self):
return np.concatenate(
[
self.data.qpos[:1], # cart x pos
np.sin(self.data.qpos[1:]), # link angles
np.cos(self.data.qpos[1:]),
np.clip(self.data.qvel, -10, 10),
np.clip(self.data.qfrc_constraint, -10, 10),
]
).ravel()
def reset_model(self):
self.set_state(
self.init_qpos
+ self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq),
self.init_qvel + self.np_random.standard_normal(self.model.nv) * 0.1,
)
return self._get_obs()
def viewer_setup(self):
assert self.viewer is not None
v = self.viewer
v.cam.trackbodyid = 0
v.cam.distance = self.model.stat.extent * 0.5
v.cam.lookat[2] = 0.12250000000000005 # v.model.stat.center[2]
| InvertedDoublePendulumEnv |
python | PyCQA__pylint | pylint/pyreverse/printer.py | {
"start": 566,
"end": 770
} | class ____(Enum):
INHERITS = "inherits"
COMPOSITION = "composition"
ASSOCIATION = "association"
AGGREGATION = "aggregation"
USES = "uses"
TYPE_DEPENDENCY = "type_dependency"
| EdgeType |
python | aio-libs__aiohttp | aiohttp/_websocket/models.py | {
"start": 1359,
"end": 1688
} | class ____(NamedTuple):
data: bytes
size: int
extra: str | None = None
type: Literal[WSMsgType.BINARY] = WSMsgType.BINARY
def json(
self, *, loads: Callable[[str | bytes | bytearray], Any] = json.loads
) -> Any:
"""Return parsed JSON data."""
return loads(self.data)
| WSMessageBinary |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingFalsy1.py | {
"start": 2006,
"end": 2180
} | class ____(NT1):
pass
def func9(val: NT2) -> None:
if val:
reveal_type(val, expected_text="NT2")
else:
reveal_type(val, expected_text="Never")
| NT2 |
python | getsentry__sentry | src/sentry/rules/conditions/event_frequency.py | {
"start": 32678,
"end": 39322
} | class ____(BaseEventFrequencyCondition):
id = "sentry.rules.conditions.event_frequency.EventFrequencyPercentCondition"
label = "The issue affects more than {value} percent of sessions in {interval}"
logger = logging.getLogger("sentry.rules.event_frequency")
def __init__(self, *args: Any, **kwargs: Any) -> None:
self.intervals = PERCENT_INTERVALS
super().__init__(*args, **kwargs)
# Override form fields interval to hide 1 min option from ui, but leave
# it available to process existing 1m rules.
self.form_fields["interval"] = {
"type": "choice",
"choices": [
(key, label)
for key, (label, duration) in sorted(
PERCENT_INTERVALS_TO_DISPLAY.items(),
key=lambda key____label__duration: key____label__duration[1][1],
)
],
}
def get_session_count(
self, project_id: int, environment_id: int, start: datetime, end: datetime
) -> int:
cache_key = f"r.c.spc:{project_id}-{environment_id}"
session_count_last_hour = cache.get(cache_key)
if session_count_last_hour is None:
with options_override({"consistent": False}):
session_count_last_hour = release_health.backend.get_project_sessions_count(
project_id=project_id,
environment_id=environment_id,
rollup=60,
start=end - timedelta(minutes=60),
end=end,
)
cache.set(cache_key, session_count_last_hour, 600)
return session_count_last_hour
def get_session_interval(self, session_count: int, interval: str) -> int | None:
if session_count >= MIN_SESSIONS_TO_FIRE:
interval_in_minutes = PERCENT_INTERVALS[interval][1].total_seconds() // 60
return int(session_count / (60 / interval_in_minutes))
return None
def query_hook(
self,
event: GroupEvent,
start: datetime,
end: datetime,
environment_id: int,
) -> float:
project_id = event.project_id
session_count_last_hour = self.get_session_count(project_id, environment_id, start, end)
avg_sessions_in_interval = self.get_session_interval(
session_count_last_hour, self.get_option("interval")
)
if avg_sessions_in_interval:
issue_count = self.get_snuba_query_result(
tsdb_function=self.tsdb.get_sums,
keys=[event.group_id],
group_id=event.group.id,
organization_id=event.group.project.organization_id,
model=get_issue_tsdb_group_model(event.group.issue_category),
start=start,
end=end,
environment_id=environment_id,
referrer_suffix="alert_event_frequency_percent",
group_on_time=False,
project_ids=[event.group.project_id],
)[event.group_id]
if issue_count > avg_sessions_in_interval:
# We want to better understand when and why this is happening, so we're logging it for now
self.logger.info(
"EventFrequencyPercentCondition.query_hook",
extra={
"issue_count": issue_count,
"project_id": project_id,
"avg_sessions_in_interval": avg_sessions_in_interval,
},
)
percent: float = 100 * round(issue_count / avg_sessions_in_interval, 4)
return percent
return 0
def batch_query_hook(
self,
group_ids: set[int],
start: datetime,
end: datetime,
environment_id: int,
group_on_time: bool = False,
) -> dict[int, int | float]:
groups = Group.objects.filter(id__in=group_ids).values(
"id", "type", "project_id", "project__organization_id"
)
project_id = self.get_value_from_groups(groups, "project_id")
if not project_id:
return {group["id"]: 0 for group in groups}
session_count_last_hour = self.get_session_count(project_id, environment_id, start, end)
avg_sessions_in_interval = self.get_session_interval(
session_count_last_hour, self.get_option("interval")
)
if not avg_sessions_in_interval:
return {group["id"]: 0 for group in groups}
error_issue_ids, generic_issue_ids = self.get_error_and_generic_group_ids(groups)
organization_id = self.get_value_from_groups(groups, "project__organization_id")
if not (error_issue_ids and organization_id):
return {group["id"]: 0 for group in groups}
error_project_ids = [g["project_id"] for g in groups if g["id"] in error_issue_ids]
error_issue_count = self.get_chunked_result(
tsdb_function=self.tsdb.get_sums,
model=get_issue_tsdb_group_model(GroupCategory.ERROR),
group_ids=error_issue_ids,
organization_id=organization_id,
start=start,
end=end,
environment_id=environment_id,
referrer_suffix="batch_alert_event_frequency_percent",
group_on_time=group_on_time,
project_ids=error_project_ids,
)
batch_percents: dict[int, int | float] = {}
for group_id, count in error_issue_count.items():
percent: float = 100 * round(count / avg_sessions_in_interval, 4)
batch_percents[group_id] = percent
# We do not have sessions for non-error issue types
for group in generic_issue_ids:
batch_percents[group] = 0
return batch_percents
def passes_activity_frequency(
self, activity: ConditionActivity, buckets: dict[datetime, int]
) -> bool:
raise NotImplementedError
def get_form_instance(self) -> EventFrequencyPercentForm:
return EventFrequencyPercentForm(self.data)
def bucket_count(start: datetime, end: datetime, buckets: dict[datetime, int]) -> int:
rounded_end = round_to_five_minute(end)
rounded_start = round_to_five_minute(start)
count = buckets.get(rounded_end, 0) - buckets.get(rounded_start, 0)
return count
def percent_increase(result: int | float, comparison_result: int | float) -> int:
return (
int(max(0, ((result - comparison_result) / comparison_result * 100)))
if comparison_result > 0
else 0
)
| EventFrequencyPercentCondition |
python | PrefectHQ__prefect | tests/utilities/test_importtools.py | {
"start": 596,
"end": 11546
} | class ____:
pass
# Note we use the hosted API to avoid Postgres engine caching errors
pytest.mark.usefixtures("hosted_orion")
@pytest.mark.parametrize(
"obj,expected",
[
(to_qualified_name, "prefect.utilities.importtools.to_qualified_name"),
(prefect.tasks.Task, "prefect.tasks.Task"),
(prefect.tasks.Task.__call__, "prefect.tasks.Task.__call__"),
(lambda x: x + 1, "tests.utilities.test_importtools.<lambda>"),
(my_fn, "tests.utilities.test_importtools.my_fn"),
],
)
def test_to_qualified_name(obj, expected):
assert to_qualified_name(obj) == expected
@pytest.mark.parametrize("obj", [to_qualified_name, prefect.tasks.Task, my_fn, Foo])
def test_to_and_from_qualified_name_roundtrip(obj):
assert from_qualified_name(to_qualified_name(obj)) == obj
@pytest.fixture
def pop_docker_module():
# Allows testing of `lazy_import` on a clean sys
original = sys.modules.pop("docker")
try:
yield
finally:
sys.modules["docker"] = original
@pytest.mark.usefixtures("pop_docker_module")
def test_lazy_import():
docker: ModuleType("docker") = lazy_import("docker")
assert isinstance(docker, importlib.util._LazyModule)
assert isinstance(docker, ModuleType)
assert callable(docker.from_env)
def test_lazy_import_fails_for_missing_modules():
with pytest.raises(ModuleNotFoundError, match="flibbidy"):
lazy_import("flibbidy", error_on_import=True)
def test_lazy_import_allows_deferred_failure_for_missing_module():
module = lazy_import("flibbidy", error_on_import=False)
assert isinstance(module, ModuleType)
with pytest.raises(ModuleNotFoundError, match="No module named 'flibbidy'") as exc:
module.foo
assert "No module named 'flibbidy'" in exc.exconly(), (
"Exception should contain error message"
)
def test_lazy_import_includes_help_message_for_missing_modules():
with pytest.raises(
ModuleNotFoundError, match="No module named 'flibbidy'.\nHello world"
):
lazy_import("flibbidy", error_on_import=True, help_message="Hello world")
def test_lazy_import_includes_help_message_in_deferred_failure():
module = lazy_import(
"flibbidy",
error_on_import=False,
help_message="No module named 'flibbidy'.*Hello world",
)
assert isinstance(module, ModuleType)
with pytest.raises(
ModuleNotFoundError, match="No module named 'flibbidy'.*Hello world"
):
module.foo
@pytest.mark.parametrize(
"working_directory,script_path",
[
# Working directory is not necessary for these imports to work
(
__development_base_path__,
TEST_PROJECTS_DIR / "flat-project" / "implicit_relative.py",
),
(
__development_base_path__,
TEST_PROJECTS_DIR / "flat-project" / "explicit_relative.py",
),
(
__development_base_path__,
TEST_PROJECTS_DIR / "nested-project" / "implicit_relative.py",
),
(
__development_base_path__,
TEST_PROJECTS_DIR / "nested-project" / "explicit_relative.py",
),
# They also work with the working directory set to the project
(TEST_PROJECTS_DIR / "flat-project", "implicit_relative.py"),
(TEST_PROJECTS_DIR / "flat-project", "explicit_relative.py"),
(TEST_PROJECTS_DIR / "nested-project", "implicit_relative.py"),
(TEST_PROJECTS_DIR / "nested-project", "explicit_relative.py"),
# The tree structure requires the working directory to be at the base
(TEST_PROJECTS_DIR / "tree-project", Path("imports") / "implicit_relative.py"),
# below are cases that used to fail; see https://github.com/PrefectHQ/prefect/pull/17524
(
TEST_PROJECTS_DIR,
Path("tree-project") / "imports" / "implicit_relative.py",
),
(TEST_PROJECTS_DIR / "tree-project" / "imports", "implicit_relative.py"),
],
)
def test_import_object_from_script_with_relative_imports(
working_directory, script_path
):
with tmpchdir(working_directory):
foobar = import_object(f"{script_path}:foobar")
assert callable(foobar), f"Expected callable, got {foobar!r}"
assert foobar() == "foobar"
@pytest.mark.parametrize(
"working_directory,script_path",
[
# Explicit relative imports cannot go up levels with script-based imports
(TEST_PROJECTS_DIR / "tree-project", Path("imports") / "explicit_relative.py"),
],
)
def test_import_object_from_script_with_relative_imports_expected_failures(
working_directory, script_path
):
with tmpchdir(working_directory):
with pytest.raises(ScriptError):
import_object(f"{script_path}:foobar")
# Python would raise the same error if running `python <script>`
with pytest.raises(ImportError):
runpy.run_path(str(script_path))
@pytest.mark.parametrize(
"working_directory,import_path",
[
# Implicit relative imports work if the working directory is the project
(TEST_PROJECTS_DIR / "flat-project", "implicit_relative.foobar"),
(TEST_PROJECTS_DIR / "nested-project", "implicit_relative.foobar"),
(TEST_PROJECTS_DIR / "tree-project", "imports.implicit_relative.foobar"),
# below are cases that used to fail; see https://github.com/PrefectHQ/prefect/pull/17524
(TEST_PROJECTS_DIR, "implicit_relative.foobar"),
],
)
def test_import_object_from_module_with_relative_imports(
working_directory, import_path
):
with tmpchdir(working_directory):
foobar = import_object(import_path)
assert foobar() == "foobar"
@pytest.mark.parametrize(
"working_directory,import_path",
[
# Explicit relative imports not expected to work
(TEST_PROJECTS_DIR / "flat-project", "explicit_relative.foobar"),
(TEST_PROJECTS_DIR / "nested-project", "explicit_relative.foobar"),
(TEST_PROJECTS_DIR / "tree-project", "imports.explicit_relative.foobar"),
],
)
def test_import_object_from_module_with_relative_imports_expected_failures(
working_directory, import_path
):
with tmpchdir(working_directory):
with pytest.raises((ValueError, ImportError)):
import_object(import_path)
# Python would raise the same error
with pytest.raises((ValueError, ImportError)):
runpy.run_module(import_path)
def test_safe_load_namespace():
source_code = dedent(
"""
import math
from datetime import datetime
from pydantic import BaseModel
class MyModel(BaseModel):
x: int
def my_fn():
return 42
x = 10
y = math.sqrt(x)
now = datetime.now()
"""
)
namespace = safe_load_namespace(source_code)
# module-level imports should be present
assert "math" in namespace
assert "datetime" in namespace
assert "BaseModel" in namespace
# module-level variables should be present
assert "x" in namespace
assert "y" in namespace
assert "now" in namespace
# module-level classes should be present
assert "MyModel" in namespace
# module-level functions should be present
assert "my_fn" in namespace
assert namespace["MyModel"].__name__ == "MyModel"
def test_safe_load_namespace_ignores_import_errors():
source_code = dedent(
"""
import flibbidy
from pydantic import BaseModel
class MyModel(BaseModel):
x: int
"""
)
# should not raise an ImportError
namespace = safe_load_namespace(source_code)
assert "flibbidy" not in namespace
# other imports and classes should be present
assert "BaseModel" in namespace
assert "MyModel" in namespace
assert namespace["MyModel"].__name__ == "MyModel"
def test_safe_load_namespace_ignore_class_declaration_errors():
source_code = dedent(
"""
from fake_pandas import DataFrame
class CoolDataFrame(DataFrame):
pass
"""
)
# should not raise any errors
namespace = safe_load_namespace(source_code)
assert "DataFrame" not in namespace
assert "CoolDataFrame" not in namespace
def test_safe_load_namespace_ignores_code_in_if_name_equals_main_block():
source_code = dedent(
"""
import math
x = 10
y = math.sqrt(x)
if __name__ == "__main__":
z = 10
"""
)
# should not raise any errors
namespace = safe_load_namespace(source_code)
assert "x" in namespace
assert "y" in namespace
assert "z" not in namespace
def test_safe_load_namespace_does_not_execute_function_body():
"""
Regression test for https://github.com/PrefectHQ/prefect/issues/14402
"""
source_code = dedent(
"""
you_done_goofed = False
def my_fn():
nonlocal you_done_goofed
you_done_goofed = True
def my_other_fn():
foo = my_fn()
"""
)
# should not raise any errors
namespace = safe_load_namespace(source_code)
assert not namespace["you_done_goofed"]
def test_safe_load_namespace_implicit_relative_imports():
"""
Regression test for https://github.com/PrefectHQ/prefect/issues/15352
"""
path = TEST_PROJECTS_DIR / "flat-project" / "implicit_relative.py"
source_code = path.read_text()
namespace = safe_load_namespace(source_code, filepath=str(path))
assert "get_foo" in namespace
assert "get_bar" in namespace
def test_concurrent_script_loading(tmpdir: Path):
"""Test that loading multiple scripts concurrently is thread-safe.
This is a regression test for https://github.com/PrefectHQ/prefect/issues/16452
"""
script_contents = """
def hello():
return "Hello from {}"
"""
scripts: list[str] = []
for i in range(5):
path = tmpdir / f"script_{i}.py"
path.write_text(script_contents.format(i), encoding="utf-8")
scripts.append(str(path))
loaded_modules: list[ModuleType] = []
errors: list[Exception] = []
async def load_script(path: str) -> str:
try:
module = load_script_as_module(path)
loaded_modules.append(module)
return module.hello()
except Exception as e:
errors.append(e)
raise
async def run_concurrent_loads():
futures = [load_script(script) for script in scripts]
return await asyncio.gather(*futures)
results = asyncio.run(run_concurrent_loads())
assert not errors, f"Errors occurred during concurrent loading: {errors}"
assert len(results) == 5
assert sorted(results) == [f"Hello from {i}" for i in range(5)]
module_names = [m.__name__ for m in loaded_modules]
assert len(module_names) == len(set(module_names)), "Duplicate module names found"
| Foo |
python | psf__black | tests/data/cases/class_methods_new_line.py | {
"start": 30,
"end": 68
} | class ____:
a = 1
| ClassWithSingleField |
python | keras-team__keras | keras/src/layers/preprocessing/mel_spectrogram.py | {
"start": 250,
"end": 14697
} | class ____(DataLayer):
"""A preprocessing layer to convert raw audio signals to Mel spectrograms.
This layer takes `float32`/`float64` single or batched audio signal as
inputs and computes the Mel spectrogram using Short-Time Fourier Transform
and Mel scaling. The input should be a 1D (unbatched) or 2D (batched) tensor
representing audio signals. The output will be a 2D or 3D tensor
representing Mel spectrograms.
A spectrogram is an image-like representation that shows the frequency
spectrum of a signal over time. It uses x-axis to represent time, y-axis to
represent frequency, and each pixel to represent intensity.
Mel spectrograms are a special type of spectrogram that use the mel scale,
which approximates how humans perceive sound. They are commonly used in
speech and music processing tasks like speech recognition, speaker
identification, and music genre classification.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
References:
- [Spectrogram](https://en.wikipedia.org/wiki/Spectrogram),
- [Mel scale](https://en.wikipedia.org/wiki/Mel_scale).
Args:
fft_length: Integer, size of the FFT window.
sequence_stride: Integer, number of samples between successive STFT
columns.
sequence_length: Integer, size of the window used for applying
`window` to each audio frame. If `None`, defaults to `fft_length`.
window: String, name of the window function to use. Available values
are `"hann"` and `"hamming"`. If `window` is a tensor, it will be
used directly as the window and its length must be
`sequence_length`. If `window` is `None`, no windowing is
used. Defaults to `"hann"`.
sampling_rate: Integer, sample rate of the input signal.
num_mel_bins: Integer, number of mel bins to generate.
min_freq: Float, minimum frequency of the mel bins.
max_freq: Float, maximum frequency of the mel bins.
If `None`, defaults to `sampling_rate / 2`.
power_to_db: If True, convert the power spectrogram to decibels.
top_db: Float, minimum negative cut-off `max(10 * log10(S)) - top_db`.
mag_exp: Float, exponent for the magnitude spectrogram.
1 for magnitude, 2 for power, etc. Default is 2.
ref_power: Float, the power is scaled relative to it
`10 * log10(S / ref_power)`.
min_power: Float, minimum value for power and `ref_power`.
Examples:
**Unbatched audio signal**
>>> layer = keras.layers.MelSpectrogram(num_mel_bins=64,
... sampling_rate=8000,
... sequence_stride=256,
... fft_length=2048)
>>> layer(keras.random.uniform(shape=(16000,))).shape
(64, 63)
**Batched audio signal**
>>> layer = keras.layers.MelSpectrogram(num_mel_bins=80,
... sampling_rate=8000,
... sequence_stride=128,
... fft_length=2048)
>>> layer(keras.random.uniform(shape=(2, 16000))).shape
(2, 80, 125)
Input shape:
1D (unbatched) or 2D (batched) tensor with shape:`(..., samples)`.
Output shape:
2D (unbatched) or 3D (batched) tensor with
shape:`(..., num_mel_bins, time)`.
"""
def __init__(
self,
fft_length=2048,
sequence_stride=512,
sequence_length=None,
window="hann",
sampling_rate=16000,
num_mel_bins=128,
min_freq=20.0,
max_freq=None,
power_to_db=True,
top_db=80.0,
mag_exp=2.0,
min_power=1e-10,
ref_power=1.0,
**kwargs,
):
self.fft_length = fft_length
self.sequence_stride = sequence_stride
self.sequence_length = sequence_length or fft_length
self.window = window
self.sampling_rate = sampling_rate
self.num_mel_bins = num_mel_bins
self.min_freq = min_freq
self.max_freq = max_freq or int(sampling_rate / 2)
self.power_to_db = power_to_db
self.top_db = top_db
self.mag_exp = mag_exp
self.min_power = min_power
self.ref_power = ref_power
super().__init__(**kwargs)
def call(self, inputs):
dtype = (
"float32"
if self.compute_dtype not in ["float32", "float64"]
else self.compute_dtype
) # jax, tf supports only "float32" and "float64" in stft
inputs = self.backend.convert_to_tensor(inputs, dtype=dtype)
outputs = self._spectrogram(inputs)
outputs = self._melscale(outputs)
if self.power_to_db:
outputs = self._dbscale(outputs)
# swap time & freq axis to have shape of (..., num_mel_bins, time)
outputs = self.backend.numpy.swapaxes(outputs, -1, -2)
outputs = self.backend.cast(outputs, self.compute_dtype)
return outputs
def _spectrogram(self, inputs):
real, imag = self.backend.math.stft(
inputs,
sequence_length=self.sequence_length,
sequence_stride=self.sequence_stride,
fft_length=self.fft_length,
window=self.window,
center=True,
)
# abs of complex = sqrt(real^2 + imag^2)
spec = self.backend.numpy.sqrt(
self.backend.numpy.add(
self.backend.numpy.square(real), self.backend.numpy.square(imag)
)
)
spec = self.backend.numpy.power(spec, self.mag_exp)
return spec
def _melscale(self, inputs):
matrix = self.linear_to_mel_weight_matrix(
num_mel_bins=self.num_mel_bins,
num_spectrogram_bins=self.backend.shape(inputs)[-1],
sampling_rate=self.sampling_rate,
lower_edge_hertz=self.min_freq,
upper_edge_hertz=self.max_freq,
)
return self.backend.numpy.tensordot(inputs, matrix, axes=1)
def _dbscale(self, inputs):
log_spec = 10.0 * (
self.backend.numpy.log10(
self.backend.numpy.maximum(inputs, self.min_power)
)
)
ref_value = self.backend.numpy.abs(
self.backend.convert_to_tensor(self.ref_power)
)
log_spec -= 10.0 * self.backend.numpy.log10(
self.backend.numpy.maximum(ref_value, self.min_power)
)
log_spec = self.backend.numpy.maximum(
log_spec, self.backend.numpy.max(log_spec) - self.top_db
)
return log_spec
def _hertz_to_mel(self, frequencies_hertz):
"""Converts frequencies in `frequencies_hertz` in Hertz to the
mel scale.
Args:
frequencies_hertz: A tensor of frequencies in Hertz.
name: An optional name for the operation.
Returns:
A tensor of the same shape and type of `frequencies_hertz`
containing frequencies in the mel scale.
"""
return _MEL_HIGH_FREQUENCY_Q * self.backend.numpy.log(
1.0 + (frequencies_hertz / _MEL_BREAK_FREQUENCY_HERTZ)
)
def linear_to_mel_weight_matrix(
self,
num_mel_bins=20,
num_spectrogram_bins=129,
sampling_rate=8000,
lower_edge_hertz=125.0,
upper_edge_hertz=3800.0,
dtype="float32",
):
"""Returns a matrix to warp linear scale spectrograms to the mel scale.
Returns a weight matrix that can be used to re-weight a tensor
containing `num_spectrogram_bins` linearly sampled frequency information
from `[0, sampling_rate / 2]` into `num_mel_bins` frequency information
from `[lower_edge_hertz, upper_edge_hertz]` on the mel scale.
This function follows the [Hidden Markov Model Toolkit (HTK)](
http://htk.eng.cam.ac.uk/) convention, defining the mel scale in
terms of a frequency in hertz according to the following formula:
```mel(f) = 2595 * log10( 1 + f/700)```
In the returned matrix, all the triangles (filterbanks) have a peak
value of 1.0.
For example, the returned matrix `A` can be used to right-multiply a
spectrogram `S` of shape `[frames, num_spectrogram_bins]` of linear
scale spectrum values (e.g. STFT magnitudes) to generate a
"mel spectrogram" `M` of shape `[frames, num_mel_bins]`.
```
# `S` has shape [frames, num_spectrogram_bins]
# `M` has shape [frames, num_mel_bins]
M = keras.ops.matmul(S, A)
```
The matrix can be used with `keras.ops.tensordot` to convert an
arbitrary rank `Tensor` of linear-scale spectral bins into the
mel scale.
```
# S has shape [..., num_spectrogram_bins].
# M has shape [..., num_mel_bins].
M = keras.ops.tensordot(S, A, 1)
```
References:
- [Mel scale (Wikipedia)](https://en.wikipedia.org/wiki/Mel_scale)
Args:
num_mel_bins: Python int. How many bands in the resulting
mel spectrum.
num_spectrogram_bins: An integer `Tensor`. How many bins there are
in the source spectrogram data, which is understood to be
`fft_size // 2 + 1`, i.e. the spectrogram only contains the
nonredundant FFT bins.
sampling_rate: An integer or float `Tensor`. Samples per second of
the input signal used to create the spectrogram. Used to figure
out the frequencies corresponding to each spectrogram bin,
which dictates how they are mapped into the mel scale.
lower_edge_hertz: Python float. Lower bound on the frequencies to be
included in the mel spectrum. This corresponds to the lower
edge of the lowest triangular band.
upper_edge_hertz: Python float. The desired top edge of the highest
frequency band.
dtype: The `DType` of the result matrix. Must be a floating point
type.
Returns:
A tensor of shape `[num_spectrogram_bins, num_mel_bins]`.
"""
# This function can be constant folded by graph optimization since
# there are no Tensor inputs.
sampling_rate = self.backend.cast(sampling_rate, dtype)
lower_edge_hertz = self.backend.convert_to_tensor(
lower_edge_hertz,
dtype,
)
upper_edge_hertz = self.backend.convert_to_tensor(
upper_edge_hertz,
dtype,
)
zero = self.backend.convert_to_tensor(0.0, dtype)
# HTK excludes the spectrogram DC bin.
bands_to_zero = 1
nyquist_hertz = sampling_rate / 2.0
linear_frequencies = self.backend.numpy.linspace(
zero, nyquist_hertz, num_spectrogram_bins
)[bands_to_zero:]
spectrogram_bins_mel = self.backend.numpy.expand_dims(
self._hertz_to_mel(linear_frequencies), 1
)
# Compute num_mel_bins triples of (lower_edge, center, upper_edge). The
# center of each band is the lower and upper edge of the adjacent bands.
# Accordingly, we divide [lower_edge_hertz, upper_edge_hertz] into
# num_mel_bins + 2 pieces.
band_edges_mel = self.backend.math.extract_sequences(
self.backend.numpy.linspace(
self._hertz_to_mel(lower_edge_hertz),
self._hertz_to_mel(upper_edge_hertz),
num_mel_bins + 2,
),
sequence_length=3,
sequence_stride=1,
)
# Split the triples up and reshape them into [1, num_mel_bins] tensors.
lower_edge_mel, center_mel, upper_edge_mel = tuple(
self.backend.numpy.reshape(t, [1, num_mel_bins])
for t in self.backend.numpy.split(band_edges_mel, 3, axis=1)
)
# Calculate lower and upper slopes for every spectrogram bin.
# Line segments are linear in the mel domain, not Hertz.
lower_slopes = (spectrogram_bins_mel - lower_edge_mel) / (
center_mel - lower_edge_mel
)
upper_slopes = (upper_edge_mel - spectrogram_bins_mel) / (
upper_edge_mel - center_mel
)
# Intersect the line segments with each other and zero.
mel_weights_matrix = self.backend.numpy.maximum(
zero, self.backend.numpy.minimum(lower_slopes, upper_slopes)
)
# Re-add the zeroed lower bins we sliced out above.
return self.backend.numpy.pad(
mel_weights_matrix,
[[bands_to_zero, 0], [0, 0]],
)
def compute_output_shape(self, input_shape):
if len(input_shape) == 1:
output_shape = [
self.num_mel_bins,
(
(input_shape[0] + self.sequence_stride + 1)
// self.sequence_stride
if input_shape[0] is not None
else None
),
]
else:
output_shape = [
input_shape[0],
self.num_mel_bins,
(
(input_shape[1] + self.sequence_stride + 1)
// self.sequence_stride
if input_shape[1] is not None
else None
),
]
return output_shape
def get_config(self):
config = super().get_config()
config.update(
{
"fft_length": self.fft_length,
"sequence_stride": self.sequence_stride,
"sequence_length": self.sequence_length,
"window": self.window,
"sampling_rate": self.sampling_rate,
"num_mel_bins": self.num_mel_bins,
"min_freq": self.min_freq,
"max_freq": self.max_freq,
"power_to_db": self.power_to_db,
"top_db": self.top_db,
"mag_exp": self.mag_exp,
"min_power": self.min_power,
"ref_power": self.ref_power,
}
)
return config
| MelSpectrogram |
python | pandas-dev__pandas | pandas/tests/series/test_arithmetic.py | {
"start": 33110,
"end": 36304
} | class ____:
@pytest.mark.parametrize(
"dtype1, dtype2, dtype_expected, dtype_mul",
(
("Int64", "Int64", "Int64", "Int64"),
("float", "float", "float", "float"),
("Int64", "float", "Float64", "Float64"),
("Int64", "Float64", "Float64", "Float64"),
),
)
def test_series_inplace_ops(self, dtype1, dtype2, dtype_expected, dtype_mul):
# GH 37910
ser1 = Series([1], dtype=dtype1)
ser2 = Series([2], dtype=dtype2)
ser1 += ser2
expected = Series([3], dtype=dtype_expected)
tm.assert_series_equal(ser1, expected)
ser1 -= ser2
expected = Series([1], dtype=dtype_expected)
tm.assert_series_equal(ser1, expected)
ser1 *= ser2
expected = Series([2], dtype=dtype_mul)
tm.assert_series_equal(ser1, expected)
def test_none_comparison(request, series_with_simple_index):
series = series_with_simple_index
if len(series) < 1:
request.applymarker(
pytest.mark.xfail(reason="Test doesn't make sense on empty data")
)
# bug brought up by #1079
# changed from TypeError in 0.17.0
series.iloc[0] = np.nan
# noinspection PyComparisonWithNone
result = series == None # noqa: E711
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
result = series != None # noqa: E711
assert result.iat[0]
assert result.iat[1]
result = None == series # noqa: E711
assert not result.iat[0]
assert not result.iat[1]
result = None != series # noqa: E711
assert result.iat[0]
assert result.iat[1]
if lib.is_np_dtype(series.dtype, "M") or isinstance(series.dtype, DatetimeTZDtype):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Series[datetime64] raise
msg = "Invalid comparison"
with pytest.raises(TypeError, match=msg):
None > series
with pytest.raises(TypeError, match=msg):
series > None
else:
result = None > series
assert not result.iat[0]
assert not result.iat[1]
result = series < None
assert not result.iat[0]
assert not result.iat[1]
def test_series_varied_multiindex_alignment():
# GH 20414
s1 = Series(
range(8),
index=pd.MultiIndex.from_product(
[list("ab"), list("xy"), [1, 2]], names=["ab", "xy", "num"]
),
)
s2 = Series(
[1000 * i for i in range(1, 5)],
index=pd.MultiIndex.from_product([list("xy"), [1, 2]], names=["xy", "num"]),
)
result = s1.loc[pd.IndexSlice[["a"], :, :]] + s2
expected = Series(
[1000, 2001, 3002, 4003],
index=pd.MultiIndex.from_tuples(
[("a", "x", 1), ("a", "x", 2), ("a", "y", 1), ("a", "y", 2)],
names=["ab", "xy", "num"],
),
)
tm.assert_series_equal(result, expected)
def test_rmod_consistent_large_series():
# GH 29602
result = Series([2] * 10001).rmod(-1)
expected = Series([1] * 10001)
tm.assert_series_equal(result, expected)
| TestInplaceOperations |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/layout/processors.py | {
"start": 1641,
"end": 2255
} | class ____(metaclass=ABCMeta):
"""
Manipulate the fragments for a given line in a
:class:`~prompt_toolkit.layout.controls.BufferControl`.
"""
@abstractmethod
def apply_transformation(
self, transformation_input: TransformationInput
) -> Transformation:
"""
Apply transformation. Returns a :class:`.Transformation` instance.
:param transformation_input: :class:`.TransformationInput` object.
"""
return Transformation(transformation_input.fragments)
SourceToDisplay = Callable[[int], int]
DisplayToSource = Callable[[int], int]
| Processor |
python | pytorch__pytorch | torch/ao/quantization/_learnable_fake_quantize.py | {
"start": 110,
"end": 7959
} | class ____(torch.ao.quantization.FakeQuantizeBase):
r"""Generalized extension of the FakeQuantize module in fake_quantize.py.
This is an extension of the FakeQuantize module in fake_quantize.py, which
supports more generalized lower-bit quantization and supports learning of the scale
and zero point parameters through backpropagation.
In addition to the attributes in the original FakeQuantize module, the _LearnableFakeQuantize
module also includes the following attributes to support quantization parameter learning.
* :attr:`channel_len` defines the length of the channel when initializing scale and zero point
for the per channel case.
* :attr:`use_grad_scaling` defines the flag for whether the gradients for scale and zero point are
normalized by the constant, which is proportional to the square root of the number of
elements in the tensor. The related literature justifying the use of this particular constant
can be found here: https://openreview.net/pdf?id=rkgO66VKDS.
* :attr:`fake_quant_enabled` defines the flag for enabling fake quantization on the output.
* :attr:`static_enabled` defines the flag for using observer's static estimation for
scale and zero point.
* :attr:`learning_enabled` defines the flag for enabling backpropagation for scale and zero point.
"""
def __init__(
self,
observer,
quant_min=0,
quant_max=255,
scale=1.0,
zero_point=0.0,
channel_len=-1,
use_grad_scaling=False,
**observer_kwargs,
):
super().__init__()
if quant_min >= quant_max:
raise AssertionError("quant_min must be strictly less than quant_max.")
self.quant_min = quant_min
self.quant_max = quant_max
# also pass quant_min and quant_max to observer
observer_kwargs["quant_min"] = quant_min
observer_kwargs["quant_max"] = quant_max
self.use_grad_scaling = use_grad_scaling
if channel_len == -1:
self.scale = Parameter(torch.tensor([scale]))
self.zero_point = Parameter(torch.tensor([zero_point]))
else:
if not (isinstance(channel_len, int) and channel_len > 0):
raise AssertionError("Channel size must be a positive integer.")
self.scale = Parameter(torch.tensor([scale] * channel_len))
self.zero_point = Parameter(torch.tensor([zero_point] * channel_len))
self.activation_post_process = observer(**observer_kwargs)
if torch.iinfo(self.activation_post_process.dtype).min > quant_min:
raise AssertionError("quant_min out of bound")
if quant_max > torch.iinfo(self.activation_post_process.dtype).max:
raise AssertionError("quant_max out of bound")
self.dtype = self.activation_post_process.dtype
self.qscheme = self.activation_post_process.qscheme
self.ch_axis = (
self.activation_post_process.ch_axis
if hasattr(self.activation_post_process, "ch_axis")
else -1
)
self.register_buffer("fake_quant_enabled", torch.tensor([1], dtype=torch.uint8))
self.register_buffer("static_enabled", torch.tensor([1], dtype=torch.uint8))
self.register_buffer("learning_enabled", torch.tensor([0], dtype=torch.uint8))
bitrange = torch.tensor(quant_max - quant_min + 1).double()
self.bitwidth = int(torch.log2(bitrange).item())
self.register_buffer("eps", torch.tensor([torch.finfo(torch.float32).eps]))
@torch.jit.export
def enable_param_learning(self):
r"""Enable parameter learning over static observer estimates.
Enables learning of quantization parameters and
disables static observer estimates. Forward path returns fake quantized X.
"""
self.toggle_qparam_learning(enabled=True).toggle_fake_quant(
enabled=True
).toggle_observer_update(enabled=False)
return self
@torch.jit.export
def enable_static_estimate(self):
"""Enable static estimates of quantization parameters.
Enables static observer estimates and disables learning of
quantization parameters. Forward path returns fake quantized X.
"""
self.toggle_qparam_learning(enabled=False).toggle_fake_quant(
enabled=True
).toggle_observer_update(enabled=True)
@torch.jit.export
def enable_static_observation(self):
"""Enable accumulation of data without updating quantization parameters.
Enables static observer accumulating data from input but doesn't
update the quantization parameters. Forward path returns the original X.
"""
self.toggle_qparam_learning(enabled=False).toggle_fake_quant(
enabled=False
).toggle_observer_update(enabled=True)
@torch.jit.export
def toggle_observer_update(self, enabled=True):
self.static_enabled[0] = int(enabled) # type: ignore[operator]
return self
@torch.jit.export
def enable_observer(self, enabled=True):
self.toggle_observer_update(enabled)
@torch.jit.export
def toggle_qparam_learning(self, enabled=True):
self.learning_enabled[0] = int(enabled) # type: ignore[operator]
self.scale.requires_grad = enabled
self.zero_point.requires_grad = enabled
return self
@torch.jit.export
def toggle_fake_quant(self, enabled=True):
self.fake_quant_enabled[0] = int(enabled)
return self
@torch.jit.export
def observe_quant_params(self):
print(f"_LearnableFakeQuantize Scale: {self.scale.detach()}")
print(f"_LearnableFakeQuantize Zero Point: {self.zero_point.detach()}")
@torch.jit.export
def calculate_qparams(self): # type: ignore[override]
self.scale.data.clamp_(min=self.eps.item()) # type: ignore[operator]
scale = self.scale.detach()
zero_point = (
self.zero_point.detach()
.round()
.clamp(self.quant_min, self.quant_max)
.long()
)
return scale, zero_point
def forward(self, X):
if self.static_enabled[0] == 1: # type: ignore[index]
self.activation_post_process(X.detach())
_scale, _zero_point = self.activation_post_process.calculate_qparams()
_scale = _scale.to(self.scale.device)
_zero_point = _zero_point.to(self.zero_point.device)
self.scale.data.copy_(_scale)
self.zero_point.data.copy_(_zero_point)
else:
self.scale.data.clamp_(min=self.eps.item()) # type: ignore[operator]
if self.fake_quant_enabled[0] == 1:
if self.qscheme in (
torch.per_channel_symmetric,
torch.per_tensor_symmetric,
):
self.zero_point.data.zero_()
if self.use_grad_scaling:
grad_factor = 1.0 / (X.numel() * self.quant_max) ** 0.5
else:
grad_factor = 1.0
if self.qscheme in (torch.per_channel_symmetric, torch.per_channel_affine):
X = torch._fake_quantize_learnable_per_channel_affine(
X,
self.scale,
self.zero_point,
self.ch_axis,
self.quant_min,
self.quant_max,
grad_factor,
)
else:
X = torch._fake_quantize_learnable_per_tensor_affine(
X,
self.scale,
self.zero_point,
self.quant_min,
self.quant_max,
grad_factor,
)
return X
| _LearnableFakeQuantize |
python | huggingface__transformers | tests/models/patchtsmixer/test_modeling_patchtsmixer.py | {
"start": 18063,
"end": 20996
} | class ____(unittest.TestCase):
def test_pretrain_head(self):
model = PatchTSMixerForPretraining.from_pretrained("ibm/patchtsmixer-etth1-pretrain").to(torch_device)
batch = prepare_batch()
torch.manual_seed(0)
with torch.no_grad():
output = model(past_values=batch["past_values"].to(torch_device)).prediction_outputs
num_patch = (
max(model.config.context_length, model.config.patch_length) - model.config.patch_length
) // model.config.patch_stride + 1
expected_shape = torch.Size(
[
64,
model.config.num_input_channels,
num_patch,
model.config.patch_length,
]
)
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor([[[-0.9106]],[[1.5326]],[[-0.8245]],[[0.7439]],[[-0.7830]],[[2.6256]],[[-0.6485]],],device=torch_device) # fmt: skip
torch.testing.assert_close(output[0, :7, :1, :1], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def test_forecasting_head(self):
model = PatchTSMixerForPrediction.from_pretrained("ibm/patchtsmixer-etth1-forecasting").to(torch_device)
batch = prepare_batch(file="forecast_batch.pt")
model.eval()
torch.manual_seed(0)
with torch.no_grad():
output = model(
past_values=batch["past_values"].to(torch_device),
future_values=batch["future_values"].to(torch_device),
).prediction_outputs
expected_shape = torch.Size([64, model.config.prediction_length, model.config.num_input_channels])
self.assertEqual(output.shape, expected_shape)
expected_slice = torch.tensor(
[[0.2471, 0.5036, 0.3596, 0.5401, -0.0985, 0.3423, -0.8439]],
device=torch_device,
)
torch.testing.assert_close(output[0, :1, :7], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
def test_prediction_generation(self):
model = PatchTSMixerForPrediction.from_pretrained("ibm/patchtsmixer-etth1-generate").to(torch_device)
batch = prepare_batch(file="forecast_batch.pt")
print(batch["past_values"])
torch.manual_seed(0)
model.eval()
with torch.no_grad():
outputs = model.generate(past_values=batch["past_values"].to(torch_device))
expected_shape = torch.Size((64, 1, model.config.prediction_length, model.config.num_input_channels))
self.assertEqual(outputs.sequences.shape, expected_shape)
expected_slice = torch.tensor(
[[0.4308, -0.4731, 1.3512, -0.1038, -0.4655, 1.1279, -0.7179]],
device=torch_device,
)
mean_prediction = outputs.sequences.mean(dim=1)
torch.testing.assert_close(mean_prediction[0, -1:], expected_slice, rtol=TOLERANCE, atol=TOLERANCE)
@require_torch
| PatchTSMixerModelIntegrationTests |
python | numpy__numpy | numpy/ma/tests/test_subclassing.py | {
"start": 13507,
"end": 15150
} | class ____:
"""Quantity-like class that does not inherit from ndarray"""
def __init__(self, data, units):
self.magnitude = data
self.units = units
def __getattr__(self, attr):
return getattr(self.magnitude, attr)
def test_array_no_inheritance():
data_masked = np.ma.array([1, 2, 3], mask=[True, False, True])
data_masked_units = ArrayNoInheritance(data_masked, 'meters')
# Get the masked representation of the Quantity-like class
new_array = np.ma.array(data_masked_units)
assert_equal(data_masked.data, new_array.data)
assert_equal(data_masked.mask, new_array.mask)
# Test sharing the mask
data_masked.mask = [True, False, False]
assert_equal(data_masked.mask, new_array.mask)
assert_(new_array.sharedmask)
# Get the masked representation of the Quantity-like class
new_array = np.ma.array(data_masked_units, copy=True)
assert_equal(data_masked.data, new_array.data)
assert_equal(data_masked.mask, new_array.mask)
# Test that the mask is not shared when copy=True
data_masked.mask = [True, False, True]
assert_equal([True, False, False], new_array.mask)
assert_(not new_array.sharedmask)
# Get the masked representation of the Quantity-like class
new_array = np.ma.array(data_masked_units, keep_mask=False)
assert_equal(data_masked.data, new_array.data)
# The change did not affect the original mask
assert_equal(data_masked.mask, [True, False, True])
# Test that the mask is False and not shared when keep_mask=False
assert_(not new_array.mask)
assert_(not new_array.sharedmask)
| ArrayNoInheritance |
python | redis__redis-py | tests/test_credentials.py | {
"start": 22020,
"end": 22867
} | class ____:
@pytest.mark.parametrize(
"r_entra",
[
{
"cred_provider_class": EntraIdCredentialsProvider,
"single_connection_client": False,
},
{
"cred_provider_class": EntraIdCredentialsProvider,
"single_connection_client": True,
},
{
"cred_provider_class": EntraIdCredentialsProvider,
"idp_kwargs": {"auth_type": AuthType.DEFAULT_AZURE_CREDENTIAL},
},
],
ids=["pool", "single", "DefaultAzureCredential"],
indirect=True,
)
@pytest.mark.onlycluster
@pytest.mark.cp_integration
def test_auth_pool_with_credential_provider(self, r_entra: redis.Redis):
assert r_entra.ping() is True
| TestClusterEntraIdCredentialsProvider |
python | numba__numba | numba/tests/test_dictobject.py | {
"start": 28300,
"end": 29450
} | class ____(TestCase):
def check_good(self, fromty, toty):
_sentry_safe_cast(fromty, toty)
def check_bad(self, fromty, toty):
with self.assertRaises(TypingError) as raises:
_sentry_safe_cast(fromty, toty)
self.assertIn(
'cannot safely cast {fromty} to {toty}'.format(**locals()),
str(raises.exception),
)
def test_cast_int_to(self):
self.check_good(types.int32, types.float32)
self.check_good(types.int32, types.float64)
self.check_good(types.int32, types.complex128)
self.check_good(types.int64, types.complex128)
self.check_bad(types.int32, types.complex64)
self.check_good(types.int8, types.complex64)
def test_cast_float_to(self):
self.check_good(types.float32, types.float64)
self.check_good(types.float32, types.complex64)
self.check_good(types.float64, types.complex128)
def test_cast_bool_to(self):
self.check_good(types.boolean, types.int32)
self.check_good(types.boolean, types.float64)
self.check_good(types.boolean, types.complex128)
| TestDictTypeCasting |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/roots/mutation.py | {
"start": 7777,
"end": 8031
} | class ____(graphene.Interface):
"""Interface indicating that a run was terminated."""
run = graphene.Field(graphene.NonNull(GrapheneRun))
class Meta:
name = "TerminatePipelineExecutionSuccess"
| GrapheneTerminatePipelineExecutionSuccess |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_s3.py | {
"start": 16008,
"end": 16946
} | class ____:
def test_execute(self):
operator = S3ListOperator(
task_id="test-s3-list-operator",
bucket=BUCKET_NAME,
prefix="TEST",
delimiter=".csv",
)
operator.hook = mock.MagicMock()
operator.hook.list_keys.return_value = ["TEST1.csv", "TEST2.csv", "TEST3.csv"]
files = operator.execute(None)
operator.hook.list_keys.assert_called_once_with(
bucket_name=BUCKET_NAME,
prefix="TEST",
delimiter=".csv",
apply_wildcard=False,
)
assert sorted(files) == sorted(["TEST1.csv", "TEST2.csv", "TEST3.csv"])
def test_template_fields(self):
operator = S3ListOperator(
task_id="test-s3-list-operator",
bucket=BUCKET_NAME,
prefix="TEST",
delimiter=".csv",
)
validate_template_fields(operator)
| TestS3ListOperator |
python | sqlalchemy__sqlalchemy | test/orm/test_froms.py | {
"start": 35545,
"end": 39132
} | class ____(fixtures.MappedTest, AssertsCompiledSQL):
run_setup_mappers = "once"
@classmethod
def define_tables(cls, metadata):
Table(
"a",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(50)),
Column("type", String(20)),
Column("bid", Integer, ForeignKey("b.id")),
)
Table(
"b",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(50)),
Column("type", String(20)),
)
Table(
"c",
metadata,
Column("id", Integer, ForeignKey("b.id"), primary_key=True),
Column("age", Integer),
)
Table(
"d",
metadata,
Column("id", Integer, ForeignKey("a.id"), primary_key=True),
Column("dede", Integer),
)
@classmethod
def setup_classes(cls):
class A(cls.Comparable):
pass
class B(cls.Comparable):
pass
class C(B):
pass
class D(A):
pass
@classmethod
def setup_mappers(cls):
a, c, b, d = (cls.tables.a, cls.tables.c, cls.tables.b, cls.tables.d)
A, B, C, D = cls.classes("A", "B", "C", "D")
cls.mapper_registry.map_imperatively(
A,
a,
polymorphic_identity="a",
polymorphic_on=a.c.type,
with_polymorphic=("*", None),
properties={
"link": relationship(B, uselist=False, backref="back")
},
)
cls.mapper_registry.map_imperatively(
B,
b,
polymorphic_identity="b",
polymorphic_on=b.c.type,
with_polymorphic=("*", None),
)
cls.mapper_registry.map_imperatively(
C, c, inherits=B, polymorphic_identity="c"
)
cls.mapper_registry.map_imperatively(
D, d, inherits=A, polymorphic_identity="d"
)
@classmethod
def insert_data(cls, connection):
A, C, B = (cls.classes.A, cls.classes.C, cls.classes.B)
sess = Session(connection)
sess.add_all(
[
B(name="b1"),
A(name="a1", link=C(name="c1", age=3)),
C(name="c2", age=6),
A(name="a2"),
]
)
sess.flush()
def test_add_entity_equivalence(self):
A, C, B = (self.classes.A, self.classes.C, self.classes.B)
sess = fixture_session()
for q in [
sess.query(A, B).join(A.link),
sess.query(A).join(A.link).add_entity(B),
]:
eq_(
q.all(),
[
(
A(bid=2, id=1, name="a1", type="a"),
C(age=3, id=2, name="c1", type="c"),
)
],
)
for q in [
sess.query(B, A).join(B.back),
sess.query(B).join(B.back).add_entity(A),
sess.query(B).add_entity(A).join(B.back),
]:
eq_(
q.all(),
[
(
C(age=3, id=2, name="c1", type="c"),
A(bid=2, id=1, name="a1", type="a"),
)
],
)
| AddEntityEquivalenceTest |
python | viewflow__viewflow | viewflow/workflow/apps.py | {
"start": 91,
"end": 344
} | class ____(AppConfig):
"""Default application config."""
default_auto_field = "django.db.models.BigAutoField"
name = "viewflow.workflow"
label = "viewflow" # keep backward compatible with 1.x
verbose_name = _("Workflow")
| WorkflowConfig |
python | getsentry__sentry | src/sentry/api/endpoints/release_thresholds/release_threshold_status_index.py | {
"start": 3898,
"end": 22101
} | class ____(OrganizationReleasesBaseEndpoint):
owner: ApiOwner = ApiOwner.ENTERPRISE
publish_status = {
"GET": ApiPublishStatus.PUBLIC,
}
@extend_schema(
operation_id="Retrieve Statuses of Release Thresholds (Alpha)",
parameters=[GlobalParams.ORG_ID_OR_SLUG, ReleaseThresholdStatusIndexSerializer],
request=None,
responses={
200: inline_sentry_response_serializer(
"ReleaseThresholdStatusResponse", dict[str, list[EnrichedThreshold]]
),
400: RESPONSE_BAD_REQUEST,
},
examples=ReleaseThresholdExamples.THRESHOLD_STATUS_RESPONSE,
)
def get(self, request: Request, organization: Organization | RpcOrganization) -> HttpResponse:
r"""
**`[WARNING]`**: This API is an experimental Alpha feature and is subject to change!
List all derived statuses of releases that fall within the provided start/end datetimes.
Constructs a response key'd off \{`release_version`\}-\{`project_slug`\} that lists thresholds with their status for *specified* projects.
Each returned enriched threshold will contain the full serialized `release_threshold` instance as well as it's derived health statuses.
"""
# TODO: We should limit/paginate results (this could get really bulky)
# ========================================================================
# STEP 1: Validate request data
#
# NOTE: start/end parameters determine window to query for releases
# This is NOT the window to query snuba for event data - nor the individual threshold windows
# ========================================================================
serializer = ReleaseThresholdStatusIndexSerializer(
data=request.query_params,
)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
environments_list = serializer.validated_data.get(
"environment"
) # list of environment names
project_slug_list = serializer.validated_data.get("projectSlug")
releases_list = serializer.validated_data.get("release") # list of release versions
try:
filter_params = self.get_filter_params(
request, organization, date_filter_optional=True, project_slugs=project_slug_list
)
except NoProjects:
raise NoProjects("No projects available")
start: datetime | None = filter_params["start"]
end: datetime | None = filter_params["end"]
logger.info(
"Checking release status health",
extra={
"start": start,
"end": end,
},
)
metrics.incr("release.threshold_health_status.attempt")
# ========================================================================
# Step 2: Fetch releases, prefetch projects & release_thresholds
# ========================================================================
release_query = Q(organization=organization, date_added__gte=start, date_added__lte=end)
if environments_list:
release_query &= Q(
releaseprojectenvironment__environment__name__in=environments_list,
)
if project_slug_list:
release_query &= Q(
projects__slug__in=project_slug_list,
)
if releases_list:
release_query &= Q(
version__in=releases_list,
)
queryset = (
Release.objects.filter(release_query)
.annotate(
date=F("date_added"), # transforms date_added into 'date'
)
.order_by("-date")
.distinct()
)
# prefetching the release_thresholds via the projects model
queryset.prefetch_related("projects__release_thresholds__environment")
queryset.prefetch_related("releaseprojectenvironment_set")
queryset.prefetch_related("deploy_set")
logger.info(
"Fetched releases",
extra={
"results": len(queryset),
"project_slugs": project_slug_list,
"releases": releases_list,
"environments": environments_list,
},
)
# ========================================================================
# Step 3: flatten thresholds and compile projects/release-thresholds by type
# ========================================================================
thresholds_by_type: DefaultDict[int, dict[str, list[Any]]] = defaultdict()
query_windows_by_type: DefaultDict[int, dict[str, datetime]] = defaultdict()
for release in queryset:
# TODO:
# We should update release model to preserve threshold states.
# if release.failed_thresholds/passed_thresholds exists - then skip calculating and just return thresholds
project_list = [
p
for p in release.projects.all()
if (project_slug_list and p.slug in project_slug_list) or (not project_slug_list)
]
for project in project_list:
thresholds_list: list[ReleaseThreshold] = [
t
for t in project.release_thresholds.all()
if (
environments_list
and t.environment
and t.environment.name in environments_list
)
or (not environments_list)
]
for threshold in thresholds_list:
if threshold.threshold_type not in thresholds_by_type:
thresholds_by_type[threshold.threshold_type] = {
"project_ids": [],
"releases": [],
"thresholds": [],
}
thresholds_by_type[threshold.threshold_type]["project_ids"].append(project.id)
thresholds_by_type[threshold.threshold_type]["releases"].append(release.version)
if threshold.threshold_type not in query_windows_by_type:
query_windows_by_type[threshold.threshold_type] = {
"start": datetime.now(tz=timezone.utc),
"end": datetime.now(tz=timezone.utc),
}
latest_deploy: Deploy | None = None
if threshold.environment:
# NOTE: if a threshold has no environment set, we monitor from start of the release creation
# If a deploy does not exist for the thresholds environment, we monitor from start of release creation
# ReleaseProjectEnvironment model
rpe_entry: ReleaseProjectEnvironment | None = next(
(
rpe
for rpe in release.releaseprojectenvironment_set.all()
if rpe.environment == threshold.environment
and rpe.project == project
),
None,
)
if rpe_entry:
last_deploy_id = rpe_entry.last_deploy_id
latest_deploy = next(
(
deploy
for deploy in release.deploy_set.all()
if deploy.id == last_deploy_id
),
None,
)
# NOTE: query window starts at the earliest release up until the latest threshold window
if latest_deploy:
threshold_start = latest_deploy.date_finished
else:
threshold_start = release.date
query_windows_by_type[threshold.threshold_type]["start"] = min(
threshold_start, query_windows_by_type[threshold.threshold_type]["start"]
)
query_windows_by_type[threshold.threshold_type]["end"] = max(
threshold_start + timedelta(seconds=threshold.window_in_seconds),
query_windows_by_type[threshold.threshold_type]["end"],
)
# NOTE: enriched threshold is SERIALIZED
# meaning project and environment models are dictionaries
enriched_threshold: EnrichedThreshold = serialize(threshold)
# NOTE: start/end for a threshold are different than start/end for querying data
enriched_threshold.update(
{
"key": self.construct_threshold_key(release=release, project=project),
"start": threshold_start,
"end": threshold_start
+ timedelta(
seconds=threshold.window_in_seconds
), # start + threshold.window
"release": release.version,
"project_slug": project.slug,
"project_id": project.id,
"is_healthy": False,
}
)
thresholds_by_type[threshold.threshold_type]["thresholds"].append(
enriched_threshold
)
# ========================================================================
# Step 4: Determine threshold status per threshold type and return results
# ========================================================================
release_threshold_health = defaultdict(list)
for threshold_type, filter_list in thresholds_by_type.items():
project_id_list = [proj_id for proj_id in filter_list["project_ids"]]
release_value_list = [release_version for release_version in filter_list["releases"]]
category_thresholds: list[EnrichedThreshold] = filter_list["thresholds"]
query_window = query_windows_by_type[threshold_type]
if threshold_type == ReleaseThresholdType.TOTAL_ERROR_COUNT:
metrics.incr("release.threshold_health_status.check.error_count")
"""
Fetch errors timeseries for all projects with an error_count threshold in desired releases
Iterate through timeseries given threshold window and determine health status
NOTE: Timeseries query start & end are determined by API param window (_not_ threshold window)
derived from fetched releases (earliest start & latest end)
IF the param window doesn't cover the full threshold window, results will be inaccurate
TODO: If too many results, then throw an error and request user to narrow their search window
"""
error_counts = get_errors_counts_timeseries_by_project_and_release(
end=query_window["end"],
environments_list=environments_list,
organization_id=organization.id,
project_id_list=project_id_list,
release_value_list=release_value_list,
start=query_window["start"],
)
logger.info(
"querying error counts",
extra={
"start": query_window["start"],
"end": query_window["end"],
"project_ids": project_id_list,
"releases": release_value_list,
"environments": environments_list,
"error_count_data": error_counts,
},
)
for ethreshold in category_thresholds:
is_healthy, metric_count = is_error_count_healthy(ethreshold, error_counts)
ethreshold.update({"is_healthy": is_healthy, "metric_value": metric_count})
release_threshold_health[ethreshold["key"]].append(
ethreshold
) # so we can fill all thresholds under the same key
elif threshold_type == ReleaseThresholdType.NEW_ISSUE_COUNT:
metrics.incr("release.threshold_health_status.check.new_issue_count")
"""
Query new issue counts for all projects with a new_issue_count threshold in desired releases
"""
new_issue_counts = get_new_issue_counts(
organization_id=organization.id,
thresholds=category_thresholds,
)
logger.info(
"querying new issue counts",
extra={
"start": query_window["start"],
"end": query_window["end"],
"project_ids": project_id_list,
"releases": release_value_list,
"environments": environments_list,
"new_issue_counts_data": new_issue_counts,
},
)
for ethreshold in category_thresholds:
is_healthy, metric_count = is_new_issue_count_healthy(
ethreshold, new_issue_counts
)
ethreshold.update({"is_healthy": is_healthy, "metric_value": metric_count})
release_threshold_health[ethreshold["key"]].append(
ethreshold
) # so we can fill all thresholds under the same key
elif threshold_type == ReleaseThresholdType.UNHANDLED_ISSUE_COUNT:
metrics.incr("release.threshold_health_status.check.unhandled_issue_count")
for ethreshold in category_thresholds:
release_threshold_health[ethreshold["key"]].append(
ethreshold
) # so we can fill all thresholds under the same key
elif threshold_type == ReleaseThresholdType.REGRESSED_ISSUE_COUNT:
metrics.incr("release.threshold_health_status.check.regressed_issue_count")
for ethreshold in category_thresholds:
release_threshold_health[ethreshold["key"]].append(
ethreshold
) # so we can fill all thresholds under the same key
elif threshold_type == ReleaseThresholdType.FAILURE_RATE:
metrics.incr("release.threshold_health_status.check.failure_rate")
for ethreshold in category_thresholds:
release_threshold_health[ethreshold["key"]].append(
ethreshold
) # so we can fill all thresholds under the same key
elif threshold_type == ReleaseThresholdType.CRASH_FREE_SESSION_RATE:
metrics.incr("release.threshold_health_status.check.crash_free_session_rate")
query_window = query_windows_by_type[threshold_type]
sessions_data: SessionsQueryResult | None = None
try:
sessions_data = fetch_sessions_data(
end=query_window["end"],
request=request,
organization=organization,
params=filter_params,
start=query_window["start"],
)
except Exception as exc:
# TODO: handle InvalidPararms
# sentry.exceptions.InvalidParams: Your interval and date range would create too many results. Use a larger interval, or a smaller date range.
logger.exception(str(exc))
logger.info(
"fetching sessions data",
extra={
"start": query_window["start"],
"end": query_window["end"],
"project_ids": project_id_list,
"releases": release_value_list,
"environments": environments_list,
"error_count_data": error_counts,
},
)
if sessions_data:
for ethreshold in category_thresholds:
is_healthy, rate = is_crash_free_rate_healthy_check(
ethreshold, dict(sessions_data), CRASH_SESSIONS_DISPLAY
)
ethreshold.update({"is_healthy": is_healthy, "metric_value": rate})
release_threshold_health[ethreshold["key"]].append(ethreshold)
elif threshold_type == ReleaseThresholdType.CRASH_FREE_USER_RATE:
metrics.incr("release.threshold_health_status.check.crash_free_user_rate")
for ethreshold in category_thresholds:
release_threshold_health[ethreshold["key"]].append(
ethreshold
) # so we can fill all thresholds under the same key
return Response(release_threshold_health, status=200)
def construct_threshold_key(self, project: Project, release: Release) -> str:
"""
Consistent key helps to determine which thresholds can be grouped together.
project_slug - release_version
NOTE: release versions can contain special characters... `-` delimiter may not be appropriate
TODO: move this into a separate helper?
"""
return f"{project.slug}-{release.version}"
| ReleaseThresholdStatusIndexEndpoint |
python | pytorch__pytorch | torch/distributed/elastic/rendezvous/api.py | {
"start": 4400,
"end": 7806
} | class ____(ABC):
"""Main rendezvous interface.
Note:
Distributed Torch users normally **do not** need to implement their own
``RendezvousHandler``. An implementation based on C10d Store is already
provided, and is recommended for most users.
"""
@abstractmethod
def get_backend(self) -> str:
"""Return the name of the rendezvous backend."""
@property
def use_agent_store(self) -> bool:
"""Indicates that store reference returned by :py:meth:`next_rendezvous` can be shared with user
applications and will be available during application lifecycle.
Rendezvous handler impl will share store details as instance of :py:class:`RendezvousStoreInfo`.
Applications as a convention use `MASTER_ADDR`/`MASTER_PORT` env variables to lookup the store.
"""
return False
@abstractmethod
def next_rendezvous(self) -> RendezvousInfo:
"""Main entry-point into the rendezvous barrier.
Blocks until the rendezvous is complete and the current process is
included in the formed worker group, or a timeout occurs, or the
rendezvous was marked closed.
Returns:
Instance of :py:class:`RendezvousInfo`.
Raises:
RendezvousClosedError:
The rendezvous is closed.
RendezvousConnectionError:
The connection to the rendezvous backend has failed.
RendezvousStateError:
The rendezvous state is corrupt.
RendezvousTimeoutError:
The rendezvous did not complete on time.
"""
@abstractmethod
def is_closed(self) -> bool:
"""Check whether the rendezvous has been closed.
A closed rendezvous means all future attempts to re-rendezvous within
same job will fail.
``is_closed()`` and :py:meth:`set_closed` have semantics of eventual
propagation and should not be used for synchronization. The intention is
that if at least one node decides the job is finished, it will close the
rendezvous, and other nodes will soon observe this and stop running as
well.
"""
@abstractmethod
def set_closed(self):
"""Mark the rendezvous as closed."""
@abstractmethod
def num_nodes_waiting(self) -> int:
"""Return the number of nodes who arrived late at the rendezvous
barrier, hence were not included in the current worker group.
Callers should periodically call this method to check whether new
nodes are waiting to join the job and if so admit them by calling
:py:meth:`next_rendezvous()` (re-rendezvous).
"""
@abstractmethod
def get_run_id(self) -> str:
"""Return the run id of the rendezvous.
The run id is a user-defined id that uniquely identifies an instance of
a distributed application. It typically maps to a job id and is used to
allow nodes to join the correct distributed application.
"""
@abstractmethod
def shutdown(self) -> bool:
"""Close all resources that were open for the rendezvous.
Example::
rdzv_handler = ...
try:
store, rank, world_size = rdzv_handler.next_rendezvous()
finally:
rdzv_handler.shutdown()
"""
| RendezvousHandler |
python | pyinstaller__pyinstaller | bootloader/waflib/Task.py | {
"start": 19098,
"end": 27668
} | class ____(object):
def __init__(self, prev, next):
self.prev = prev
self.next = next
self.done = False
def get_hasrun(self):
for k in self.prev:
if not k.hasrun:
return NOT_RUN
return SUCCESS
hasrun = property(get_hasrun, None)
def set_precedence_constraints(tasks):
cstr_groups = Utils.defaultdict(list)
for x in tasks:
h = x.hash_constraints()
cstr_groups[h].append(x)
keys = list(cstr_groups.keys())
maxi = len(keys)
for i in range(maxi):
t1 = cstr_groups[keys[i]][0]
for j in range(i + 1, maxi):
t2 = cstr_groups[keys[j]][0]
if is_before(t1, t2):
a = i
b = j
elif is_before(t2, t1):
a = j
b = i
else:
continue
a = cstr_groups[keys[a]]
b = cstr_groups[keys[b]]
if len(a) < 2 or len(b) < 2:
for x in b:
x.run_after.update(a)
else:
group = TaskGroup(set(a), set(b))
for x in b:
x.run_after.add(group)
def funex(c):
dc = {}
exec(c, dc)
return dc['f']
re_cond = re.compile(r'(?P<var>\w+)|(?P<or>\|)|(?P<and>&)')
re_novar = re.compile(r'^(SRC|TGT)\W+.*?$')
reg_act = re.compile(r'(?P<backslash>\\)|(?P<dollar>\$\$)|(?P<subst>\$\{(?P<var>\w+)(?P<code>.*?)\})', re.M)
def compile_fun_shell(line):
extr = []
def repl(match):
g = match.group
if g('dollar'):
return "$"
elif g('backslash'):
return '\\\\'
elif g('subst'):
extr.append((g('var'), g('code')))
return "%s"
return None
line = reg_act.sub(repl, line) or line
dvars = []
def add_dvar(x):
if x not in dvars:
dvars.append(x)
def replc(m):
if m.group('and'):
return ' and '
elif m.group('or'):
return ' or '
else:
x = m.group('var')
add_dvar(x)
return 'env[%r]' % x
parm = []
app = parm.append
for (var, meth) in extr:
if var == 'SRC':
if meth:
app('tsk.inputs%s' % meth)
else:
app('" ".join([a.path_from(cwdx) for a in tsk.inputs])')
elif var == 'TGT':
if meth:
app('tsk.outputs%s' % meth)
else:
app('" ".join([a.path_from(cwdx) for a in tsk.outputs])')
elif meth:
if meth.startswith(':'):
add_dvar(var)
m = meth[1:]
if m == 'SRC':
m = '[a.path_from(cwdx) for a in tsk.inputs]'
elif m == 'TGT':
m = '[a.path_from(cwdx) for a in tsk.outputs]'
elif re_novar.match(m):
m = '[tsk.inputs%s]' % m[3:]
elif re_novar.match(m):
m = '[tsk.outputs%s]' % m[3:]
else:
add_dvar(m)
if m[:3] not in ('tsk', 'gen', 'bld'):
m = '%r' % m
app('" ".join(tsk.colon(%r, %s))' % (var, m))
elif meth.startswith('?'):
expr = re_cond.sub(replc, meth[1:])
app('p(%r) if (%s) else ""' % (var, expr))
else:
call = '%s%s' % (var, meth)
add_dvar(call)
app(call)
else:
add_dvar(var)
app("p('%s')" % var)
if parm:
parm = "%% (%s) " % (',\n\t\t'.join(parm))
else:
parm = ''
c = COMPILE_TEMPLATE_SHELL % (line, parm)
Logs.debug('action: %s', c.strip().splitlines())
return (funex(c), dvars)
reg_act_noshell = re.compile(
r"(?P<space>\s+)|(?P<subst>\$\{(?P<var>\w+)(?P<code>.*?)\})|(?P<text>([^$ \t\n\r\f\v]|\$\$)+)", re.M
)
def compile_fun_noshell(line):
buf = []
dvars = []
merge = False
app = buf.append
def add_dvar(x):
if x not in dvars:
dvars.append(x)
def replc(m):
if m.group('and'):
return ' and '
elif m.group('or'):
return ' or '
else:
x = m.group('var')
add_dvar(x)
return 'env[%r]' % x
for m in reg_act_noshell.finditer(line):
if m.group('space'):
merge = False
continue
elif m.group('text'):
app('[%r]' % m.group('text').replace('$$', '$'))
elif m.group('subst'):
var = m.group('var')
code = m.group('code')
if var == 'SRC':
if code:
app('[tsk.inputs%s]' % code)
else:
app('[a.path_from(cwdx) for a in tsk.inputs]')
elif var == 'TGT':
if code:
app('[tsk.outputs%s]' % code)
else:
app('[a.path_from(cwdx) for a in tsk.outputs]')
elif code:
if code.startswith(':'):
add_dvar(var)
m = code[1:]
if m == 'SRC':
m = '[a.path_from(cwdx) for a in tsk.inputs]'
elif m == 'TGT':
m = '[a.path_from(cwdx) for a in tsk.outputs]'
elif re_novar.match(m):
m = '[tsk.inputs%s]' % m[3:]
elif re_novar.match(m):
m = '[tsk.outputs%s]' % m[3:]
else:
add_dvar(m)
if m[:3] not in ('tsk', 'gen', 'bld'):
m = '%r' % m
app('tsk.colon(%r, %s)' % (var, m))
elif code.startswith('?'):
expr = re_cond.sub(replc, code[1:])
app('to_list(env[%r] if (%s) else [])' % (var, expr))
else:
call = '%s%s' % (var, code)
add_dvar(call)
app('to_list(%s)' % call)
else:
app('to_list(env[%r])' % var)
add_dvar(var)
if merge:
tmp = 'merge(%s, %s)' % (buf[-2], buf[-1])
del buf[-1]
buf[-1] = tmp
merge = True
buf = ['lst.extend(%s)' % x for x in buf]
fun = COMPILE_TEMPLATE_NOSHELL % "\n\t".join(buf)
Logs.debug('action: %s', fun.strip().splitlines())
return (funex(fun), dvars)
def compile_fun(line, shell=False):
if isinstance(line, str):
if line.find('<') > 0 or line.find('>') > 0 or line.find('&&') > 0:
shell = True
else:
dvars_lst = []
funs_lst = []
for x in line:
if isinstance(x, str):
fun, dvars = compile_fun(x, shell)
dvars_lst += dvars
funs_lst.append(fun)
else:
funs_lst.append(x)
def composed_fun(task):
for x in funs_lst:
ret = x(task)
if ret:
return ret
return None
return composed_fun, dvars_lst
if shell:
return compile_fun_shell(line)
else:
return compile_fun_noshell(line)
def compile_sig_vars(vars):
buf = []
for x in sorted(vars):
if x[:3] in ('tsk', 'gen', 'bld'):
buf.append('buf.append(%s)' % x)
if buf:
return funex(COMPILE_TEMPLATE_SIG_VARS % '\n\t'.join(buf))
return None
def task_factory(
name, func=None, vars=None, color='GREEN', ext_in=[], ext_out=[], before=[], after=[], shell=False, scan=None
):
params = {
'vars': vars or [],
'color': color,
'name': name,
'shell': shell,
'scan': scan,
}
if isinstance(func, str) or isinstance(func, tuple):
params['run_str'] = func
else:
params['run'] = func
cls = type(Task)(name, (Task,), params)
classes[name] = cls
if ext_in:
cls.ext_in = Utils.to_list(ext_in)
if ext_out:
cls.ext_out = Utils.to_list(ext_out)
if before:
cls.before = Utils.to_list(before)
if after:
cls.after = Utils.to_list(after)
return cls
def deep_inputs(cls):
def sig_explicit_deps(self):
Task.sig_explicit_deps(self)
Task.sig_deep_inputs(self)
cls.sig_explicit_deps = sig_explicit_deps
return cls
TaskBase = Task
| TaskGroup |
python | huggingface__transformers | src/transformers/models/qwen2_5_omni/processing_qwen2_5_omni.py | {
"start": 1530,
"end": 2213
} | class ____(ProcessingKwargs, total=False):
videos_kwargs: Qwen2_5_OmniVideosKwargs
_defaults = {
"text_kwargs": {
"padding": False,
"padding_side": "left",
},
"videos_kwargs": {
"seconds_per_chunk": 2.0,
"position_id_per_seconds": 25,
"use_audio_in_video": False,
"size": {
"shortest_edge": 128 * 28 * 28,
"longest_edge": 768 * 28 * 28,
},
},
"audio_kwargs": {
"sampling_rate": 16000,
"padding": "max_length",
"return_attention_mask": True,
},
}
| Qwen2_5OmniProcessorKwargs |
python | ansible__ansible | test/lib/ansible_test/_internal/util_common.py | {
"start": 2147,
"end": 2930
} | class ____:
"""A simple substitution template for shell scripts."""
def __init__(self, template: str) -> None:
self.template = template
def substitute(self, **kwargs: t.Union[str, list[str]]) -> str:
"""Return a string templated with the given arguments."""
kvp = dict((k, self.quote(v)) for k, v in kwargs.items())
pattern = re.compile(r'#{(?P<name>[^}]+)}')
value = pattern.sub(lambda match: kvp[match.group('name')], self.template)
return value
@staticmethod
def quote(value: t.Union[str, list[str]]) -> str:
"""Return a shell quoted version of the given value."""
if isinstance(value, list):
return shlex.quote(' '.join(value))
return shlex.quote(value)
| ShellScriptTemplate |
python | streamlit__streamlit | lib/streamlit/components/v2/bidi_component/serialization.py | {
"start": 6697,
"end": 9403
} | class ____:
"""Serialization and deserialization logic for a bidirectional component.
This class handles the conversion of component state between the frontend
(JSON strings) and the backend (Python objects).
The canonical shape is a flat mapping of state keys to values.
Parameters
----------
default
A dictionary of default values to be applied to the state when
deserializing, if the corresponding keys are not already present.
"""
default: dict[str, Any] | None = None
def deserialize(self, ui_value: str | dict[str, Any] | None) -> BidiComponentState:
"""Deserialize the component's state from a frontend value.
Parameters
----------
ui_value
The value received from the frontend, which can be a JSON string,
a dictionary, or `None`.
Returns
-------
BidiComponentState
The deserialized state as a flat mapping.
"""
# Normalize the incoming JSON payload into a dict. Any failure to decode
# (or an unexpected non-mapping structure) results in an empty mapping
# so that the returned type adheres to :class:`BidiComponentState`.
deserialized_value: dict[str, Any]
if isinstance(ui_value, dict):
deserialized_value = ui_value
elif isinstance(ui_value, str):
try:
parsed = json.loads(ui_value)
deserialized_value = parsed if isinstance(parsed, dict) else {}
except (json.JSONDecodeError, TypeError) as e:
_LOGGER.warning(
"Failed to deserialize component state from frontend: %s",
e,
exc_info=e,
)
deserialized_value = {}
else:
deserialized_value = {}
# Apply default values for keys that don't exist in the current state
if self.default is not None:
for default_key, default_value in self.default.items():
if default_key not in deserialized_value:
deserialized_value[default_key] = default_value
state: BidiComponentState = cast(
"BidiComponentState", AttributeDictionary(deserialized_value)
)
return state
def serialize(self, value: Any) -> str:
"""Serialize the component's state into a JSON string for the frontend.
Parameters
----------
value
The component state to serialize.
Returns
-------
str
A JSON string representation of the value.
"""
return json.dumps(value)
| BidiComponentSerde |
python | huggingface__transformers | src/transformers/models/seggpt/image_processing_seggpt.py | {
"start": 3143,
"end": 31072
} | class ____(BaseImageProcessor):
r"""
Constructs a SegGpt image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `(size["height"],
size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method.
size (`dict`, *optional*, defaults to `{"height": 448, "width": 448}`):
Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
`preprocess` method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
parameter in the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
`preprocess` method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
method.
image_mean (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
Mean to use if normalizing the image. This is a float or list of floats the length of the number of
channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
image_std (`float` or `list[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the prompt mask to RGB format. Can be overridden by the `do_convert_rgb` parameter in the
`preprocess` method.
"""
model_input_names = ["pixel_values"]
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_convert_rgb: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"height": 448, "width": 448}
size = get_size_dict(size)
self.do_resize = do_resize
self.do_rescale = do_rescale
self.do_normalize = do_normalize
self.size = size
self.resample = resample
self.rescale_factor = rescale_factor
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
self.do_convert_rgb = do_convert_rgb
def get_palette(self, num_labels: int) -> list[tuple[int, int]]:
"""Build a palette to map the prompt mask from a single channel to a 3 channel RGB.
Args:
num_labels (`int`):
Number of classes in the segmentation task (excluding the background).
Returns:
`list[tuple[int, int]]`: Palette to map the prompt mask from a single channel to a 3 channel RGB.
"""
return build_palette(num_labels)
def mask_to_rgb(
self,
image: np.ndarray,
palette: Optional[list[tuple[int, int]]] = None,
data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""Converts a segmentation map to RGB format.
Args:
image (`np.ndarray`):
Segmentation map with dimensions (height, width) where pixel values represent the class index.
palette (`list[tuple[int, int]]`, *optional*, defaults to `None`):
Palette to use to convert the mask to RGB format. If unset, the mask is duplicated across the channel
dimension.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
Returns:
`np.ndarray`: The mask in RGB format.
"""
return mask_to_rgb(image, palette=palette, data_format=data_format)
# Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize with PILImageResampling.BILINEAR->PILImageResampling.BICUBIC
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
"""
size = get_size_dict(size)
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
output_size = (size["height"], size["width"])
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
def _preprocess_step(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
do_convert_rgb: Optional[bool] = None,
num_labels: Optional[int] = None,
**kwargs,
):
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to _preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after
resizing.
resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
`PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BICUBIC`. Only has
an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use if `do_normalize` is set to `True`.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the prompt mask to RGB format. If `num_labels` is specified, a palette will be built
to map the prompt mask from a single channel to a 3 channel RGB. If unset, the prompt mask is duplicated
across the channel dimension. Must be set to `False` if the prompt mask is already in RGB format.
num_labels: (`int`, *optional*):
Number of classes in the segmentation task (excluding the background). If specified, a palette will be
built, assuming that class_idx 0 is the background, to map the prompt mask from a single class_idx
channel to a 3 channel RGB. Not specifying this will result in the prompt mask either being passed
through as is if it is already in RGB format or being duplicated across the channel dimension.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
resample = resample if resample is not None else self.resample
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
size = size if size is not None else self.size
size_dict = get_size_dict(size)
# If segmentation map is passed we expect 2D images
images = make_flat_list_of_images(images, expected_ndims=2 if do_convert_rgb else 3)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None and not do_convert_rgb:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
if do_convert_rgb:
palette = self.get_palette(num_labels) if num_labels is not None else None
# Since this is the input for the next transformations its format should be the same as the input_data_format
images = [
self.mask_to_rgb(image=image, palette=palette, data_format=ChannelDimension.FIRST) for image in images
]
input_data_format = ChannelDimension.FIRST
if do_resize:
images = [
self.resize(image=image, size=size_dict, resample=resample, input_data_format=input_data_format)
for image in images
]
if do_rescale:
images = [
self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
for image in images
]
if do_normalize:
images = [
self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
for image in images
]
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
]
return images
def preprocess(
self,
images: Optional[ImageInput] = None,
prompt_images: Optional[ImageInput] = None,
prompt_masks: Optional[ImageInput] = None,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_convert_rgb: Optional[bool] = None,
num_labels: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
):
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to _preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
prompt_images (`ImageInput`):
Prompt image to _preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
prompt_masks (`ImageInput`):
Prompt mask from prompt image to _preprocess that specify prompt_masks value in the preprocessed output.
Can either be in the format of segmentation maps (no channels) or RGB images. If in the format of
RGB images, `do_convert_rgb` should be set to `False`. If in the format of segmentation maps, `num_labels`
specifying `num_labels` is recommended to build a palette to map the prompt mask from a single channel to
a 3 channel RGB. If `num_labels` is not specified, the prompt mask will be duplicated across the channel
dimension.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after
resizing.
resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
`PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BICUBIC`. Only has
an effect if `do_resize` is set to `True`. Doesn't apply to prompt mask as it is resized using nearest.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image values between [0 - 1].
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use if `do_normalize` is set to `True`.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the prompt mask to RGB format. If `num_labels` is specified, a palette will be built
to map the prompt mask from a single channel to a 3 channel RGB. If unset, the prompt mask is duplicated
across the channel dimension. Must be set to `False` if the prompt mask is already in RGB format.
num_labels: (`int`, *optional*):
Number of classes in the segmentation task (excluding the background). If specified, a palette will be
built, assuming that class_idx 0 is the background, to map the prompt mask from a plain segmentation map
with no channels to a 3 channel RGB. Not specifying this will result in the prompt mask either being passed
through as is if it is already in RGB format (if `do_convert_rgb` is false) or being duplicated
across the channel dimension.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
if all(v is None for v in [images, prompt_images, prompt_masks]):
raise ValueError("At least one of images, prompt_images, prompt_masks must be specified.")
data = {}
if images is not None:
images = self._preprocess_step(
images,
is_mask=False,
do_resize=do_resize,
size=size,
resample=resample,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_convert_rgb=False,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
data["pixel_values"] = images
if prompt_images is not None:
prompt_images = self._preprocess_step(
prompt_images,
is_mask=False,
do_resize=do_resize,
size=size,
resample=resample,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_convert_rgb=False,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
data["prompt_pixel_values"] = prompt_images
if prompt_masks is not None:
prompt_masks = self._preprocess_step(
prompt_masks,
do_resize=do_resize,
size=size,
resample=PILImageResampling.NEAREST,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_convert_rgb=do_convert_rgb,
num_labels=num_labels,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
data["prompt_masks"] = prompt_masks
return BatchFeature(data=data, tensor_type=return_tensors)
def post_process_semantic_segmentation(
self, outputs, target_sizes: Optional[list[tuple[int, int]]] = None, num_labels: Optional[int] = None
):
"""
Converts the output of [`SegGptImageSegmentationOutput`] into segmentation maps. Only supports
PyTorch.
Args:
outputs ([`SegGptImageSegmentationOutput`]):
Raw outputs of the model.
target_sizes (`list[tuple[int, int]]`, *optional*):
List of length (batch_size), where each list item (`tuple[int, int]`) corresponds to the requested
final size (height, width) of each prediction. If left to None, predictions will not be resized.
num_labels (`int`, *optional*):
Number of classes in the segmentation task (excluding the background). If specified, a palette will be
built, assuming that class_idx 0 is the background, to map prediction masks from RGB values to class
indices. This value should be the same used when preprocessing inputs.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
specified). Each entry of each `torch.Tensor` correspond to a semantic class id.
"""
requires_backends(self, ["torch"])
# batch_size x num_channels x 2*height x width
masks = outputs.pred_masks
# Predicted mask and prompt are concatenated in the height dimension
# batch_size x num_channels x height x width
masks = masks[:, :, masks.shape[2] // 2 :, :]
# To unnormalize we need to permute to channel last
# batch_size x height x width x num_channels
std = torch.tensor(self.image_std).to(masks.device)
mean = torch.tensor(self.image_mean).to(masks.device)
masks = masks.permute(0, 2, 3, 1) * std + mean
# batch_size x num_channels x height x width
masks = masks.permute(0, 3, 1, 2)
# Clip to match with palette if specified
masks = torch.clip(masks * 255, 0, 255)
semantic_segmentation = []
palette_tensor = None
palette = self.get_palette(num_labels) if num_labels is not None else None
if palette is not None:
palette_tensor = torch.tensor(palette).to(device=masks.device, dtype=torch.float)
_, num_channels, _, _ = masks.shape
palette_tensor = palette_tensor.view(1, 1, num_labels + 1, num_channels)
for idx, mask in enumerate(masks):
if target_sizes is not None:
mask = torch.nn.functional.interpolate(
mask.unsqueeze(0),
size=target_sizes[idx],
mode="nearest",
)[0]
if num_labels is not None:
channels, height, width = mask.shape
dist = mask.permute(1, 2, 0).view(height, width, 1, channels)
dist = dist - palette_tensor
dist = torch.pow(dist, 2)
dist = torch.sum(dist, dim=-1)
pred = dist.argmin(dim=-1)
else:
# If no palette is specified SegGpt will try to paint using the mask class idx as RGB
pred = mask.mean(dim=0).int()
semantic_segmentation.append(pred)
return semantic_segmentation
__all__ = ["SegGptImageProcessor"]
| SegGptImageProcessor |
python | pandas-dev__pandas | pandas/tests/extension/list/array.py | {
"start": 793,
"end": 3973
} | class ____(ExtensionArray):
dtype = ListDtype()
__array_priority__ = 1000
def __init__(self, values, dtype=None, copy=False) -> None:
if not isinstance(values, np.ndarray):
raise TypeError("Need to pass a numpy array as values")
for val in values:
if not isinstance(val, self.dtype.type) and not pd.isna(val):
raise TypeError("All values must be of type " + str(self.dtype.type))
self.data = values
@classmethod
def _from_sequence(cls, scalars, *, dtype=None, copy=False):
data = np.empty(len(scalars), dtype=object)
data[:] = scalars
return cls(data)
def __getitem__(self, item):
if isinstance(item, numbers.Integral):
return self.data[item]
else:
# slice, list-like, mask
return type(self)(self.data[item])
def __len__(self) -> int:
return len(self.data)
def isna(self):
return np.array(
[not isinstance(x, list) and np.isnan(x) for x in self.data], dtype=bool
)
def take(self, indexer, allow_fill=False, fill_value=None):
# re-implement here, since NumPy has trouble setting
# sized objects like UserDicts into scalar slots of
# an ndarary.
indexer = np.asarray(indexer)
msg = (
"Index is out of bounds or cannot do a non-empty take from an empty array."
)
if allow_fill:
if fill_value is None:
fill_value = self.dtype.na_value
# bounds check
if (indexer < -1).any():
raise ValueError
try:
output = [
self.data[loc] if loc != -1 else fill_value for loc in indexer
]
except IndexError as err:
raise IndexError(msg) from err
else:
try:
output = [self.data[loc] for loc in indexer]
except IndexError as err:
raise IndexError(msg) from err
return self._from_sequence(output)
def copy(self):
return type(self)(self.data[:])
def astype(self, dtype, copy=True):
if isinstance(dtype, type(self.dtype)) and dtype == self.dtype:
if copy:
return self.copy()
return self
elif is_string_dtype(dtype) and not is_object_dtype(dtype):
# numpy has problems with astype(str) for nested elements
return np.array([str(x) for x in self.data], dtype=dtype)
elif not copy:
return np.asarray(self.data, dtype=dtype)
else:
return np.array(self.data, dtype=dtype, copy=copy)
@classmethod
def _concat_same_type(cls, to_concat):
data = np.concatenate([x.data for x in to_concat])
return cls(data)
def make_data(n: int):
# TODO: Use a regular dict. See _NDFrameIndexer._setitem_with_indexer
rng = np.random.default_rng(2)
data = np.empty(n, dtype=object)
data[:] = [
[rng.choice(list(string.ascii_letters)) for _ in range(rng.integers(0, 10))]
for _ in range(n)
]
return data
| ListArray |
python | lepture__authlib | tests/flask/test_oauth1/oauth1_server.py | {
"start": 2227,
"end": 3141
} | class ____(TemporaryCredentialMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey("user.id", ondelete="CASCADE"))
user = db.relationship("User")
client_id = db.Column(db.String(48), index=True)
oauth_token = db.Column(db.String(84), unique=True, index=True)
oauth_token_secret = db.Column(db.String(84))
oauth_verifier = db.Column(db.String(84))
oauth_callback = db.Column(db.Text, default="")
def get_user_id(self):
return self.user_id
def get_client_id(self):
return self.client_id
def get_redirect_uri(self):
return self.oauth_callback
def check_verifier(self, verifier):
return self.oauth_verifier == verifier
def get_oauth_token(self):
return self.oauth_token
def get_oauth_token_secret(self):
return self.oauth_token_secret
| TemporaryCredential |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 129685,
"end": 130564
} | class ____(GeneratedAirbyteSource):
@public
def __init__(self, name: str, api_key: str, start_date: str, interval: str):
"""Airbyte Source for Chartmogul.
Documentation can be found at https://docs.airbyte.com/integrations/sources/chartmogul
Args:
name (str): The name of the destination.
api_key (str): Chartmogul API key
start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. When feasible, any data before this date will not be replicated.
interval (str): Some APIs such as Metrics require intervals to cluster data.
"""
self.api_key = check.str_param(api_key, "api_key")
self.start_date = check.str_param(start_date, "start_date")
self.interval = check.str_param(interval, "interval")
super().__init__("Chartmogul", name)
| ChartmogulSource |
python | streamlit__streamlit | lib/streamlit/errors.py | {
"start": 3035,
"end": 3094
} | class ____(StreamlitAPIException):
pass
| DuplicateWidgetID |
python | Pylons__pyramid | src/pyramid/static.py | {
"start": 11395,
"end": 12265
} | class ____:
"""
An implementation of :class:`~pyramid.interfaces.ICacheBuster` which adds
a token for cache busting in the query string of an asset URL.
The optional ``param`` argument determines the name of the parameter added
to the query string and defaults to ``'x'``.
To use this class, subclass it and provide a ``tokenize`` method which
accepts ``request, pathspec, kw`` and returns a token.
.. versionadded:: 1.6
"""
def __init__(self, param='x'):
self.param = param
def __call__(self, request, subpath, kw):
token = self.tokenize(request, subpath, kw)
query = kw.setdefault('_query', {})
if isinstance(query, dict):
query[self.param] = token
else:
kw['_query'] = tuple(query) + ((self.param, token),)
return subpath, kw
| QueryStringCacheBuster |
python | palantir__python-language-server | pyls/config/source.py | {
"start": 138,
"end": 2481
} | class ____(object):
"""Base class for implementing a config source."""
def __init__(self, root_path):
self.root_path = root_path
self.is_windows = sys.platform == 'win32'
self.xdg_home = os.environ.get(
'XDG_CONFIG_HOME', os.path.expanduser('~/.config')
)
def user_config(self):
"""Return user-level (i.e. home directory) configuration."""
raise NotImplementedError()
def project_config(self, document_path):
"""Return project-level (i.e. workspace directory) configuration."""
raise NotImplementedError()
@staticmethod
def read_config_from_files(files):
config = configparser.RawConfigParser()
for filename in files:
if os.path.exists(filename) and not os.path.isdir(filename):
config.read(filename)
return config
@staticmethod
def parse_config(config, key, options):
"""Parse the config with the given options."""
conf = {}
for source, destination, opt_type in options:
opt_value = _get_opt(config, key, source, opt_type)
if opt_value is not None:
_set_opt(conf, destination, opt_value)
return conf
def _get_opt(config, key, option, opt_type):
"""Get an option from a configparser with the given type."""
for opt_key in [option, option.replace('-', '_')]:
if not config.has_option(key, opt_key):
continue
if opt_type == bool:
return config.getboolean(key, opt_key)
if opt_type == int:
return config.getint(key, opt_key)
if opt_type == str:
return config.get(key, opt_key)
if opt_type == list:
return _parse_list_opt(config.get(key, opt_key))
raise ValueError("Unknown option type: %s" % opt_type)
def _parse_list_opt(string):
return [s.strip() for s in string.split(",") if s.strip()]
def _set_opt(config_dict, path, value):
"""Set the value in the dictionary at the given path if the value is not None."""
if value is None:
return
if '.' not in path:
config_dict[path] = value
return
key, rest = path.split(".", 1)
if key not in config_dict:
config_dict[key] = {}
_set_opt(config_dict[key], rest, value)
| ConfigSource |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_X.py | {
"start": 2778,
"end": 4204
} | class ____(Benchmark):
r"""
Xin-She Yang 3 objective function.
This class defines the Xin-She Yang 3 [1]_ global optimization problem.
This is a multimodal minimization problem defined as follows:
.. math::
f_{\text{XinSheYang03}}(x) = e^{-\sum_{i=1}^{n} (x_i/\beta)^{2m}}
- 2e^{-\sum_{i=1}^{n} x_i^2}
\prod_{i=1}^{n} \cos^2(x_i)
Where, in this exercise, :math:`\beta = 15` and :math:`m = 3`.
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-20, 20]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = -1` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-20.0] * self.N, [20.0] * self.N))
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = -1.0
def fun(self, x, *args):
self.nfev += 1
beta, m = 15.0, 5.0
u = sum((x / beta) ** (2 * m))
v = sum(x ** 2)
w = prod(cos(x) ** 2)
return exp(-u) - 2 * exp(-v) * w
| XinSheYang03 |
python | realpython__materials | python-unittest/vehicles.py | {
"start": 106,
"end": 249
} | class ____(Vehicle):
def __init__(self, make, model, max_speed):
super().__init__(make, model)
self.max_speed = max_speed
| Car |
python | plotly__plotly.py | plotly/graph_objs/volume/colorbar/_tickformatstop.py | {
"start": 233,
"end": 8509
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "volume.colorbar"
_path_str = "volume.colorbar.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs,
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.volume.colorba
r.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super().__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.volume.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.volume.colorbar.Tickformatstop`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("dtickrange", arg, dtickrange)
self._set_property("enabled", arg, enabled)
self._set_property("name", arg, name)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("value", arg, value)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Tickformatstop |
python | cookiecutter__cookiecutter | cookiecutter/exceptions.py | {
"start": 361,
"end": 643
} | class ____(CookiecutterException):
"""
Exception for when a project's input dir is not templated.
The name of the input directory should always contain a string that is
rendered to something else, so that input_dir != output_dir.
"""
| NonTemplatedInputDirException |
python | anthropics__anthropic-sdk-python | src/anthropic/types/shared/error_response.py | {
"start": 256,
"end": 377
} | class ____(BaseModel):
error: ErrorObject
request_id: Optional[str] = None
type: Literal["error"]
| ErrorResponse |
python | ansible__ansible | test/lib/ansible_test/_internal/host_profiles.py | {
"start": 14981,
"end": 15338
} | class ____[THostConfig: HostConfig](HostProfile[THostConfig], metaclass=abc.ABCMeta):
"""Base class for profiles offering SSH connectivity."""
@abc.abstractmethod
def get_controller_target_connections(self) -> list[SshConnection]:
"""Return SSH connection(s) for accessing the host as a target from the controller."""
| SshTargetHostProfile |
python | sqlalchemy__sqlalchemy | examples/sharding/separate_tables.py | {
"start": 3354,
"end": 10852
} | class ____(Base):
__tablename__ = "_prefix__weather_reports"
id: Mapped[int] = mapped_column(primary_key=True)
location_id: Mapped[int] = mapped_column(
ForeignKey("_prefix__weather_locations.id")
)
temperature: Mapped[float]
report_time: Mapped[datetime.datetime] = mapped_column(
default=datetime.datetime.now
)
location: Mapped[WeatherLocation] = relationship(back_populates="reports")
def __init__(self, temperature: float):
self.temperature = temperature
# define sharding functions.
# we'll use a straight mapping of a particular set of "country"
# attributes to shard id.
shard_lookup = {
"North America": "north_america",
"Asia": "asia",
"Europe": "europe",
"South America": "south_america",
}
def shard_chooser(mapper, instance, clause=None):
"""shard chooser.
looks at the given instance and returns a shard id
note that we need to define conditions for
the WeatherLocation class, as well as our secondary Report class which will
point back to its WeatherLocation via its 'location' attribute.
"""
if isinstance(instance, WeatherLocation):
return shard_lookup[instance.continent]
else:
return shard_chooser(mapper, instance.location)
def identity_chooser(mapper, primary_key, *, lazy_loaded_from, **kw):
"""identity chooser.
given a primary key, returns a list of shards
to search. here, we don't have any particular information from a
pk so we just return all shard ids. often, you'd want to do some
kind of round-robin strategy here so that requests are evenly
distributed among DBs.
"""
if lazy_loaded_from:
# if we are in a lazy load, we can look at the parent object
# and limit our search to that same shard, assuming that's how we've
# set things up.
return [lazy_loaded_from.identity_token]
else:
return ["north_america", "asia", "europe", "south_america"]
def execute_chooser(context):
"""statement execution chooser.
this also returns a list of shard ids, which can just be all of them. but
here we'll search into the execution context in order to try to narrow down
the list of shards to SELECT.
"""
ids = []
# we'll grab continent names as we find them
# and convert to shard ids
for column, operator, value in _get_select_comparisons(context.statement):
# "shares_lineage()" returns True if both columns refer to the same
# statement column, adjusting for any annotations present.
# (an annotation is an internal clone of a Column object
# and occur when using ORM-mapped attributes like
# "WeatherLocation.continent"). A simpler comparison, though less
# accurate, would be "column.key == 'continent'".
if column.shares_lineage(WeatherLocation.__table__.c.continent):
if operator == operators.eq:
ids.append(shard_lookup[value])
elif operator == operators.in_op:
ids.extend(shard_lookup[v] for v in value)
if len(ids) == 0:
return ["north_america", "asia", "europe", "south_america"]
else:
return ids
def _get_select_comparisons(statement):
"""Search a Select or Query object for binary expressions.
Returns expressions which match a Column against one or more
literal values as a list of tuples of the form
(column, operator, values). "values" is a single value
or tuple of values depending on the operator.
"""
binds = {}
clauses = set()
comparisons = []
def visit_bindparam(bind):
# visit a bind parameter.
value = bind.effective_value
binds[bind] = value
def visit_column(column):
clauses.add(column)
def visit_binary(binary):
if binary.left in clauses and binary.right in binds:
comparisons.append(
(binary.left, binary.operator, binds[binary.right])
)
elif binary.left in binds and binary.right in clauses:
comparisons.append(
(binary.right, binary.operator, binds[binary.left])
)
# here we will traverse through the query's criterion, searching
# for SQL constructs. We will place simple column comparisons
# into a list.
if statement.whereclause is not None:
visitors.traverse(
statement.whereclause,
{},
{
"bindparam": visit_bindparam,
"binary": visit_binary,
"column": visit_column,
},
)
return comparisons
# further configure create_session to use these functions
Session.configure(
shard_chooser=shard_chooser,
identity_chooser=identity_chooser,
execute_chooser=execute_chooser,
)
def setup():
# create tables
for db in (db1, db2, db3, db4):
Base.metadata.create_all(db)
# establish initial "id" in db1
with db1.begin() as conn:
conn.execute(ids.insert(), {"nextid": 1})
def main():
setup()
# save and load objects!
tokyo = WeatherLocation("Asia", "Tokyo")
newyork = WeatherLocation("North America", "New York")
toronto = WeatherLocation("North America", "Toronto")
london = WeatherLocation("Europe", "London")
dublin = WeatherLocation("Europe", "Dublin")
brasilia = WeatherLocation("South America", "Brasila")
quito = WeatherLocation("South America", "Quito")
tokyo.reports.append(Report(80.0))
newyork.reports.append(Report(75))
quito.reports.append(Report(85))
with Session() as sess:
sess.add_all(
[tokyo, newyork, toronto, london, dublin, brasilia, quito]
)
sess.commit()
t = sess.get(WeatherLocation, tokyo.id)
assert t.city == tokyo.city
assert t.reports[0].temperature == 80.0
# optionally set a shard id for the query and all related loaders
north_american_cities_w_t = sess.execute(
select(WeatherLocation)
.filter(WeatherLocation.city.startswith("T"))
.options(set_shard_id("north_america"))
).scalars()
# Tokyo not included since not in the north_america shard
assert {c.city for c in north_american_cities_w_t} == {
"Toronto",
}
asia_and_europe = sess.execute(
select(WeatherLocation).filter(
WeatherLocation.continent.in_(["Europe", "Asia"])
)
).scalars()
assert {c.city for c in asia_and_europe} == {
"Tokyo",
"London",
"Dublin",
}
# the Report class uses a simple integer primary key. So across two
# databases, a primary key will be repeated. The "identity_token"
# tracks in memory that these two identical primary keys are local to
# different shards.
newyork_report = newyork.reports[0]
tokyo_report = tokyo.reports[0]
assert inspect(newyork_report).identity_key == (
Report,
(1,),
"north_america",
)
assert inspect(tokyo_report).identity_key == (Report, (1,), "asia")
# the token representing the originating shard is also available
# directly
assert inspect(newyork_report).identity_token == "north_america"
assert inspect(tokyo_report).identity_token == "asia"
if __name__ == "__main__":
main()
| Report |
python | getsentry__sentry | tests/sentry/api/endpoints/test_admin_project_configs.py | {
"start": 248,
"end": 8722
} | class ____(APITestCase):
endpoint = "sentry-api-0-internal-project-config"
def setUp(self) -> None:
super().setUp()
self.owner = self.create_user(
email="example@example.com", is_superuser=False, is_staff=True, is_active=True
)
self.org = self.create_organization(owner=self.owner)
self.first_team = self.create_team(organization=self.org)
self.proj1 = self.create_project(
name="proj1", organization=self.org, teams=[self.first_team]
)
self.proj2 = self.create_project(
name="proj2", organization=self.org, teams=[self.first_team]
)
self.superuser = self.create_user(
"superuser@example.com", is_superuser=True, is_staff=True, is_active=True
)
self.path = "sentry-api-0-internal-project-config"
self.p1_pk = self.create_project_key(self.proj1)
self.p2_pk = self.create_project_key(self.proj2)
projectconfig_cache.backend.set_many(
{
self.p1_pk.public_key: {"proj1": "config"},
}
)
def get_url(self, proj_id: str | int | None = None, key: str | int | None = None) -> str:
query = {}
if proj_id is not None:
query["projectId"] = proj_id
if key is not None:
query["projectKey"] = key
query_string = parse.urlencode(query)
ret_val = reverse(self.path)
ret_val += f"?{query_string}"
return ret_val
def test_normal_users_do_not_have_access(self) -> None:
"""
Request denied for non super-users
"""
self.login_as(self.owner)
url = self.get_url(proj_id=self.proj1.id)
response = self.client.get(url)
assert response.status_code == status.HTTP_403_FORBIDDEN
def test_retrieving_project_configs(self) -> None:
"""
Asking for a project will return all project configs from all public
keys in redis
"""
self.login_as(self.superuser, superuser=True)
url = self.get_url(proj_id=self.proj1.id)
response = self.client.get(url)
assert response.status_code == status.HTTP_200_OK
expected = {"configs": {self.p1_pk.public_key: {"proj1": "config"}}}
actual = response.json()
assert actual == expected
def test_retrieving_public_key_configs(self) -> None:
"""
Asking for a particular public key will return only the project config
for that public key
"""
self.login_as(self.superuser, superuser=True)
url = self.get_url(key=self.p1_pk.public_key)
response = self.client.get(url)
assert response.status_code == status.HTTP_200_OK
expected = {"configs": {self.p1_pk.public_key: {"proj1": "config"}}}
actual = response.json()
assert actual == expected
def test_uncached_project(self) -> None:
"""
Asking for a project that was not cached in redis will return
an empty marker
"""
outdated = {"configs": {self.p2_pk.public_key: None}}
self.login_as(self.superuser, superuser=True)
url = self.get_url(proj_id=self.proj2.id)
response = self.client.get(url)
assert response.status_code == status.HTTP_200_OK
actual = response.json()
assert actual != outdated
url = self.get_url(key=self.p2_pk.public_key)
response = self.client.get(url)
assert response.status_code == status.HTTP_200_OK
actual = response.json()
assert actual != outdated
def test_inexistent_project(self) -> None:
"""
Asking for an inexistent project will return 404
"""
inexistent_project_id = 2 ^ 32
self.login_as(self.superuser, superuser=True)
url = self.get_url(proj_id=inexistent_project_id)
response = self.client.get(url)
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_inexistent_key(self) -> None:
"""
Asking for an inexistent project key will return an empty result
"""
inexistent = 123
self.login_as(self.superuser, superuser=True)
url = self.get_url(key=inexistent)
response = self.client.get(url)
assert response.status_code == status.HTTP_404_NOT_FOUND
def test_invalidate_project_config_unauthorized(self) -> None:
url = self.get_url()
data = {"projectId": self.project.id}
response = self.client.post(url, data=data)
assert response.status_code == 401
def test_invalidate_project_config_non_superuser(self) -> None:
url = self.get_url()
data = {"projectId": self.project.id}
self.login_as(self.user, superuser=False)
response = self.client.post(url, data=data)
assert response.status_code == 403
def test_invalidate_project_config_missing_project_id(self) -> None:
url = self.get_url()
self.login_as(self.superuser, superuser=True)
response = self.client.post(url)
assert response.status_code == 400
def test_invalidate_project_config_cached_project(self) -> None:
url = self.get_url()
self.login_as(self.superuser, superuser=True)
data = {"projectId": self.proj2.id}
projectconfig_cache.backend.set_many(
{
self.p2_pk.public_key: {"proj2": "config"},
}
)
response = self.client.post(url, data=data)
assert response.status_code == 201
def test_invalidate_project_config_cached_project_sets_correct_config(self) -> None:
url = self.get_url()
self.login_as(self.superuser, superuser=True)
data = {"projectId": self.proj2.id}
projectconfig_cache.backend.set_many(
{
self.p2_pk.public_key: {"proj2": "config"},
}
)
response = self.client.post(url, data=data)
assert response.status_code == 201
assert projectconfig_cache.backend.get(self.p2_pk.public_key) != {"proj2": "config"}
def test_invalidate_project_config_uncached_project(self) -> None:
url = self.get_url()
self.login_as(self.superuser, superuser=True)
data = {"projectId": self.proj1.id}
response = self.client.post(url, data=data)
assert response.status_code == 201
def test_invalidate_project_config_uncached_project_returns_correct_config(self) -> None:
url = self.get_url()
self.login_as(self.superuser, superuser=True)
data = {"projectId": self.proj1.id}
response = self.client.post(url, data=data)
assert response.status_code == 201
def test_invalidate_project_config_with_multiple_project_keys(self) -> None:
url = self.get_url()
self.login_as(self.superuser, superuser=True)
# Create new project with two keys
test_project = self.create_project(
name="test_proj", organization=self.org, teams=[self.first_team]
)
first_key = self.create_project_key(test_project)
second_key = test_project.key_set.create()
# Set configs for both keys
projectconfig_cache.backend.set_many(
{
first_key.public_key: {"test_proj": "config1"},
second_key.public_key: {"test_proj": "config2"},
}
)
data = {"projectId": test_project.id}
response = self.client.post(url, data=data)
assert response.status_code == 201
assert projectconfig_cache.backend.get(first_key.public_key) != {"test_proj": "config1"}
assert projectconfig_cache.backend.get(second_key.public_key) != {"test_proj": "config2"}
def test_get_caches_uncached_project_config(self) -> None:
"""
Tests that making a GET request for an uncached project results
in its configuration being cached.
"""
initial_cached_config = projectconfig_cache.backend.get(self.p2_pk.public_key)
assert initial_cached_config is None
self.login_as(self.superuser, superuser=True)
url = self.get_url(proj_id=self.proj2.id)
response = self.client.get(url)
assert response.status_code == status.HTTP_200_OK
final_cached_config = projectconfig_cache.backend.get(self.p2_pk.public_key)
assert final_cached_config is not None
| AdminRelayProjectConfigsEndpointTest |
python | encode__django-rest-framework | tests/schemas/test_coreapi.py | {
"start": 1582,
"end": 1640
} | class ____(serializers.Serializer):
pass
| EmptySerializer |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.