language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pypa__hatch | tests/backend/builders/test_config.py | {
"start": 22919,
"end": 26353
} | class ____:
def test_default(self, isolation):
builder = MockBuilder(str(isolation))
assert builder.config.packages == builder.config.packages == []
def test_global_invalid_type(self, isolation):
config = {"tool": {"hatch": {"build": {"packages": ""}}}}
builder = MockBuilder(str(isolation), config=config)
with pytest.raises(TypeError, match="Field `tool.hatch.build.packages` must be an array of strings"):
_ = builder.config.packages
def test_global(self, isolation):
config = {"tool": {"hatch": {"build": {"packages": ["src/foo"]}}}}
builder = MockBuilder(str(isolation), config=config)
assert len(builder.config.packages) == 1
assert builder.config.packages[0] == pjoin("src", "foo")
def test_global_package_not_string(self, isolation):
config = {"tool": {"hatch": {"build": {"packages": [0]}}}}
builder = MockBuilder(str(isolation), config=config)
with pytest.raises(TypeError, match="Package #1 in field `tool.hatch.build.packages` must be a string"):
_ = builder.config.packages
def test_global_package_empty_string(self, isolation):
config = {"tool": {"hatch": {"build": {"packages": [""]}}}}
builder = MockBuilder(str(isolation), config=config)
with pytest.raises(
ValueError, match="Package #1 in field `tool.hatch.build.packages` cannot be an empty string"
):
_ = builder.config.packages
def test_target(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"foo": {"packages": ["src/foo"]}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
assert len(builder.config.packages) == 1
assert builder.config.packages[0] == pjoin("src", "foo")
def test_target_package_not_string(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"foo": {"packages": [0]}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
with pytest.raises(
TypeError, match="Package #1 in field `tool.hatch.build.targets.foo.packages` must be a string"
):
_ = builder.config.packages
def test_target_package_empty_string(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"foo": {"packages": [""]}}}}}}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
with pytest.raises(
ValueError, match="Package #1 in field `tool.hatch.build.targets.foo.packages` cannot be an empty string"
):
_ = builder.config.packages
def test_target_overrides_global(self, isolation):
config = {
"tool": {"hatch": {"build": {"packages": ["src/foo"], "targets": {"foo": {"packages": ["pkg/foo"]}}}}}
}
builder = MockBuilder(str(isolation), config=config)
builder.PLUGIN_NAME = "foo"
assert len(builder.config.packages) == 1
assert builder.config.packages[0] == pjoin("pkg", "foo")
def test_no_source(self, isolation):
config = {"tool": {"hatch": {"build": {"packages": ["foo"]}}}}
builder = MockBuilder(str(isolation), config=config)
assert len(builder.config.packages) == 1
assert builder.config.packages[0] == pjoin("foo")
| TestPackages |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarTuple15.py | {
"start": 268,
"end": 625
} | class ____(Generic[Unpack[Shape]]): ...
def func0(x: Array[Unpack[Shape]]) -> Array[Unpack[Shape]]: ...
def func1(y: Array[int, Unpack[tuple[Any, ...]]]):
reveal_type(func0(y), expected_text="Array[int, *tuple[Any, ...]]")
def func2(y: Array[Unpack[tuple[int, ...]], int]):
reveal_type(func0(y), expected_text="Array[*tuple[int, ...], int]")
| Array |
python | bokeh__bokeh | src/bokeh/models/sources.py | {
"start": 32125,
"end": 35177
} | class ____(WebDataSource):
''' A data source that can populate columns by making Ajax calls to REST
endpoints.
The ``AjaxDataSource`` can be especially useful if you want to make a
standalone document (i.e. not backed by the Bokeh server) that can still
dynamically update using an existing REST API.
The response from the REST API should match the ``.data`` property of a
standard ``ColumnDataSource``, i.e. a JSON dict that maps names to arrays
of values:
.. code-block:: python
{
'x' : [1, 2, 3, ...],
'y' : [9, 3, 2, ...]
}
Alternatively, if the REST API returns a different format, a ``CustomJS``
callback can be provided to convert the REST response into Bokeh format,
via the ``adapter`` property of this data source.
Initial data can be set by specifying the ``data`` property directly.
This is necessary when used in conjunction with a ``FactorRange``, even
if the columns in `data`` are empty.
A full example can be seen at :bokeh-tree:`examples/basic/data/ajax_source.py`
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
polling_interval = Nullable(Int, help="""
A polling interval (in milliseconds) for updating data source.
""")
method = Enum('POST', 'GET', help="""
Specify the HTTP method to use for the Ajax request (GET or POST)
""")
if_modified = Bool(False, help="""
Whether to include an ``If-Modified-Since`` header in Ajax requests
to the server. If this header is supported by the server, then only
new data since the last request will be returned.
""")
content_type = String(default='application/json', help="""
Set the "contentType" parameter for the Ajax request.
""")
http_headers = Dict(String, String, help="""
Specify HTTP headers to set for the Ajax request.
Example:
.. code-block:: python
ajax_source.headers = { 'x-my-custom-header': 'some value' }
""")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _check_slice(s: slice) -> None:
if (s.start is not None and s.stop is not None and s.start > s.stop):
raise ValueError(f"Patch slices must have start < end, got {s}")
if (s.start is not None and s.start < 0) or \
(s.stop is not None and s.stop < 0) or \
(s.step is not None and s.step < 0):
raise ValueError(f"Patch slices must have non-negative (start, stop, step) values, got {s}")
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| AjaxDataSource |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/widgets/toolbars.py | {
"start": 5954,
"end": 6546
} | class ____:
def __init__(self) -> None:
def get_formatted_text() -> StyleAndTextTuples:
arg = get_app().key_processor.arg or ""
if arg == "-":
arg = "-1"
return [
("class:arg-toolbar", "Repeat: "),
("class:arg-toolbar.text", arg),
]
self.window = Window(FormattedTextControl(get_formatted_text), height=1)
self.container = ConditionalContainer(content=self.window, filter=has_arg)
def __pt_container__(self) -> Container:
return self.container
| ArgToolbar |
python | crytic__slither | slither/detectors/statements/pyth_unchecked_publishtime.py | {
"start": 143,
"end": 1553
} | class ____(PythUnchecked):
"""
Documentation: This detector finds when the publishTime of a Pyth price is not checked
"""
ARGUMENT = "pyth-unchecked-publishtime"
HELP = "Detect when the publishTime of a Pyth price is not checked"
IMPACT = DetectorClassification.MEDIUM
CONFIDENCE = DetectorClassification.HIGH
WIKI = (
"https://github.com/crytic/slither/wiki/Detector-Documentation#pyth-unchecked-publishtime"
)
WIKI_TITLE = "Pyth unchecked publishTime"
WIKI_DESCRIPTION = "Detect when the publishTime of a Pyth price is not checked"
WIKI_RECOMMENDATION = "Check the publishTime of a Pyth price."
WIKI_EXPLOIT_SCENARIO = """
```solidity
import "@pythnetwork/pyth-sdk-solidity/IPyth.sol";
import "@pythnetwork/pyth-sdk-solidity/PythStructs.sol";
contract C {
IPyth pyth;
constructor(IPyth _pyth) {
pyth = _pyth;
}
function bad(bytes32 id) public {
PythStructs.Price memory price = pyth.getEmaPriceUnsafe(id);
// Use price
}
}
```
The function `A` uses the price without checking its `publishTime` coming from the `getEmaPriceUnsafe` function.
"""
PYTH_FUNCTIONS = [
"getEmaPrice",
# "getEmaPriceNoOlderThan",
"getEmaPriceUnsafe",
"getPrice",
# "getPriceNoOlderThan",
"getPriceUnsafe",
]
PYTH_FIELD = "publishTime"
| PythUncheckedPublishTime |
python | pallets__werkzeug | examples/cupoftee/network.py | {
"start": 408,
"end": 1921
} | class ____(Syncable):
def __init__(self, cup):
self.cup = cup
self.servers = cup.db.setdefault("servers", dict)
def _sync(self):
to_delete = set(self.servers)
for x in range(1, 17):
addr = (f"master{x}.teeworlds.com", 8300)
print(addr)
try:
self._sync_server_browser(addr, to_delete)
except (OSError, socket.timeout):
continue
for server_id in to_delete:
self.servers.pop(server_id, None)
if not self.servers:
raise OSError("no servers found")
self.cup.db.sync()
def _sync_server_browser(self, addr, to_delete):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.settimeout(5)
s.sendto(b"\x20\x00\x00\x00\x00\x48\xff\xff\xff\xffreqt", addr)
data = s.recvfrom(1024)[0][14:]
s.close()
for n in range(0, len(data) // 6):
addr = (
".".join(map(str, map(ord, data[n * 6 : n * 6 + 4]))),
ord(data[n * 6 + 5]) * 256 + ord(data[n * 6 + 4]),
)
server_id = f"{addr[0]}:{addr[1]}"
if server_id in self.servers:
if not self.servers[server_id].sync():
continue
else:
try:
self.servers[server_id] = Server(addr, server_id)
except ServerError:
pass
to_delete.discard(server_id)
| ServerBrowser |
python | redis__redis-py | redis/asyncio/multidb/database.py | {
"start": 1083,
"end": 1835
} | class ____(BaseDatabase, AsyncDatabase):
def __init__(
self,
client: Union[Redis, RedisCluster],
circuit: CircuitBreaker,
weight: float,
health_check_url: Optional[str] = None,
):
self._client = client
self._cb = circuit
self._cb.database = self
super().__init__(weight, health_check_url)
@property
def client(self) -> Union[Redis, RedisCluster]:
return self._client
@client.setter
def client(self, client: Union[Redis, RedisCluster]):
self._client = client
@property
def circuit(self) -> CircuitBreaker:
return self._cb
@circuit.setter
def circuit(self, circuit: CircuitBreaker):
self._cb = circuit
| Database |
python | PrefectHQ__prefect | src/prefect/flows.py | {
"start": 3568,
"end": 4232
} | class ____(Protocol, Generic[P, R]):
"""
A callable that is invoked when a flow enters a given state.
"""
__name__: str
def __call__(
self, flow: Flow[P, R], flow_run: FlowRun, state: State
) -> Awaitable[None] | None: ...
if TYPE_CHECKING:
import logging
from prefect.client.orchestration import PrefectClient
from prefect.client.schemas.objects import FlowRun
from prefect.client.types.flexible_schedule_list import FlexibleScheduleList
from prefect.deployments.runner import RunnerDeployment
from prefect.runner.storage import RunnerStorage
logger: "logging.Logger" = get_logger("flows")
| FlowStateHook |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-firebolt/destination_firebolt/writer.py | {
"start": 6660,
"end": 8421
} | class ____(FireboltWriter):
"""
Data writer using the SQL writing strategy. Data is buffered in memory
and flushed using INSERT INTO SQL statement. This is less effective strategy
better suited for testing and small data sets.
"""
flush_interval = 1000
def __init__(self, connection: Connection) -> None:
"""
:param connection: Firebolt SDK connection class with established connection
to the databse.
"""
super().__init__(connection)
def _flush(self) -> None:
"""
Intermediate data flush that's triggered during the
buffering operation. Writes data stored in memory via SQL commands.
"""
cursor = self.connection.cursor()
# id, written_at, data
for table, data in self._buffer.items():
cursor.executemany(f"INSERT INTO _airbyte_raw_{table} VALUES (?, ?, ?)", parameters_seq=data)
self._buffer.clear()
self._values = 0
def flush(self) -> None:
"""
Final data flush after all data has been written to memory.
"""
self._flush()
def create_firebolt_wirter(connection: Connection, config: json, logger: logging.Logger) -> FireboltWriter:
if config["loading_method"]["method"] == "S3":
logger.info("Using the S3 writing strategy")
writer = FireboltS3Writer(
connection,
config["loading_method"]["s3_bucket"],
config["loading_method"]["aws_key_id"],
config["loading_method"]["aws_key_secret"],
config["loading_method"]["s3_region"],
)
else:
logger.info("Using the SQL writing strategy")
writer = FireboltSQLWriter(connection)
return writer
| FireboltSQLWriter |
python | django-haystack__django-haystack | haystack/apps.py | {
"start": 173,
"end": 963
} | class ____(AppConfig):
name = "haystack"
signal_processor = None
stream = None
def ready(self):
# Setup default logging.
log = logging.getLogger("haystack")
self.stream = logging.StreamHandler()
self.stream.setLevel(logging.INFO)
log.addHandler(self.stream)
# Setup the signal processor.
if not self.signal_processor:
signal_processor_path = getattr(
settings,
"HAYSTACK_SIGNAL_PROCESSOR",
"haystack.signals.BaseSignalProcessor",
)
signal_processor_class = loading.import_class(signal_processor_path)
self.signal_processor = signal_processor_class(
connections, connection_router
)
| HaystackConfig |
python | google__pytype | pytype/errors/error_printer.py | {
"start": 567,
"end": 661
} | class ____(enum.Enum):
OBJECT = 0
SYMBOL = 1
MODULE = 2
@dataclasses.dataclass
| BadAttrType |
python | huggingface__transformers | src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py | {
"start": 64144,
"end": 69367
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: BigBirdPegasusConfig, layer_idx: Optional[int] = None):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = BigBirdPegasusDecoderAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
bias=config.use_bias,
config=config,
layer_idx=layer_idx,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = BigBirdPegasusDecoderAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
bias=config.use_bias,
config=config,
layer_idx=layer_idx,
)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer.forward
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
cache_position: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
past_key_values (`Cache`): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
cache in the correct position and to infer the complete sequence length.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
past_key_values=past_key_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
# Cross-Attention Block
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
hidden_states, cross_attn_weights = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
# Copied from transformers.models.bart.modeling_bart.BartClassificationHead with Bart->BigBirdPegasus
| BigBirdPegasusDecoderLayer |
python | pydata__xarray | xarray/tests/test_dataarray.py | {
"start": 186378,
"end": 203570
} | class ____(TestReduce):
def test_min(
self,
x: np.ndarray,
minindex: int | float,
maxindex: int | float,
nanindex: int | None,
) -> None:
ar = xr.DataArray(
x, dims=["x"], coords={"x": np.arange(x.size) * 4}, attrs=self.attrs
)
if np.isnan(minindex):
minindex = 0
expected0 = ar.isel(x=minindex, drop=True)
result0 = ar.min(keep_attrs=True)
assert_identical(result0, expected0)
# Default keeps attrs for reduction operations
result1 = ar.min()
expected1 = expected0.copy()
assert_identical(result1, expected1)
result2 = ar.min(skipna=False)
if nanindex is not None and ar.dtype.kind != "O":
expected2 = ar.isel(x=nanindex, drop=True)
else:
expected2 = expected1
assert_identical(result2, expected2)
# Test explicitly dropping attrs
result3 = ar.min(keep_attrs=False)
expected3 = expected0.copy()
expected3.attrs = {}
assert_identical(result3, expected3)
def test_max(
self,
x: np.ndarray,
minindex: int | float,
maxindex: int | float,
nanindex: int | None,
) -> None:
ar = xr.DataArray(
x, dims=["x"], coords={"x": np.arange(x.size) * 4}, attrs=self.attrs
)
if np.isnan(minindex):
maxindex = 0
expected0 = ar.isel(x=maxindex, drop=True)
result0 = ar.max(keep_attrs=True)
assert_identical(result0, expected0)
# Default keeps attrs for reduction operations
result1 = ar.max()
expected1 = expected0.copy()
assert_identical(result1, expected1)
result2 = ar.max(skipna=False)
if nanindex is not None and ar.dtype.kind != "O":
expected2 = ar.isel(x=nanindex, drop=True)
else:
expected2 = expected1
assert_identical(result2, expected2)
# Test explicitly dropping attrs
result3 = ar.max(keep_attrs=False)
expected3 = expected0.copy()
expected3.attrs = {}
assert_identical(result3, expected3)
@pytest.mark.filterwarnings(
"ignore:Behaviour of argmin/argmax with neither dim nor :DeprecationWarning"
)
def test_argmin(
self,
x: np.ndarray,
minindex: int | float,
maxindex: int | float,
nanindex: int | None,
) -> None:
ar = xr.DataArray(
x, dims=["x"], coords={"x": np.arange(x.size) * 4}, attrs=self.attrs
)
indarr = xr.DataArray(np.arange(x.size, dtype=np.intp), dims=["x"])
if np.isnan(minindex):
with pytest.raises(ValueError):
ar.argmin()
return
expected0 = indarr[minindex]
expected0.attrs = self.attrs # argmin should preserve attrs from input
result0 = ar.argmin()
assert_identical(result0, expected0)
result1 = ar.argmin(keep_attrs=True)
expected1 = expected0.copy()
expected1.attrs = self.attrs
assert_identical(result1, expected1)
result2 = ar.argmin(skipna=False)
if nanindex is not None and ar.dtype.kind != "O":
expected2 = indarr.isel(x=nanindex, drop=True)
expected2.attrs = self.attrs # Default keeps attrs for reduction operations
else:
expected2 = expected0
assert_identical(result2, expected2)
@pytest.mark.filterwarnings(
"ignore:Behaviour of argmin/argmax with neither dim nor :DeprecationWarning"
)
def test_argmax(
self,
x: np.ndarray,
minindex: int | float,
maxindex: int | float,
nanindex: int | None,
) -> None:
ar = xr.DataArray(
x, dims=["x"], coords={"x": np.arange(x.size) * 4}, attrs=self.attrs
)
indarr = xr.DataArray(np.arange(x.size, dtype=np.intp), dims=["x"])
if np.isnan(maxindex):
with pytest.raises(ValueError):
ar.argmax()
return
expected0 = indarr[maxindex]
expected0.attrs = self.attrs # Default keeps attrs for reduction operations
result0 = ar.argmax()
assert_identical(result0, expected0)
result1 = ar.argmax(keep_attrs=True)
expected1 = expected0.copy()
expected1.attrs = self.attrs
assert_identical(result1, expected1)
result2 = ar.argmax(skipna=False)
if nanindex is not None and ar.dtype.kind != "O":
expected2 = indarr.isel(x=nanindex, drop=True)
expected2.attrs = self.attrs # Default keeps attrs for reduction operations
else:
expected2 = expected0
assert_identical(result2, expected2)
@pytest.mark.parametrize(
"use_dask",
[
pytest.param(
True, marks=pytest.mark.skipif(not has_dask, reason="no dask")
),
False,
],
)
def test_idxmin(
self,
x: np.ndarray,
minindex: int | float,
maxindex: int | float,
nanindex: int | None,
use_dask: bool,
) -> None:
ar0_raw = xr.DataArray(
x, dims=["x"], coords={"x": np.arange(x.size) * 4}, attrs=self.attrs
)
if use_dask:
ar0 = ar0_raw.chunk()
else:
ar0 = ar0_raw
with pytest.raises(
KeyError,
match=r"'spam' not found in array dimensions",
):
ar0.idxmin(dim="spam")
# Scalar Dataarray
with pytest.raises(ValueError):
xr.DataArray(5).idxmin()
coordarr0 = xr.DataArray(ar0.coords["x"].data, dims=["x"])
coordarr1 = coordarr0.copy()
hasna = np.isnan(minindex)
if np.isnan(minindex):
minindex = 0
if hasna:
coordarr1[...] = 1
fill_value_0 = np.nan
else:
fill_value_0 = 1
expected0 = (
(coordarr1 * fill_value_0).isel(x=minindex, drop=True).astype("float")
)
expected0.name = "x"
expected0.attrs = self.attrs # Default keeps attrs for reduction operations
# Default fill value (NaN)
result0 = ar0.idxmin()
assert_identical(result0, expected0)
# Manually specify NaN fill_value
result1 = ar0.idxmin(fill_value=np.nan)
assert_identical(result1, expected0)
# keep_attrs
result2 = ar0.idxmin(keep_attrs=True)
expected2 = expected0.copy()
expected2.attrs = self.attrs
assert_identical(result2, expected2)
# skipna=False
if nanindex is not None and ar0.dtype.kind != "O":
expected3 = coordarr0.isel(x=nanindex, drop=True).astype("float")
expected3.name = "x"
expected3.attrs = self.attrs # Default keeps attrs for reduction operations
else:
expected3 = expected0.copy()
result3 = ar0.idxmin(skipna=False)
assert_identical(result3, expected3)
# fill_value should be ignored with skipna=False
result4 = ar0.idxmin(skipna=False, fill_value=-100j)
assert_identical(result4, expected3)
# Float fill_value
if hasna:
fill_value_5 = -1.1
else:
fill_value_5 = 1
expected5 = (coordarr1 * fill_value_5).isel(x=minindex, drop=True)
expected5.name = "x"
expected5.attrs = self.attrs # Default keeps attrs for reduction operations
result5 = ar0.idxmin(fill_value=-1.1)
assert_identical(result5, expected5)
# Integer fill_value
if hasna:
fill_value_6 = -1
else:
fill_value_6 = 1
expected6 = (coordarr1 * fill_value_6).isel(x=minindex, drop=True)
expected6.name = "x"
expected6.attrs = self.attrs # Default keeps attrs for reduction operations
result6 = ar0.idxmin(fill_value=-1)
assert_identical(result6, expected6)
# Complex fill_value
if hasna:
fill_value_7 = -1j
else:
fill_value_7 = 1
expected7 = (coordarr1 * fill_value_7).isel(x=minindex, drop=True)
expected7.name = "x"
expected7.attrs = self.attrs # Default keeps attrs for reduction operations
result7 = ar0.idxmin(fill_value=-1j)
assert_identical(result7, expected7)
@pytest.mark.parametrize("use_dask", [True, False])
def test_idxmax(
self,
x: np.ndarray,
minindex: int | float,
maxindex: int | float,
nanindex: int | None,
use_dask: bool,
) -> None:
if use_dask and not has_dask:
pytest.skip("requires dask")
if use_dask and x.dtype.kind == "M":
pytest.xfail("dask operation 'argmax' breaks when dtype is datetime64 (M)")
ar0_raw = xr.DataArray(
x, dims=["x"], coords={"x": np.arange(x.size) * 4}, attrs=self.attrs
)
if use_dask:
ar0 = ar0_raw.chunk({})
else:
ar0 = ar0_raw
with pytest.raises(
KeyError,
match=r"'spam' not found in array dimensions",
):
ar0.idxmax(dim="spam")
# Scalar Dataarray
with pytest.raises(ValueError):
xr.DataArray(5).idxmax()
coordarr0 = xr.DataArray(ar0.coords["x"].data, dims=["x"])
coordarr1 = coordarr0.copy()
hasna = np.isnan(maxindex)
if np.isnan(maxindex):
maxindex = 0
if hasna:
coordarr1[...] = 1
fill_value_0 = np.nan
else:
fill_value_0 = 1
expected0 = (
(coordarr1 * fill_value_0).isel(x=maxindex, drop=True).astype("float")
)
expected0.name = "x"
expected0.attrs = self.attrs # Default keeps attrs for reduction operations
# Default fill value (NaN)
result0 = ar0.idxmax()
assert_identical(result0, expected0)
# Manually specify NaN fill_value
result1 = ar0.idxmax(fill_value=np.nan)
assert_identical(result1, expected0)
# keep_attrs
result2 = ar0.idxmax(keep_attrs=True)
expected2 = expected0.copy()
expected2.attrs = self.attrs
assert_identical(result2, expected2)
# skipna=False
if nanindex is not None and ar0.dtype.kind != "O":
expected3 = coordarr0.isel(x=nanindex, drop=True).astype("float")
expected3.name = "x"
expected3.attrs = self.attrs # Default keeps attrs for reduction operations
else:
expected3 = expected0.copy()
result3 = ar0.idxmax(skipna=False)
assert_identical(result3, expected3)
# fill_value should be ignored with skipna=False
result4 = ar0.idxmax(skipna=False, fill_value=-100j)
assert_identical(result4, expected3)
# Float fill_value
if hasna:
fill_value_5 = -1.1
else:
fill_value_5 = 1
expected5 = (coordarr1 * fill_value_5).isel(x=maxindex, drop=True)
expected5.name = "x"
expected5.attrs = self.attrs # Default keeps attrs for reduction operations
result5 = ar0.idxmax(fill_value=-1.1)
assert_identical(result5, expected5)
# Integer fill_value
if hasna:
fill_value_6 = -1
else:
fill_value_6 = 1
expected6 = (coordarr1 * fill_value_6).isel(x=maxindex, drop=True)
expected6.name = "x"
expected6.attrs = self.attrs # Default keeps attrs for reduction operations
result6 = ar0.idxmax(fill_value=-1)
assert_identical(result6, expected6)
# Complex fill_value
if hasna:
fill_value_7 = -1j
else:
fill_value_7 = 1
expected7 = (coordarr1 * fill_value_7).isel(x=maxindex, drop=True)
expected7.name = "x"
expected7.attrs = self.attrs # Default keeps attrs for reduction operations
result7 = ar0.idxmax(fill_value=-1j)
assert_identical(result7, expected7)
@pytest.mark.filterwarnings(
"ignore:Behaviour of argmin/argmax with neither dim nor :DeprecationWarning"
)
def test_argmin_dim(
self,
x: np.ndarray,
minindex: int | float,
maxindex: int | float,
nanindex: int | None,
) -> None:
ar = xr.DataArray(
x, dims=["x"], coords={"x": np.arange(x.size) * 4}, attrs=self.attrs
)
indarr = xr.DataArray(np.arange(x.size, dtype=np.intp), dims=["x"])
if np.isnan(minindex):
with pytest.raises(ValueError):
ar.argmin()
return
expected0 = {"x": indarr[minindex]}
for da in expected0.values():
da.attrs = self.attrs # Default keeps attrs for reduction operations
result0 = ar.argmin(...)
for key in expected0:
assert_identical(result0[key], expected0[key])
result1 = ar.argmin(..., keep_attrs=True)
expected1 = deepcopy(expected0)
for da in expected1.values():
da.attrs = self.attrs
for key in expected1:
assert_identical(result1[key], expected1[key])
result2 = ar.argmin(..., skipna=False)
if nanindex is not None and ar.dtype.kind != "O":
expected2 = {"x": indarr.isel(x=nanindex, drop=True)}
expected2[
"x"
].attrs = self.attrs # Default keeps attrs for reduction operations
else:
expected2 = expected0
for key in expected2:
assert_identical(result2[key], expected2[key])
@pytest.mark.filterwarnings(
"ignore:Behaviour of argmin/argmax with neither dim nor :DeprecationWarning"
)
def test_argmax_dim(
self,
x: np.ndarray,
minindex: int | float,
maxindex: int | float,
nanindex: int | None,
) -> None:
ar = xr.DataArray(
x, dims=["x"], coords={"x": np.arange(x.size) * 4}, attrs=self.attrs
)
indarr = xr.DataArray(np.arange(x.size, dtype=np.intp), dims=["x"])
if np.isnan(maxindex):
with pytest.raises(ValueError):
ar.argmax()
return
expected0 = {"x": indarr[maxindex]}
for da in expected0.values():
da.attrs = self.attrs # Default keeps attrs for reduction operations
result0 = ar.argmax(...)
for key in expected0:
assert_identical(result0[key], expected0[key])
result1 = ar.argmax(..., keep_attrs=True)
expected1 = deepcopy(expected0)
for da in expected1.values():
da.attrs = self.attrs
for key in expected1:
assert_identical(result1[key], expected1[key])
result2 = ar.argmax(..., skipna=False)
if nanindex is not None and ar.dtype.kind != "O":
expected2 = {"x": indarr.isel(x=nanindex, drop=True)}
expected2[
"x"
].attrs = self.attrs # Default keeps attrs for reduction operations
else:
expected2 = expected0
for key in expected2:
assert_identical(result2[key], expected2[key])
@pytest.mark.parametrize(
["x", "minindex", "maxindex", "nanindex"],
[
pytest.param(
np.array(
[
[0, 1, 2, 0, -2, -4, 2],
[1, 1, 1, 1, 1, 1, 1],
[0, 0, -10, 5, 20, 0, 0],
]
),
[5, 0, 2],
[2, 0, 4],
[None, None, None],
id="int",
),
pytest.param(
np.array(
[
[2.0, 1.0, 2.0, 0.0, -2.0, -4.0, 2.0],
[-4.0, np.nan, 2.0, np.nan, -2.0, -4.0, 2.0],
[np.nan] * 7,
]
),
[5, 0, np.nan],
[0, 2, np.nan],
[None, 1, 0],
id="nan",
),
pytest.param(
np.array(
[
[2.0, 1.0, 2.0, 0.0, -2.0, -4.0, 2.0],
[-4.0, np.nan, 2.0, np.nan, -2.0, -4.0, 2.0],
[np.nan] * 7,
]
).astype("object"),
[5, 0, np.nan],
[0, 2, np.nan],
[None, 1, 0],
marks=pytest.mark.filterwarnings(
"ignore:invalid value encountered in reduce:RuntimeWarning:"
),
id="obj",
),
pytest.param(
np.array(
[
["2015-12-31", "2020-01-02", "2020-01-01", "2016-01-01"],
["2020-01-02", "2020-01-02", "2020-01-02", "2020-01-02"],
["1900-01-01", "1-02-03", "1900-01-02", "1-02-03"],
],
dtype="datetime64[ns]",
),
[0, 0, 1],
[1, 0, 2],
[None, None, None],
id="datetime",
),
],
)
| TestReduce1D |
python | allegroai__clearml | clearml/backend_api/services/v2_13/models.py | {
"start": 94127,
"end": 95388
} | class ____(Request):
"""
Convert public models to private
:param ids: Ids of the models to convert. Only the models originated by the
company can be converted
:type ids: Sequence[str]
"""
_service = "models"
_action = "make_private"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"ids": {
"description": "Ids of the models to convert. Only the models originated by the company can be converted",
"items": {"type": "string"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
super(MakePrivateRequest, self).__init__(**kwargs)
self.ids = ids
@schema_property("ids")
def ids(self) -> Optional[List[str]]:
return self._property_ids
@ids.setter
def ids(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_ids = None
return
self.assert_isinstance(value, "ids", (list, tuple))
self.assert_isinstance(value, "ids", six.string_types, is_array=True)
self._property_ids = value
| MakePrivateRequest |
python | xlwings__xlwings | xlwings/pro/_xlremote.py | {
"start": 25381,
"end": 26486
} | class ____(base_classes.Collection):
def __init__(self, parent):
self._parent = parent
self._api = parent.api[self._attr]
@property
def api(self):
return self._api
@property
def parent(self):
return self._parent
def __call__(self, key):
if isinstance(key, numbers.Number):
if key > len(self):
raise KeyError(key)
else:
return self._wrap(self.parent, key)
else:
for ix, i in enumerate(self.api):
if i["name"] == key:
return self._wrap(self.parent, ix + 1)
raise KeyError(key)
def __len__(self):
return len(self.api)
def __iter__(self):
for ix, api in enumerate(self.api):
yield self._wrap(self._parent, ix + 1)
def __contains__(self, key):
if isinstance(key, numbers.Number):
return 1 <= key <= len(self)
else:
for i in self.api:
if i["name"] == key:
return True
return False
| Collection |
python | scipy__scipy | scipy/stats/tests/test_morestats.py | {
"start": 2803,
"end": 3924
} | class ____:
def test_basic(self):
# Expected values in this test simply taken from the function. For
# some checks regarding correctness of implementation, see review in
# gh-674
data = [6, 9, 12, 7, 8, 8, 13]
mean, var, std = stats.bayes_mvs(data)
assert_almost_equal(mean.statistic, 9.0)
assert_allclose(mean.minmax, (7.103650222492964, 10.896349777507034),
rtol=1e-6)
assert_almost_equal(var.statistic, 10.0)
assert_allclose(var.minmax, (3.1767242068607087, 24.45910381334018),
rtol=1e-09)
assert_almost_equal(std.statistic, 2.9724954732045084, decimal=14)
assert_allclose(std.minmax, (1.7823367265645145, 4.9456146050146312),
rtol=1e-14)
def test_empty_input(self):
assert_raises(ValueError, stats.bayes_mvs, [])
def test_result_attributes(self):
x = np.arange(15)
attributes = ('statistic', 'minmax')
res = stats.bayes_mvs(x)
for i in res:
check_named_results(i, attributes)
| TestBayes_mvs |
python | huggingface__transformers | tests/models/blip/test_modeling_blip.py | {
"start": 33652,
"end": 38657
} | class ____(ModelTesterMixin, unittest.TestCase):
all_model_classes = (BlipForConditionalGeneration,) if is_torch_available() else ()
# Doesn't run generation tests due to custom generation logic -- wont fix
all_generative_model_classes = ()
test_resize_embeddings = True
test_attention_outputs = False
def setUp(self):
self.model_tester = BlipTextImageModelsModelTester(self)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="Hidden_states is tested in individual model tests")
def test_hidden_states_output(self):
pass
@unittest.skip(reason="Inputs_embeds is tested in individual model tests")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Retain_grad is tested in individual model tests")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="BlipModel does not have input/output embeddings")
def test_model_get_set_embeddings(self):
pass
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
if model.config.is_encoder_decoder:
expected_arg_names = [
"input_ids",
"attention_mask",
"decoder_input_ids",
"decoder_attention_mask",
]
expected_arg_names.extend(["encoder_outputs"])
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
else:
expected_arg_names = ["input_ids"] if model_class != BlipForConditionalGeneration else ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_training(self):
if not self.model_tester.is_training:
self.skipTest(reason="ModelTester is not setup for training")
for model_class in self.all_model_classes[:-1]:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
# hardcode labels to be the same as input_ids
inputs["labels"] = inputs["input_ids"]
loss = model(**inputs).loss
loss.backward()
def test_training_gradient_checkpointing(self):
if not self.model_tester.is_training:
self.skipTest(reason="ModelTester is not setup for training")
for model_class in self.all_model_classes[:-1]:
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.use_cache = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.gradient_checkpointing_enable()
model.train()
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
# hardcode labels to be the same as input_ids
inputs["labels"] = inputs["input_ids"]
loss = model(**inputs).loss
loss.backward()
def test_load_vision_text_config(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# Save BlipConfig and check if we can load BlipVisionConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
vision_config = BlipVisionConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict())
# Save BlipConfig and check if we can load BlipTextConfig from it
with tempfile.TemporaryDirectory() as tmp_dir_name:
config.save_pretrained(tmp_dir_name)
text_config = BlipTextConfig.from_pretrained(tmp_dir_name)
self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict())
@slow
def test_model_from_pretrained(self):
model_name = "Salesforce/blip-vqa-base"
model = BlipModel.from_pretrained(model_name)
self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
url = "https://huggingface.co/hf-internal-testing/blip-test-image/resolve/main/demo.jpg"
im = Image.open(requests.get(url, stream=True).raw)
return im
@require_vision
@require_torch
@slow
| BlipTextImageModelTest |
python | astropy__astropy | astropy/io/fits/tests/test_fitsheader.py | {
"start": 208,
"end": 8211
} | class ____(FitsTestCase):
def test_help(self):
with pytest.raises(SystemExit) as e:
fitsheader.main(["-h"])
assert e.value.code == 0
def test_version(self, capsys):
with pytest.raises(SystemExit) as e:
fitsheader.main(["--version"])
out = capsys.readouterr()[0]
assert out == f"fitsheader {version}"
assert e.value.code == 0
def test_file_exists(self, capsys):
fitsheader.main([self.data("arange.fits")])
out, err = capsys.readouterr()
assert out.splitlines()[1].startswith(
"SIMPLE = T / conforms to FITS standard"
)
assert err == ""
def test_by_keyword(self, capsys):
fitsheader.main(["-k", "NAXIS", self.data("arange.fits")])
out, err = capsys.readouterr()
assert out.splitlines()[1].startswith(
"NAXIS = 3 / number of array dimensions"
)
fitsheader.main(["-k", "NAXIS*", self.data("arange.fits")])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 5
assert out[1].startswith("NAXIS")
assert out[2].startswith("NAXIS1")
assert out[3].startswith("NAXIS2")
assert out[4].startswith("NAXIS3")
fitsheader.main(["-k", "RANDOMKEY", self.data("arange.fits")])
out, err = capsys.readouterr()
assert err.startswith("WARNING") and "RANDOMKEY" in err
assert not err.startswith("ERROR")
def test_by_extension(self, capsys):
fitsheader.main(["-e", "1", self.data("test0.fits")])
out, err = capsys.readouterr()
assert len(out.splitlines()) == 62
fitsheader.main(["-e", "3", "-k", "BACKGRND", self.data("test0.fits")])
out, err = capsys.readouterr()
assert out.splitlines()[1].startswith("BACKGRND= 312.")
fitsheader.main(["-e", "0", "-k", "BACKGRND", self.data("test0.fits")])
out, err = capsys.readouterr()
assert err.startswith("WARNING")
fitsheader.main(["-e", "3", "-k", "FOO", self.data("test0.fits")])
out, err = capsys.readouterr()
assert err.startswith("WARNING")
def test_table(self, capsys):
fitsheader.main(["-t", "-k", "BACKGRND", self.data("test0.fits")])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 5
assert out[1].endswith("| 1 | BACKGRND | 316.0 |")
assert out[2].endswith("| 2 | BACKGRND | 351.0 |")
assert out[3].endswith("| 3 | BACKGRND | 312.0 |")
assert out[4].endswith("| 4 | BACKGRND | 323.0 |")
fitsheader.main(
[
"-t",
"-e",
"0",
"-k",
"NAXIS",
self.data("arange.fits"),
self.data("ascii.fits"),
self.data("blank.fits"),
]
)
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 4
assert out[1].endswith("| 0 | NAXIS | 3 |")
assert out[2].endswith("| 0 | NAXIS | 0 |")
assert out[3].endswith("| 0 | NAXIS | 2 |")
def test_fitsort(self, capsys):
fitsheader.main(
[
"-e",
"0",
"-f",
"-k",
"EXPSTART",
"-k",
"EXPTIME",
self.data("test0.fits"),
self.data("test1.fits"),
]
)
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 4
assert out[2].endswith("test0.fits 49491.65366175 0.23")
assert out[3].endswith("test1.fits 49492.65366175 0.22")
fitsheader.main(
[
"-e",
"0",
"-f",
"-k",
"EXPSTART",
"-k",
"EXPTIME",
self.data("test0.fits"),
]
)
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 3
assert out[2].endswith("test0.fits 49491.65366175 0.23")
fitsheader.main(
["-f", "-k", "NAXIS", self.data("tdim.fits"), self.data("test1.fits")]
)
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 4
assert out[0].endswith("0:NAXIS 1:NAXIS 2:NAXIS 3:NAXIS 4:NAXIS")
assert out[2].endswith("tdim.fits 0 2 -- -- --")
assert out[3].endswith("test1.fits 0 2 2 2 2")
# check that files without required keyword are present
fitsheader.main(
["-f", "-k", "DATE-OBS", self.data("table.fits"), self.data("test0.fits")]
)
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 4
assert out[2].endswith("table.fits --")
assert out[3].endswith("test0.fits 19/05/94")
# check that COMMENT and HISTORY are excluded
fitsheader.main(["-e", "0", "-f", self.data("tb.fits")])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 3
assert out[2].endswith(
"tb.fits True 16 0 True STScI-STSDAS/TABLES tb.fits 1"
)
def test_fitsort_sorting_keyword_fitsort(self, capsys):
"""check that sorting by keyword works"""
fitsheader.main(
[
"-f",
"-k",
"NAXIS",
"-e",
"0",
self.data("group.fits"),
self.data("test0.fits"),
]
)
out_unsorted, err_unsorted = capsys.readouterr()
out_unsorted = out_unsorted.splitlines()
fitsheader.main(
[
"-f",
"-s",
"NAXIS",
"-k",
"NAXIS",
"-e",
"0",
self.data("group.fits"),
self.data("test0.fits"),
]
)
out_sorted, err_sorted = capsys.readouterr()
out_sorted = out_sorted.splitlines()
assert len(out_unsorted) == 4
assert out_unsorted[2].endswith("group.fits 5")
assert out_unsorted[3].endswith("test0.fits 0")
assert len(out_sorted) == 4
assert out_sorted[2].endswith("test0.fits 0")
assert out_sorted[3].endswith("group.fits 5")
def test_fitsort_sorting_keyword_complains(self, capsys):
with pytest.raises(SystemExit):
fitsheader.main(
["-t", "-s", "DUMMY", self.data("group.fits"), self.data("test0.fits")]
)
out_table, err_table = capsys.readouterr()
assert "only supported in conjunction with -f/--fitsort" in err_table
with pytest.raises(SystemExit):
fitsheader.main(
["-s", "DUMMY", self.data("group.fits"), self.data("test0.fits")]
)
out_default, err_default = capsys.readouterr()
assert "only supported in conjunction with -f/--fitsort" in err_default
def test_dotkeyword(self, capsys):
fitsheader.main(["-e", "0", "-k", "ESO DET ID", self.data("fixed-1890.fits")])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 2
assert (
out[1].strip().endswith("HIERARCH ESO DET ID = 'DV13' / Detector system Id")
)
fitsheader.main(["-e", "0", "-k", "ESO.DET.ID", self.data("fixed-1890.fits")])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 2
assert (
out[1].strip().endswith("HIERARCH ESO DET ID = 'DV13' / Detector system Id")
)
| TestFITSheader_script |
python | ansible__ansible | lib/ansible/module_utils/_internal/_patches/__init__.py | {
"start": 397,
"end": 2565
} | class ____(abc.ABC):
"""Base class for patches that provides abstractions for validation of broken behavior, installation of patches, and validation of fixed behavior."""
target_container: t.ClassVar
"""The module object containing the function to be patched."""
target_attribute: t.ClassVar[str]
"""The attribute name on the target module to patch."""
unpatched_implementation: t.ClassVar[t.Callable]
"""The unpatched implementation. Available only after the patch has been applied."""
@classmethod
@abc.abstractmethod
def is_patch_needed(cls) -> bool:
"""Returns True if the patch is currently needed. Returns False if the original target does not need the patch or the patch has already been applied."""
@abc.abstractmethod
def __call__(self, *args, **kwargs) -> t.Any:
"""Invoke the patched or original implementation, depending on whether the patch has been applied or not."""
@classmethod
def is_patched(cls) -> bool:
"""Returns True if the patch has been applied, otherwise returns False."""
return isinstance(cls.get_current_implementation(), PatchedTarget) # using a protocol lets us be more resilient to module unload weirdness
@classmethod
def get_current_implementation(cls) -> t.Any:
"""Get the current (possibly patched) implementation from the patch target container."""
return getattr(cls.target_container, cls.target_attribute)
@classmethod
def patch(cls) -> None:
"""Idempotently apply this patch (if needed)."""
if cls.is_patched():
return
cls.unpatched_implementation = cls.get_current_implementation()
if not cls.is_patch_needed():
return
# __call__ requires an instance (otherwise it'll be __new__)
setattr(cls.target_container, cls.target_attribute, cls())
if not cls.is_patch_needed():
return
setattr(cls.target_container, cls.target_attribute, cls.unpatched_implementation)
raise RuntimeError(f"Validation of '{cls.target_container.__name__}.{cls.target_attribute}' failed after patching.")
| CallablePatch |
python | tensorflow__tensorflow | tensorflow/tools/docs/tf_doctest.py | {
"start": 4593,
"end": 6851
} | class ____(tf.test.TestCase):
def set_up(self, test):
# Enable soft device placement to run distributed doctests.
tf.config.set_soft_device_placement(True)
self.setUp()
context.async_wait()
def tear_down(self, test):
self.tearDown()
def load_tests(unused_loader, tests, unused_ignore):
"""Loads all the tests in the docstrings and runs them."""
tf_modules = find_modules()
if FLAGS.module:
tf_modules = filter_on_submodules(tf_modules, FLAGS.module)
if FLAGS.list:
print('**************************************************')
for mod in tf_modules:
print(mod.__name__)
print('**************************************************')
return tests
test_shard_index = int(os.environ.get('TEST_SHARD_INDEX', '0'))
total_test_shards = int(os.environ.get('TEST_TOTAL_SHARDS', '1'))
tf_modules = sorted(tf_modules, key=lambda mod: mod.__name__)
for n, module in enumerate(tf_modules):
if (n % total_test_shards) != test_shard_index:
continue
# If I break the loop comprehension, then the test times out in `small`
# size.
if any(
module.__name__.startswith(package + prefix) # pylint: disable=g-complex-comprehension
for prefix in FLAGS.module_prefix_skip for package in PACKAGES):
continue
testcase = TfTestCase()
tests.addTests(
doctest.DocTestSuite(
module,
test_finder=doctest.DocTestFinder(exclude_empty=False),
extraglobs={
'tf': tf,
'np': np,
'os': os
},
setUp=testcase.set_up,
tearDown=testcase.tear_down,
checker=tf_doctest_lib.TfDoctestOutputChecker(),
optionflags=(doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
| doctest.IGNORE_EXCEPTION_DETAIL
| doctest.DONT_ACCEPT_BLANKLINE),
))
return tests
# We can only create logical devices before initializing Tensorflow. This is
# called by unittest framework before running any test.
# https://docs.python.org/3/library/unittest.html#setupmodule-and-teardownmodule
def setUpModule():
setup_gpu(FLAGS.required_gpus)
if __name__ == '__main__':
absltest.main()
| TfTestCase |
python | getsentry__sentry | tests/sentry/runner/commands/test_migrations.py | {
"start": 427,
"end": 3229
} | class ____(TransactionTestCase):
command = migrations
# Copy paste from CliTest as this test needs to escape auto transactions
@property
def runner(self) -> CliRunner:
return CliRunner()
def invoke(self, *args, **kwargs):
return self.runner.invoke(self.command, args, obj={}, **kwargs)
def test_index_creation(self) -> None:
with override_settings(
INSTALLED_APPS=("fixtures.safe_migrations_apps.migration_test_app",),
MIGRATION_MODULES={},
):
result = self.invoke("run", "migration_test_app", "0001")
assert result.exit_code == 0, result.output
assert "Running post-deployment migration for default" in result.output
assert "Running post-deployment migration for control" in result.output
assert "Migration complete" in result.output
connection = connections["default"]
queries = [q["sql"] for q in connection.queries]
expected = 'CREATE INDEX CONCURRENTLY "migration_run_test_name_idx" ON "migration_test_app_migrationruntest" ("name")'
assert expected in queries, queries
matched = filter_queries("CREATE INDEX CONCURRENTLY", queries)
assert len(matched) == 1
matched = filter_queries('CREATE INDEX "migration_run_test_name_idx"', queries)
assert len(matched) == 0
for conn_name in connections:
connection = connections[conn_name]
if connection.alias == "default":
continue
queries = [q["sql"] for q in connection.queries]
matched = filter_queries("CREATE TABLE", queries)
assert len(matched) == 0
matched = filter_queries("CREATE INDEX", queries)
assert len(matched) == 0
def test_migration_skipped_by_router(self) -> None:
with (
override_settings(
INSTALLED_APPS=("fixtures.safe_migrations_apps.migration_test_app",),
MIGRATION_MODULES={},
),
patch.object(router, "allow_migrate") as mock_allow,
):
mock_allow.return_value = False
result = self.invoke("run", "migration_test_app", "0001")
assert result.exit_code == 0, result.output
assert "Migration complete" in result.output
for conn_name in connections:
connection = connections[conn_name]
queries = [q["sql"] for q in connection.queries]
matched = filter_queries("CREATE TABLE", queries)
assert len(matched) == 0
matched = filter_queries("CREATE INDEX", queries)
assert len(matched) == 0
| MigrationsRunTest |
python | getsentry__sentry | src/sentry/notifications/models/notificationsettingoption.py | {
"start": 248,
"end": 1138
} | class ____(NotificationSettingBase):
__relocation_scope__ = RelocationScope.Excluded
class Meta:
app_label = "notifications"
db_table = "sentry_notificationsettingoption"
unique_together = (
(
"scope_type",
"scope_identifier",
"user_id",
"team_id",
"type",
),
)
constraints = [
models.CheckConstraint(
condition=models.Q(team_id__isnull=False, user_id__isnull=True)
| models.Q(team_id__isnull=True, user_id__isnull=False),
name="notification_setting_option_team_or_user_check",
)
]
__repr__ = sane_repr(
"scope_type",
"scope_identifier",
"type",
"user_id",
"team_id",
"value",
)
| NotificationSettingOption |
python | apache__airflow | providers/standard/tests/unit/standard/triggers/test_external_task.py | {
"start": 22448,
"end": 30839
} | class ____:
DAG_ID = "test_dag_state_trigger"
RUN_ID = "external_task_run_id"
STATES = ["success", "fail"]
EXECUTION_DATE = timezone.datetime(2022, 1, 1)
@pytest.mark.db_test
@pytest.mark.asyncio
@pytest.mark.skipif(AIRFLOW_V_3_0_PLUS, reason="Airflow 3 had a different implementation")
async def test_dag_state_trigger(self, session):
"""
Assert that the DagStateTrigger only goes off on or after a DagRun
reaches an allowed state (i.e. SUCCESS).
"""
dag = DAG(self.DAG_ID, schedule=None, start_date=timezone.datetime(2022, 1, 1))
run_id_or_execution_date = (
{"run_id": "external_task_run_id"}
if AIRFLOW_V_3_0_PLUS
else {"execution_date": timezone.datetime(2022, 1, 1), "run_id": "external_task_run_id"}
)
dag_run = DagRun(dag_id=dag.dag_id, run_type="manual", **run_id_or_execution_date)
session.add(dag_run)
session.commit()
trigger = DagStateTrigger(
dag_id=dag.dag_id,
states=self.STATES,
**_DATES,
poll_interval=0.2,
)
task = asyncio.create_task(trigger.run().__anext__())
await asyncio.sleep(0.5)
# It should not have produced a result
assert task.done() is False
# Progress the dag to a "success" state so that yields a TriggerEvent
dag_run.state = DagRunState.SUCCESS
session.commit()
await asyncio.sleep(0.5)
assert task.done() is True
# Prevents error when task is destroyed while in "pending" state
asyncio.get_event_loop().stop()
@pytest.mark.db_test
@pytest.mark.asyncio
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="Airflow 2 had a different implementation")
@mock.patch("airflow.sdk.execution_time.task_runner.RuntimeTaskInstance.get_dr_count")
async def test_dag_state_trigger_af_3(self, mock_get_dag_run_count, session):
"""
Assert that the DagStateTrigger only goes off on or after a DagRun
reaches an allowed state (i.e. SUCCESS).
"""
# Mock the get_dag_run_count_by_run_ids_and_states function to return 0 first time
mock_get_dag_run_count.return_value = 0
dag = DAG(self.DAG_ID, schedule=None, start_date=timezone.datetime(2022, 1, 1))
dag_run = DagRun(
dag_id=dag.dag_id,
run_type="manual",
run_id="external_task_run_id",
logical_date=timezone.datetime(2022, 1, 1),
)
session.add(dag_run)
session.commit()
trigger = DagStateTrigger(
dag_id=dag.dag_id,
states=self.STATES,
run_ids=["external_task_run_id"],
poll_interval=0.2,
execution_dates=[timezone.datetime(2022, 1, 1)],
)
task = asyncio.create_task(trigger.run().__anext__())
await asyncio.sleep(0.5)
# It should not have produced a result
assert task.done() is False
# Progress the dag to a "success" state so that yields a TriggerEvent
dag_run.state = DagRunState.SUCCESS
session.commit()
# Mock the get_dag_run_count_by_run_ids_and_states function to return 1 second time
mock_get_dag_run_count.return_value = 1
await asyncio.sleep(0.5)
assert task.done() is True
# Prevents error when task is destroyed while in "pending" state
asyncio.get_event_loop().stop()
@pytest.mark.db_test
@pytest.mark.asyncio
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="Airflow 2 had a different implementation")
@mock.patch("airflow.sdk.execution_time.task_runner.RuntimeTaskInstance.get_dr_count")
@mock.patch("airflow.sdk.execution_time.task_runner.RuntimeTaskInstance.get_dagrun_state")
async def test_dag_state_trigger_af_3_return_type(
self, mock_get_dagrun_state, mock_get_dag_run_count, session
):
"""
Assert that the DagStateTrigger returns a tuple with classpath and event_data.
"""
mock_get_dag_run_count.return_value = 1
mock_get_dagrun_state.return_value = DagRunState.SUCCESS
dag = DAG(f"{self.DAG_ID}_return_type", schedule=None, start_date=timezone.datetime(2022, 1, 1))
dag_run = DagRun(
dag_id=dag.dag_id,
run_type="manual",
run_id="external_task_run_id",
logical_date=timezone.datetime(2022, 1, 1),
)
dag_run.state = DagRunState.SUCCESS
session.add(dag_run)
session.commit()
trigger = DagStateTrigger(
dag_id=dag.dag_id,
states=self.STATES,
run_ids=["external_task_run_id"],
poll_interval=0.2,
execution_dates=[timezone.datetime(2022, 1, 1)],
)
task = asyncio.create_task(trigger.run().__anext__())
await asyncio.sleep(0.5)
assert task.done() is True
result = task.result()
assert isinstance(result, TriggerEvent)
assert result.payload == (
"airflow.providers.standard.triggers.external_task.DagStateTrigger",
{
"dag_id": "test_dag_state_trigger_return_type",
"execution_dates": [
timezone.datetime(2022, 1, 1, 0, 0, tzinfo=timezone.utc),
],
"external_task_run_id": DagRunState.SUCCESS,
"poll_interval": 0.2,
"run_ids": ["external_task_run_id"],
"states": ["success", "fail"],
},
)
asyncio.get_event_loop().stop()
@pytest.mark.db_test
@pytest.mark.asyncio
@pytest.mark.skipif(AIRFLOW_V_3_0_PLUS, reason="Test only AF2 implementation.")
async def test_dag_state_trigger_af_2_return_type(self, session):
"""
Assert that the DagStateTrigger returns a tuple with classpath and event_data.
"""
dag = DAG(f"{self.DAG_ID}_return_type", schedule=None, start_date=timezone.datetime(2022, 1, 1))
dag_run = DagRun(
dag_id=dag.dag_id,
run_type="manual",
run_id="external_task_run_id",
execution_date=timezone.datetime(2022, 1, 1),
)
dag_run.state = DagRunState.SUCCESS
session.add(dag_run)
session.commit()
trigger = DagStateTrigger(
dag_id=dag.dag_id,
states=self.STATES,
run_ids=["external_task_run_id"],
poll_interval=0.2,
execution_dates=[timezone.datetime(2022, 1, 1)],
)
task = asyncio.create_task(trigger.run().__anext__())
await asyncio.sleep(0.5)
assert task.done() is True
result = task.result()
assert isinstance(result, TriggerEvent)
assert result.payload == (
"airflow.providers.standard.triggers.external_task.DagStateTrigger",
{
"dag_id": "test_dag_state_trigger_return_type",
"execution_dates": [
timezone.datetime(2022, 1, 1, 0, 0, tzinfo=timezone.utc),
],
# 'external_task_run_id': DagRunState.SUCCESS, # This is only appended in AF3
"poll_interval": 0.2,
"run_ids": ["external_task_run_id"],
"states": ["success", "fail"],
},
)
asyncio.get_event_loop().stop()
def test_serialization(self):
"""Asserts that the DagStateTrigger correctly serializes its arguments and classpath."""
trigger = DagStateTrigger(
dag_id=self.DAG_ID,
states=self.STATES,
run_ids=[TestDagStateTrigger.RUN_ID],
execution_dates=[TestDagStateTrigger.EXECUTION_DATE],
poll_interval=5,
)
classpath, kwargs = trigger.serialize()
assert classpath == "airflow.providers.standard.triggers.external_task.DagStateTrigger"
assert kwargs == {
"dag_id": self.DAG_ID,
"states": self.STATES,
"run_ids": [TestDagStateTrigger.RUN_ID],
"execution_dates": [TestDagStateTrigger.EXECUTION_DATE],
"poll_interval": 5,
}
def mocked_get_count(*args, **kwargs):
time.sleep(0.0001)
return 1
async def fake_async_fun():
await asyncio.sleep(0.00005)
| TestDagStateTrigger |
python | PrefectHQ__prefect | src/prefect/exceptions.py | {
"start": 7976,
"end": 8115
} | class ____(PrefectException, ValueError):
"""
Raised when a name contains characters that are not permitted.
"""
| InvalidNameError |
python | kamyu104__LeetCode-Solutions | Python/lexicographically-smallest-string-after-adjacent-removals.py | {
"start": 38,
"end": 912
} | class ____(object):
def lexicographicallySmallestString(self, s):
"""
:type s: str
:rtype: str
"""
dp = [[False]*len(s) for _ in xrange(len(s))]
for l in xrange(2, len(s)+1, 2):
for i in xrange(len(s)-(l-1)):
j = i+(l-1)
if abs(ord(s[i])-ord(s[j])) in (1, 25) and (j == i+1 or dp[i+1][j-1]):
dp[i][j] = True
continue
for k in xrange(i+1, j, 2):
if dp[i][k] and dp[k+1][j]:
dp[i][j] = True
break
dp2 = [""]*(len(s)+1)
for i in reversed(xrange(len(s))):
dp2[i] = s[i]+dp2[i+1]
for j in xrange(i+1, len(s), 2):
if dp[i][j]:
dp2[i] = min(dp2[i], dp2[j+1])
return dp2[0]
| Solution |
python | pydantic__pydantic | pydantic/networks.py | {
"start": 19375,
"end": 22234
} | class ____(AnyUrl):
"""A type that will accept any http or https URL.
* TLD not required
* Host not required
* Max length 2083
```python
from pydantic import BaseModel, HttpUrl, ValidationError
class MyModel(BaseModel):
url: HttpUrl
m = MyModel(url='http://www.example.com') # (1)!
print(m.url)
#> http://www.example.com/
try:
MyModel(url='ftp://invalid.url')
except ValidationError as e:
print(e)
'''
1 validation error for MyModel
url
URL scheme should be 'http' or 'https' [type=url_scheme, input_value='ftp://invalid.url', input_type=str]
'''
try:
MyModel(url='not a url')
except ValidationError as e:
print(e)
'''
1 validation error for MyModel
url
Input should be a valid URL, relative URL without a base [type=url_parsing, input_value='not a url', input_type=str]
'''
```
1. Note: mypy would prefer `m = MyModel(url=HttpUrl('http://www.example.com'))`, but Pydantic will convert the string to an HttpUrl instance anyway.
"International domains" (e.g. a URL where the host or TLD includes non-ascii characters) will be encoded via
[punycode](https://en.wikipedia.org/wiki/Punycode) (see
[this article](https://www.xudongz.com/blog/2017/idn-phishing/) for a good description of why this is important):
```python
from pydantic import BaseModel, HttpUrl
class MyModel(BaseModel):
url: HttpUrl
m1 = MyModel(url='http://puny£code.com')
print(m1.url)
#> http://xn--punycode-eja.com/
m2 = MyModel(url='https://www.аррӏе.com/')
print(m2.url)
#> https://www.xn--80ak6aa92e.com/
m3 = MyModel(url='https://www.example.珠宝/')
print(m3.url)
#> https://www.example.xn--pbt977c/
```
!!! warning "Underscores in Hostnames"
In Pydantic, underscores are allowed in all parts of a domain except the TLD.
Technically this might be wrong - in theory the hostname cannot have underscores, but subdomains can.
To explain this; consider the following two cases:
- `exam_ple.co.uk`: the hostname is `exam_ple`, which should not be allowed since it contains an underscore.
- `foo_bar.example.com` the hostname is `example`, which should be allowed since the underscore is in the subdomain.
Without having an exhaustive list of TLDs, it would be impossible to differentiate between these two. Therefore
underscores are allowed, but you can always do further validation in a validator if desired.
Also, Chrome, Firefox, and Safari all currently accept `http://exam_ple.com` as a URL, so we're in good
(or at least big) company.
"""
_constraints = UrlConstraints(max_length=2083, allowed_schemes=['http', 'https'])
| HttpUrl |
python | realpython__materials | thread-safety-locks/bank_multithreaded_withdrawal.py | {
"start": 64,
"end": 622
} | class ____:
def __init__(self):
self.balance = 1000
def withdraw(self, amount):
if self.balance >= amount:
new_balance = self.balance - amount
time.sleep(0.1) # Simulate a delay
self.balance = new_balance
else:
raise ValueError("Insufficient balance")
account = BankAccount()
with ThreadPoolExecutor(max_workers=2) as executor:
executor.submit(account.withdraw, 500)
executor.submit(account.withdraw, 700)
print(f"Final account balance: {account.balance}")
| BankAccount |
python | Textualize__textual | tests/test_validation.py | {
"start": 2725,
"end": 8907
} | class ____(Validator):
def validate(self, value: str) -> ValidationResult:
return self.failure(value=value, description="ABC")
def describe_failure(self, failure: Failure) -> str | None:
return "describe_failure"
def test_Failure_description_describe_and_description_inside_validate():
# This is kind of a weird case - there's no reason to supply both of
# these but lets still make sure we're sensible about how we handle it.
validator = ValidatorWithFailureMessageAndDescribe()
result = validator.validate("x")
assert result.failures == [Failure(validator, "x", "ABC")]
@pytest.mark.parametrize(
"value, minimum, maximum, expected_result",
[
("123", None, None, True), # valid number, no range
("-123", None, None, True), # valid negative number, no range
("123.45", None, None, True), # valid float, no range
("1.23e-4", None, None, True), # valid scientific notation, no range
("abc", None, None, False), # non-numeric string, no range
("123", 100, 200, True), # valid number within range
("99", 100, 200, False), # valid number but not in range
("201", 100, 200, False), # valid number but not in range
("1.23e4", 0, 50000, True), # valid scientific notation within range
("inf", None, None, False), # infinity never valid
("nan", None, None, False), # nan never valid
("-inf", None, None, False), # nan never valid
("-4", 0, 5, False), # valid negative number, out of range with zero
("2", -3, 0, False), # valid number out of range with zero
("-2", -3, 0, True), # negative in range
],
)
def test_Number_validate(value, minimum, maximum, expected_result):
validator = Number(minimum=minimum, maximum=maximum)
result = validator.validate(value)
assert result.is_valid == expected_result
@pytest.mark.parametrize(
"regex, value, expected_result",
[
(r"\d+", "123", True), # matches regex for one or more digits
(r"\d+", "abc", False), # does not match regex for one or more digits
(r"[a-z]+", "abc", True), # matches regex for one or more lowercase letters
(
r"[a-z]+",
"ABC",
False,
), # does not match regex for one or more lowercase letters
(r"\w+", "abc123", True), # matches regex for one or more word characters
(r"\w+", "!@#", False), # does not match regex for one or more word characters
],
)
def test_Regex_validate(regex, value, expected_result):
validator = Regex(regex)
result = validator.validate(value)
assert result.is_valid == expected_result
@pytest.mark.parametrize(
"value, minimum, maximum, expected_result",
[
("123", None, None, True), # valid integer, no range
("-123", None, None, True), # valid negative integer, no range
("123.45", None, None, False), # float, not a valid integer
("1.23e-4", None, None, False), # scientific notation, not a valid integer
("abc", None, None, False), # non-numeric string, not a valid integer
("123", 100, 200, True), # valid integer within range
("99", 100, 200, False), # valid integer but not in range
("201", 100, 200, False), # valid integer but not in range
("1.23e4", None, None, False), # valid scientific notation, even resolving to an integer, is not valid
("123.", None, None, False), # periods not valid in integers
("123_456", None, None, True), # underscores are valid python
("_123_456", None, None, False), # leading underscores are not valid python
("-123", -123, -123, True), # valid negative number in minimal range
],
)
def test_Integer_validate(value, minimum, maximum, expected_result):
validator = Integer(minimum=minimum, maximum=maximum)
result = validator.validate(value)
assert result.is_valid == expected_result
@pytest.mark.parametrize(
"value, min_length, max_length, expected_result",
[
("", None, None, True), # empty string
("test", None, None, True), # any string with no restrictions
("test", 5, None, False), # shorter than minimum length
("test", None, 3, False), # longer than maximum length
("test", 4, 4, True), # exactly matches minimum and maximum length
("test", 2, 6, True), # within length range
],
)
def test_Length_validate(value, min_length, max_length, expected_result):
validator = Length(minimum=min_length, maximum=max_length)
result = validator.validate(value)
assert result.is_valid == expected_result
@pytest.mark.parametrize(
"value, expected_result",
[
("http://example.com", True), # valid URL
("https://example.com", True), # valid URL with https
("www.example.com", False), # missing scheme
("://example.com", False), # invalid URL (no scheme)
("https:///path", False), # missing netloc
(
"redis://username:pass[word@localhost:6379/0",
False,
), # invalid URL characters
("", False), # empty string
],
)
def test_URL_validate(value, expected_result):
validator = URL()
result = validator.validate(value)
assert result.is_valid == expected_result
@pytest.mark.parametrize(
"function, failure_description, is_valid",
[
((lambda value: True), None, True),
((lambda value: False), "failure!", False),
],
)
def test_Function_validate(function, failure_description, is_valid):
validator = Function(function, failure_description)
result = validator.validate("x")
assert result.is_valid is is_valid
if result.failure_descriptions:
assert result.failure_descriptions[0] == failure_description
def test_Integer_failure_description_when_NotANumber():
"""Regression test for https://github.com/Textualize/textual/issues/4413"""
validator = Integer()
result = validator.validate("x")
assert result.is_valid is False
assert result.failure_descriptions[0] == "Must be a valid integer."
| ValidatorWithFailureMessageAndDescribe |
python | pypa__pip | src/pip/_internal/exceptions.py | {
"start": 10466,
"end": 10540
} | class ____(InstallationError):
"""Unsupported wheel."""
| UnsupportedWheel |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/check_ops_test.py | {
"start": 33427,
"end": 35391
} | class ____(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_negative(self):
frank = constant_op.constant([-1, -2], name="frank")
with ops.control_dependencies([check_ops.assert_negative(frank)]):
out = array_ops.identity(frank)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1
def test_raises_when_positive(self):
doug = constant_op.constant([1, 2], name="doug")
with self.assertRaisesOpError( # pylint:disable=g-error-prone-assert-raises
"fail"):
with ops.control_dependencies(
[check_ops.assert_negative(
doug, message="fail")]):
out = array_ops.identity(doug)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1
def test_raises_when_zero(self):
claire = constant_op.constant([0], name="claire")
with self.assertRaisesOpError( # pylint:disable=g-error-prone-assert-raises
"x < 0 did not hold"):
with ops.control_dependencies([check_ops.assert_negative(claire)]):
out = array_ops.identity(claire)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_empty_tensor_doesnt_raise(self):
# A tensor is negative when it satisfies:
# For every element x_i in x, x_i < 0
# and an empty tensor has no elements, so this is trivially satisfied.
# This is standard set theory.
empty = constant_op.constant([], name="empty")
with ops.control_dependencies([check_ops.assert_negative(empty)]):
out = array_ops.identity(empty)
self.evaluate(out)
def test_static_check_in_graph_mode(self):
with ops.Graph().as_default():
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Custom error message"):
check_ops.assert_negative(1, message="Custom error message")
# pylint:disable=g-error-prone-assert-raises
| AssertNegativeTest |
python | psf__black | tests/data/cases/comments9.py | {
"start": 517,
"end": 658
} | class ____:
pass
some = statement
# This should be stick to the statement above
# This should be split from the above by two lines
| MyClass |
python | sympy__sympy | sympy/simplify/hyperexpand.py | {
"start": 30552,
"end": 33136
} | class ____:
""" A collection of formulae to use as origins. """
def __init__(self):
""" Doing this globally at module init time is a pain ... """
self.symbolic_formulae = {}
self.concrete_formulae = {}
self.formulae = []
add_formulae(self.formulae)
# Now process the formulae into a helpful form.
# These dicts are indexed by (p, q).
for f in self.formulae:
sizes = f.func.sizes
if len(f.symbols) > 0:
self.symbolic_formulae.setdefault(sizes, []).append(f)
else:
inv = f.func.build_invariants()
self.concrete_formulae.setdefault(sizes, {})[inv] = f
def lookup_origin(self, func):
"""
Given the suitable target ``func``, try to find an origin in our
knowledge base.
Examples
========
>>> from sympy.simplify.hyperexpand import (FormulaCollection,
... Hyper_Function)
>>> f = FormulaCollection()
>>> f.lookup_origin(Hyper_Function((), ())).closed_form
exp(_z)
>>> f.lookup_origin(Hyper_Function([1], ())).closed_form
HyperRep_power1(-1, _z)
>>> from sympy import S
>>> i = Hyper_Function([S('1/4'), S('3/4 + 4')], [S.Half])
>>> f.lookup_origin(i).closed_form
HyperRep_sqrts1(-1/4, _z)
"""
inv = func.build_invariants()
sizes = func.sizes
if sizes in self.concrete_formulae and \
inv in self.concrete_formulae[sizes]:
return self.concrete_formulae[sizes][inv]
# We don't have a concrete formula. Try to instantiate.
if sizes not in self.symbolic_formulae:
return None # Too bad...
possible = []
for f in self.symbolic_formulae[sizes]:
repls = f.find_instantiations(func)
for repl in repls:
func2 = f.func.xreplace(repl)
if not func2._is_suitable_origin():
continue
diff = func2.difficulty(func)
if diff == -1:
continue
possible.append((diff, repl, f, func2))
# find the nearest origin
possible.sort(key=lambda x: x[0])
for _, repl, f, func2 in possible:
f2 = Formula(func2, f.z, None, [], f.B.subs(repl),
f.C.subs(repl), f.M.subs(repl))
if not any(e.has(S.NaN, oo, -oo, zoo) for e in [f2.B, f2.M, f2.C]):
return f2
return None
| FormulaCollection |
python | keras-team__keras | keras/src/ops/linalg.py | {
"start": 22665,
"end": 26736
} | class ____(Operation):
def __init__(self, has_aux=False, *, name=None):
super().__init__(name=name)
self.has_aux = has_aux
def call(self, fun, primals, tangents):
"""Computes the JVP of `fun` at `primals` along `tangents`.
Args:
fun: A callable that takes tensors (or nested structures) as input
and returns a tensor (or nested structure) as output.
primals: Input tensors (or nested structures) at which the Jacobian
of `fun` is evaluated.
tangents: Tensors (or nested structures) representing the direction
vectors for the JVP. Must have the same structure as
`primals`.
Returns:
If `has_aux` is False:
A tuple (primals_out, tangents_out) where:
- primals_out: Output of `fun(*primals)`
- tangents_out: JVP of `fun` at `primals` along `tangents`
If `has_aux` is True:
A tuple (primals_out, tangents_out, aux) where:
- aux: Auxiliary data returned by `fun`
"""
return backend.linalg.jvp(fun, primals, tangents, has_aux=self.has_aux)
def compute_output_spec(self, fun, primals, tangents):
# Infer primal output spec
if self.has_aux:
primals_out_spec, aux_spec = backend.compute_output_spec(
fun, *primals
)
else:
primals_out_spec = backend.compute_output_spec(fun, *primals)
# Tangents output should match primals output in structure and shape
tangents_out_spec = tree.map_structure(
lambda x: KerasTensor(x.shape, x.dtype), primals_out_spec
)
if self.has_aux:
return primals_out_spec, tangents_out_spec, aux_spec
return primals_out_spec, tangents_out_spec
@keras_export(["keras.ops.jvp", "keras.ops.linalg.jvp"])
def jvp(fun, primals, tangents, has_aux=False):
"""Computes a (forward-mode) Jacobian-vector product of `fun`.
Args:
fun: Function to be differentiated. Its arguments should be arrays,
scalars, or standard Python containers of arrays or scalars. It
should return an array, scalar, or standard Python container of
arrays or scalars.
primals: The primal values at which the Jacobian of `fun` should be
evaluated. Should be either a tuple or a list of arguments,
and its length should be equal to the number of positional
parameters of `fun`.
tangents: The tangent vector for which the Jacobian-vector product
should be evaluated. Should be either a tuple or a list of
tangents, with the same tree structure and array shapes as
`primals`.
has_aux: Optional, bool. Indicates whether `fun` returns a pair where
the first element is considered the output of the mathematical
function to be differentiated and the second element is
auxiliary data. Default is False.
Returns:
If `has_aux` is False, returns a (`primals_out`, `tangents_out`) pair,
where `primals_out` is `fun(*primals)`, and `tangents_out` is the
Jacobian-vector product of `fun` evaluated at `primals` with
`tangents`. The `tangents_out` value has the same Python tree
structure and shapes as `primals_out`.
If `has_aux` is True, returns a (`primals_out`, `tangents_out`, `aux`)
tuple where `aux` is the auxiliary data returned by `fun`.
Example:
>>> from keras import ops
>>> a1, a2 = ops.convert_to_tensor(0.1), ops.convert_to_tensor(0.2)
>>> primals, tangents = ops.jvp(ops.sin, (a1,), (a2,))
>>> primals
0.09983342
>>> tangents
0.19900084
"""
if any_symbolic_tensors((primals, tangents)):
return JVP(has_aux=has_aux).symbolic_call(fun, primals, tangents)
return backend.linalg.jvp(fun, primals, tangents, has_aux=has_aux)
| JVP |
python | allegroai__clearml | clearml/backend_api/services/v2_13/models.py | {
"start": 78108,
"end": 83012
} | class ____(Response):
"""
Response of models.get_all endpoint.
:param models: Models list
:type models: Sequence[Model]
"""
_service = "models"
_action = "get_all"
_version = "2.13"
_schema = {
"definitions": {
"model": {
"properties": {
"comment": {
"description": "Model comment",
"type": ["string", "null"],
},
"company": {
"description": "Company id",
"type": ["string", "null"],
},
"created": {
"description": "Model creation time",
"format": "date-time",
"type": ["string", "null"],
},
"design": {
"additionalProperties": True,
"description": "Json object representing the model design. Should be identical to the network design of the task which created the model",
"type": ["object", "null"],
},
"framework": {
"description": "Framework on which the model is based. Should be identical to the framework of the task which created the model",
"type": ["string", "null"],
},
"id": {"description": "Model id", "type": ["string", "null"]},
"labels": {
"additionalProperties": {"type": "integer"},
"description": "Json object representing the ids of the labels in the model. The keys are the layers' names and the values are the ids.",
"type": ["object", "null"],
},
"name": {"description": "Model name", "type": ["string", "null"]},
"parent": {
"description": "Parent model ID",
"type": ["string", "null"],
},
"project": {
"description": "Associated project ID",
"type": ["string", "null"],
},
"ready": {
"description": "Indication if the model is final and can be used by other tasks",
"type": ["boolean", "null"],
},
"system_tags": {
"description": "System tags. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": ["array", "null"],
},
"tags": {
"description": "User-defined tags",
"items": {"type": "string"},
"type": ["array", "null"],
},
"task": {
"description": "Task ID of task in which the model was created",
"type": ["string", "null"],
},
"ui_cache": {
"additionalProperties": True,
"description": "UI cache for this model",
"type": ["object", "null"],
},
"uri": {
"description": "URI for the model, pointing to the destination storage.",
"type": ["string", "null"],
},
"user": {
"description": "Associated user id",
"type": ["string", "null"],
},
},
"type": "object",
}
},
"properties": {
"models": {
"description": "Models list",
"items": {"$ref": "#/definitions/model"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, models: Optional[List[Any]] = None, **kwargs: Any) -> None:
super(GetAllResponse, self).__init__(**kwargs)
self.models = models
@schema_property("models")
def models(self) -> Optional[List[Any]]:
return self._property_models
@models.setter
def models(self, value: Optional[List[Any]]) -> None:
if value is None:
self._property_models = None
return
self.assert_isinstance(value, "models", (list, tuple))
if any((isinstance(v, dict) for v in value)):
value = [Model.from_dict(v) if isinstance(v, dict) else v for v in value]
else:
self.assert_isinstance(value, "models", Model, is_array=True)
self._property_models = value
| GetAllResponse |
python | getsentry__sentry | src/sentry/snuba/sessions_v2.py | {
"start": 7357,
"end": 7974
} | class ____:
def get_snuba_columns(self):
return []
def get_snuba_groupby(self):
return []
def get_keys_for_row(self, row):
return [
("session.status", key)
for key in ["healthy", "abnormal", "crashed", "errored", "unhandled"]
]
# NOTE: in the future we might add new `user_agent` and `os` fields
GROUPBY_MAP: dict[str, _GroupBy] = {
"project": SimpleGroupBy("project_id", "project"),
"environment": SimpleGroupBy("environment"),
"release": SimpleGroupBy("release"),
"session.status": SessionStatusGroupBy(),
}
| SessionStatusGroupBy |
python | gevent__gevent | src/greentest/3.12/test_socket.py | {
"start": 214786,
"end": 217001
} | class ____(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(TimeoutError, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except TimeoutError:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
foo = self.serv.accept()
except TimeoutError:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except BaseException as e:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(type(e), e, traceback.format_exc()))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
| TCPTimeoutTest |
python | sympy__sympy | sympy/physics/vector/frame.py | {
"start": 2414,
"end": 57260
} | class ____:
"""A reference frame in classical mechanics.
ReferenceFrame is a class used to represent a reference frame in classical
mechanics. It has a standard basis of three unit vectors in the frame's
x, y, and z directions.
It also can have a rotation relative to a parent frame; this rotation is
defined by a direction cosine matrix relating this frame's basis vectors to
the parent frame's basis vectors. It can also have an angular velocity
vector, defined in another frame.
"""
_count = 0
def __init__(self, name, indices=None, latexs=None, variables=None):
"""ReferenceFrame initialization method.
A ReferenceFrame has a set of orthonormal basis vectors, along with
orientations relative to other ReferenceFrames and angular velocities
relative to other ReferenceFrames.
Parameters
==========
indices : tuple of str
Enables the reference frame's basis unit vectors to be accessed by
Python's square bracket indexing notation using the provided three
indice strings and alters the printing of the unit vectors to
reflect this choice.
latexs : tuple of str
Alters the LaTeX printing of the reference frame's basis unit
vectors to the provided three valid LaTeX strings.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, vlatex
>>> N = ReferenceFrame('N')
>>> N.x
N.x
>>> O = ReferenceFrame('O', indices=('1', '2', '3'))
>>> O.x
O['1']
>>> O['1']
O['1']
>>> P = ReferenceFrame('P', latexs=('A1', 'A2', 'A3'))
>>> vlatex(P.x)
'A1'
``symbols()`` can be used to create multiple Reference Frames in one
step, for example:
>>> from sympy.physics.vector import ReferenceFrame
>>> from sympy import symbols
>>> A, B, C = symbols('A B C', cls=ReferenceFrame)
>>> D, E = symbols('D E', cls=ReferenceFrame, indices=('1', '2', '3'))
>>> A[0]
A_x
>>> D.x
D['1']
>>> E.y
E['2']
>>> type(A) == type(D)
True
Unit dyads for the ReferenceFrame can be accessed through the attributes ``xx``, ``xy``, etc. For example:
>>> from sympy.physics.vector import ReferenceFrame
>>> N = ReferenceFrame('N')
>>> N.yz
(N.y|N.z)
>>> N.zx
(N.z|N.x)
>>> P = ReferenceFrame('P', indices=['1', '2', '3'])
>>> P.xx
(P['1']|P['1'])
>>> P.zy
(P['3']|P['2'])
Unit dyadic is also accessible via the ``u`` attribute:
>>> from sympy.physics.vector import ReferenceFrame
>>> N = ReferenceFrame('N')
>>> N.u
(N.x|N.x) + (N.y|N.y) + (N.z|N.z)
>>> P = ReferenceFrame('P', indices=['1', '2', '3'])
>>> P.u
(P['1']|P['1']) + (P['2']|P['2']) + (P['3']|P['3'])
"""
if not isinstance(name, str):
raise TypeError('Need to supply a valid name')
# The if statements below are for custom printing of basis-vectors for
# each frame.
# First case, when custom indices are supplied
if indices is not None:
if not isinstance(indices, (tuple, list)):
raise TypeError('Supply the indices as a list')
if len(indices) != 3:
raise ValueError('Supply 3 indices')
for i in indices:
if not isinstance(i, str):
raise TypeError('Indices must be strings')
self.str_vecs = [(name + '[\'' + indices[0] + '\']'),
(name + '[\'' + indices[1] + '\']'),
(name + '[\'' + indices[2] + '\']')]
self.pretty_vecs = [(name.lower() + "_" + indices[0]),
(name.lower() + "_" + indices[1]),
(name.lower() + "_" + indices[2])]
self.latex_vecs = [(r"\mathbf{\hat{%s}_{%s}}" % (name.lower(),
indices[0])),
(r"\mathbf{\hat{%s}_{%s}}" % (name.lower(),
indices[1])),
(r"\mathbf{\hat{%s}_{%s}}" % (name.lower(),
indices[2]))]
self.indices = indices
# Second case, when no custom indices are supplied
else:
self.str_vecs = [(name + '.x'), (name + '.y'), (name + '.z')]
self.pretty_vecs = [name.lower() + "_x",
name.lower() + "_y",
name.lower() + "_z"]
self.latex_vecs = [(r"\mathbf{\hat{%s}_x}" % name.lower()),
(r"\mathbf{\hat{%s}_y}" % name.lower()),
(r"\mathbf{\hat{%s}_z}" % name.lower())]
self.indices = ['x', 'y', 'z']
# Different step, for custom latex basis vectors
if latexs is not None:
if not isinstance(latexs, (tuple, list)):
raise TypeError('Supply the indices as a list')
if len(latexs) != 3:
raise ValueError('Supply 3 indices')
for i in latexs:
if not isinstance(i, str):
raise TypeError('Latex entries must be strings')
self.latex_vecs = latexs
self.name = name
self._var_dict = {}
# The _dcm_dict dictionary will only store the dcms of adjacent
# parent-child relationships. The _dcm_cache dictionary will store
# calculated dcm along with all content of _dcm_dict for faster
# retrieval of dcms.
self._dcm_dict = {}
self._dcm_cache = {}
self._ang_vel_dict = {}
self._ang_acc_dict = {}
self._dlist = [self._dcm_dict, self._ang_vel_dict, self._ang_acc_dict]
self._cur = 0
self._x = Vector([(Matrix([1, 0, 0]), self)])
self._y = Vector([(Matrix([0, 1, 0]), self)])
self._z = Vector([(Matrix([0, 0, 1]), self)])
# Associate coordinate symbols wrt this frame
if variables is not None:
if not isinstance(variables, (tuple, list)):
raise TypeError('Supply the variable names as a list/tuple')
if len(variables) != 3:
raise ValueError('Supply 3 variable names')
for i in variables:
if not isinstance(i, str):
raise TypeError('Variable names must be strings')
else:
variables = [name + '_x', name + '_y', name + '_z']
self.varlist = (CoordinateSym(variables[0], self, 0),
CoordinateSym(variables[1], self, 1),
CoordinateSym(variables[2], self, 2))
ReferenceFrame._count += 1
self.index = ReferenceFrame._count
def __getitem__(self, ind):
"""
Returns basis vector for the provided index, if the index is a string.
If the index is a number, returns the coordinate variable correspon-
-ding to that index.
"""
if not isinstance(ind, str):
if ind < 3:
return self.varlist[ind]
else:
raise ValueError("Invalid index provided")
if self.indices[0] == ind:
return self.x
if self.indices[1] == ind:
return self.y
if self.indices[2] == ind:
return self.z
else:
raise ValueError('Not a defined index')
def __iter__(self):
return iter([self.x, self.y, self.z])
def __str__(self):
"""Returns the name of the frame. """
return self.name
__repr__ = __str__
def _dict_list(self, other, num):
"""Returns an inclusive list of reference frames that connect this
reference frame to the provided reference frame.
Parameters
==========
other : ReferenceFrame
The other reference frame to look for a connecting relationship to.
num : integer
``0``, ``1``, and ``2`` will look for orientation, angular
velocity, and angular acceleration relationships between the two
frames, respectively.
Returns
=======
list
Inclusive list of reference frames that connect this reference
frame to the other reference frame.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame
>>> A = ReferenceFrame('A')
>>> B = ReferenceFrame('B')
>>> C = ReferenceFrame('C')
>>> D = ReferenceFrame('D')
>>> B.orient_axis(A, A.x, 1.0)
>>> C.orient_axis(B, B.x, 1.0)
>>> D.orient_axis(C, C.x, 1.0)
>>> D._dict_list(A, 0)
[D, C, B, A]
Raises
======
ValueError
When no path is found between the two reference frames or ``num``
is an incorrect value.
"""
connect_type = {0: 'orientation',
1: 'angular velocity',
2: 'angular acceleration'}
if num not in connect_type.keys():
raise ValueError('Valid values for num are 0, 1, or 2.')
possible_connecting_paths = [[self]]
oldlist = [[]]
while possible_connecting_paths != oldlist:
oldlist = possible_connecting_paths.copy()
for frame_list in possible_connecting_paths:
frames_adjacent_to_last = frame_list[-1]._dlist[num].keys()
for adjacent_frame in frames_adjacent_to_last:
if adjacent_frame not in frame_list:
connecting_path = frame_list + [adjacent_frame]
if connecting_path not in possible_connecting_paths:
possible_connecting_paths.append(connecting_path)
for connecting_path in oldlist:
if connecting_path[-1] != other:
possible_connecting_paths.remove(connecting_path)
possible_connecting_paths.sort(key=len)
if len(possible_connecting_paths) != 0:
return possible_connecting_paths[0] # selects the shortest path
msg = 'No connecting {} path found between {} and {}.'
raise ValueError(msg.format(connect_type[num], self.name, other.name))
def _w_diff_dcm(self, otherframe):
"""Angular velocity from time differentiating the DCM. """
from sympy.physics.vector.functions import dynamicsymbols
dcm2diff = otherframe.dcm(self)
diffed = dcm2diff.diff(dynamicsymbols._t)
angvelmat = diffed * dcm2diff.T
w1 = trigsimp(expand(angvelmat[7]), recursive=True)
w2 = trigsimp(expand(angvelmat[2]), recursive=True)
w3 = trigsimp(expand(angvelmat[3]), recursive=True)
return Vector([(Matrix([w1, w2, w3]), otherframe)])
def variable_map(self, otherframe):
"""
Returns a dictionary which expresses the coordinate variables
of this frame in terms of the variables of otherframe.
If Vector.simp is True, returns a simplified version of the mapped
values. Else, returns them without simplification.
Simplification of the expressions may take time.
Parameters
==========
otherframe : ReferenceFrame
The other frame to map the variables to
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, dynamicsymbols
>>> A = ReferenceFrame('A')
>>> q = dynamicsymbols('q')
>>> B = A.orientnew('B', 'Axis', [q, A.z])
>>> A.variable_map(B)
{A_x: B_x*cos(q(t)) - B_y*sin(q(t)), A_y: B_x*sin(q(t)) + B_y*cos(q(t)), A_z: B_z}
"""
_check_frame(otherframe)
if (otherframe, Vector.simp) in self._var_dict:
return self._var_dict[(otherframe, Vector.simp)]
else:
vars_matrix = self.dcm(otherframe) * Matrix(otherframe.varlist)
mapping = {}
for i, x in enumerate(self):
if Vector.simp:
mapping[self.varlist[i]] = trigsimp(vars_matrix[i],
method='fu')
else:
mapping[self.varlist[i]] = vars_matrix[i]
self._var_dict[(otherframe, Vector.simp)] = mapping
return mapping
def ang_acc_in(self, otherframe):
"""Returns the angular acceleration Vector of the ReferenceFrame.
Effectively returns the Vector:
``N_alpha_B``
which represent the angular acceleration of B in N, where B is self,
and N is otherframe.
Parameters
==========
otherframe : ReferenceFrame
The ReferenceFrame which the angular acceleration is returned in.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> V = 10 * N.x
>>> A.set_ang_acc(N, V)
>>> A.ang_acc_in(N)
10*N.x
"""
_check_frame(otherframe)
if otherframe in self._ang_acc_dict:
return self._ang_acc_dict[otherframe]
else:
return self.ang_vel_in(otherframe).dt(otherframe)
def ang_vel_in(self, otherframe):
"""Returns the angular velocity Vector of the ReferenceFrame.
Effectively returns the Vector:
^N omega ^B
which represent the angular velocity of B in N, where B is self, and
N is otherframe.
Parameters
==========
otherframe : ReferenceFrame
The ReferenceFrame which the angular velocity is returned in.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> V = 10 * N.x
>>> A.set_ang_vel(N, V)
>>> A.ang_vel_in(N)
10*N.x
"""
_check_frame(otherframe)
flist = self._dict_list(otherframe, 1)
outvec = Vector(0)
for i in range(len(flist) - 1):
outvec += flist[i]._ang_vel_dict[flist[i + 1]]
return outvec
def dcm(self, otherframe):
r"""Returns the direction cosine matrix of this reference frame
relative to the provided reference frame.
The returned matrix can be used to express the orthogonal unit vectors
of this frame in terms of the orthogonal unit vectors of
``otherframe``.
Parameters
==========
otherframe : ReferenceFrame
The reference frame which the direction cosine matrix of this frame
is formed relative to.
Examples
========
The following example rotates the reference frame A relative to N by a
simple rotation and then calculates the direction cosine matrix of N
relative to A.
>>> from sympy import symbols, sin, cos
>>> from sympy.physics.vector import ReferenceFrame
>>> q1 = symbols('q1')
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> A.orient_axis(N, q1, N.x)
>>> N.dcm(A)
Matrix([
[1, 0, 0],
[0, cos(q1), -sin(q1)],
[0, sin(q1), cos(q1)]])
The second row of the above direction cosine matrix represents the
``N.y`` unit vector in N expressed in A. Like so:
>>> Ny = 0*A.x + cos(q1)*A.y - sin(q1)*A.z
Thus, expressing ``N.y`` in A should return the same result:
>>> N.y.express(A)
cos(q1)*A.y - sin(q1)*A.z
Notes
=====
It is important to know what form of the direction cosine matrix is
returned. If ``B.dcm(A)`` is called, it means the "direction cosine
matrix of B rotated relative to A". This is the matrix
:math:`{}^B\mathbf{C}^A` shown in the following relationship:
.. math::
\begin{bmatrix}
\hat{\mathbf{b}}_1 \\
\hat{\mathbf{b}}_2 \\
\hat{\mathbf{b}}_3
\end{bmatrix}
=
{}^B\mathbf{C}^A
\begin{bmatrix}
\hat{\mathbf{a}}_1 \\
\hat{\mathbf{a}}_2 \\
\hat{\mathbf{a}}_3
\end{bmatrix}.
:math:`{}^B\mathbf{C}^A` is the matrix that expresses the B unit
vectors in terms of the A unit vectors.
"""
_check_frame(otherframe)
# Check if the dcm wrt that frame has already been calculated
if otherframe in self._dcm_cache:
return self._dcm_cache[otherframe]
flist = self._dict_list(otherframe, 0)
outdcm = eye(3)
for i in range(len(flist) - 1):
outdcm = outdcm * flist[i]._dcm_dict[flist[i + 1]]
# After calculation, store the dcm in dcm cache for faster future
# retrieval
self._dcm_cache[otherframe] = outdcm
otherframe._dcm_cache[self] = outdcm.T
return outdcm
def _dcm(self, parent, parent_orient):
# If parent.oreint(self) is already defined,then
# update the _dcm_dict of parent while over write
# all content of self._dcm_dict and self._dcm_cache
# with new dcm relation.
# Else update _dcm_cache and _dcm_dict of both
# self and parent.
frames = self._dcm_cache.keys()
dcm_dict_del = []
dcm_cache_del = []
if parent in frames:
for frame in frames:
if frame in self._dcm_dict:
dcm_dict_del += [frame]
dcm_cache_del += [frame]
# Reset the _dcm_cache of this frame, and remove it from the
# _dcm_caches of the frames it is linked to. Also remove it from
# the _dcm_dict of its parent
for frame in dcm_dict_del:
del frame._dcm_dict[self]
for frame in dcm_cache_del:
del frame._dcm_cache[self]
# Reset the _dcm_dict
self._dcm_dict = self._dlist[0] = {}
# Reset the _dcm_cache
self._dcm_cache = {}
else:
# Check for loops and raise warning accordingly.
visited = []
queue = list(frames)
cont = True # Flag to control queue loop.
while queue and cont:
node = queue.pop(0)
if node not in visited:
visited.append(node)
neighbors = node._dcm_dict.keys()
for neighbor in neighbors:
if neighbor == parent:
warn('Loops are defined among the orientation of '
'frames. This is likely not desired and may '
'cause errors in your calculations.')
cont = False
break
queue.append(neighbor)
# Add the dcm relationship to _dcm_dict
self._dcm_dict.update({parent: parent_orient.T})
parent._dcm_dict.update({self: parent_orient})
# Update the dcm cache
self._dcm_cache.update({parent: parent_orient.T})
parent._dcm_cache.update({self: parent_orient})
    def orient_axis(self, parent, axis, angle):
        """Sets the orientation of this reference frame with respect to a
        parent reference frame by rotating through an angle about an axis fixed
        in the parent reference frame.

        Parameters
        ==========

        parent : ReferenceFrame
            Reference frame that this reference frame will be rotated relative
            to.
        axis : Vector
            Vector fixed in the parent frame about about which this frame is
            rotated. It need not be a unit vector and the rotation follows the
            right hand rule.
        angle : sympifiable
            Angle in radians by which it the frame is to be rotated.

        Warns
        ======

        UserWarning
            If the orientation creates a kinematic loop.

        Examples
        ========

        Setup variables for the examples:

        >>> from sympy import symbols
        >>> from sympy.physics.vector import ReferenceFrame
        >>> q1 = symbols('q1')
        >>> N = ReferenceFrame('N')
        >>> B = ReferenceFrame('B')
        >>> B.orient_axis(N, N.x, q1)

        The ``orient_axis()`` method generates a direction cosine matrix and
        its transpose which defines the orientation of B relative to N and vice
        versa. Once orient is called, ``dcm()`` outputs the appropriate
        direction cosine matrix:

        >>> B.dcm(N)
        Matrix([
        [1, 0, 0],
        [0, cos(q1), sin(q1)],
        [0, -sin(q1), cos(q1)]])
        >>> N.dcm(B)
        Matrix([
        [1, 0, 0],
        [0, cos(q1), -sin(q1)],
        [0, sin(q1), cos(q1)]])

        The following two lines show that the sense of the rotation can be
        defined by negating the vector direction or the angle. Both lines
        produce the same result.

        >>> B.orient_axis(N, -N.x, q1)
        >>> B.orient_axis(N, N.x, -q1)

        """
        from sympy.physics.vector.functions import dynamicsymbols
        _check_frame(parent)
        # Accept the legacy (angle, axis) argument order and swap silently
        # for backwards compatibility.
        if not isinstance(axis, Vector) and isinstance(angle, Vector):
            axis, angle = angle, axis
        axis = _check_vector(axis)
        theta = sympify(angle)
        # The axis must be fixed in the parent frame; a time-varying axis
        # would invalidate the simple angular velocity below.
        if not axis.dt(parent) == 0:
            raise ValueError('Axis cannot be time-varying.')
        unit_axis = axis.express(parent).normalize()
        unit_col = unit_axis.args[0][0]
        # Rodrigues-style rotation formula built from the unit axis u:
        # (I - u u^T) cos(theta) + [u]_x sin(theta) + u u^T, where the
        # Matrix literal below is the skew-symmetric cross-product matrix.
        parent_orient_axis = (
            (eye(3) - unit_col * unit_col.T) * cos(theta) +
            Matrix([[0, -unit_col[2], unit_col[1]],
                    [unit_col[2], 0, -unit_col[0]],
                    [-unit_col[1], unit_col[0], 0]]) *
            sin(theta) + unit_col * unit_col.T)
        self._dcm(parent, parent_orient_axis)
        # Angular velocity is the angle rate about the (fixed) rotation axis;
        # it is stored symmetrically with opposite sign in the parent.
        thetad = (theta).diff(dynamicsymbols._t)
        wvec = thetad*axis.express(parent).normalize()
        self._ang_vel_dict.update({parent: wvec})
        parent._ang_vel_dict.update({self: -wvec})
        # Reset the cached variable map; presumably invalidated by the new
        # orientation — confirm against _var_dict consumers.
        self._var_dict = {}
    def orient_explicit(self, parent, dcm):
        """Sets the orientation of this reference frame relative to another (parent) reference frame
        using a direction cosine matrix that describes the rotation from the parent to the child.

        Parameters
        ==========

        parent : ReferenceFrame
            Reference frame that this reference frame will be rotated relative
            to.
        dcm : Matrix, shape(3, 3)
            Direction cosine matrix that specifies the relative rotation
            between the two reference frames.

        Warns
        ======

        UserWarning
            If the orientation creates a kinematic loop.

        Examples
        ========

        Setup variables for the examples:

        >>> from sympy import symbols, Matrix, sin, cos
        >>> from sympy.physics.vector import ReferenceFrame
        >>> q1 = symbols('q1')
        >>> A = ReferenceFrame('A')
        >>> B = ReferenceFrame('B')
        >>> N = ReferenceFrame('N')

        A simple rotation of ``A`` relative to ``N`` about ``N.x`` is defined
        by the following direction cosine matrix:

        >>> dcm = Matrix([[1, 0, 0],
        ...               [0, cos(q1), -sin(q1)],
        ...               [0, sin(q1), cos(q1)]])
        >>> A.orient_explicit(N, dcm)
        >>> A.dcm(N)
        Matrix([
        [1, 0, 0],
        [0, cos(q1), sin(q1)],
        [0, -sin(q1), cos(q1)]])

        This is equivalent to using ``orient_axis()``:

        >>> B.orient_axis(N, N.x, q1)
        >>> B.dcm(N)
        Matrix([
        [1, 0, 0],
        [0, cos(q1), sin(q1)],
        [0, -sin(q1), cos(q1)]])

        **Note carefully that** ``N.dcm(B)`` **(the transpose) would be passed
        into** ``orient_explicit()`` **for** ``A.dcm(N)`` **to match**
        ``B.dcm(N)``:

        >>> A.orient_explicit(N, N.dcm(B))
        >>> A.dcm(N)
        Matrix([
        [1, 0, 0],
        [0, cos(q1), sin(q1)],
        [0, -sin(q1), cos(q1)]])

        """
        _check_frame(parent)
        # amounts must be a Matrix type object
        # (e.g. sympy.matrices.dense.MutableDenseMatrix).
        if not isinstance(dcm, MatrixBase):
            raise TypeError("Amounts must be a SymPy Matrix type object.")
        # orient_dcm() documents its argument as the child-to-parent rotation,
        # while this method accepts parent-to-child — hence the transpose.
        self.orient_dcm(parent, dcm.T)
    def orient_dcm(self, parent, dcm):
        """Sets the orientation of this reference frame relative to another (parent) reference frame
        using a direction cosine matrix that describes the rotation from the child to the parent.

        Parameters
        ==========

        parent : ReferenceFrame
            Reference frame that this reference frame will be rotated relative
            to.
        dcm : Matrix, shape(3, 3)
            Direction cosine matrix that specifies the relative rotation
            between the two reference frames.

        Warns
        ======

        UserWarning
            If the orientation creates a kinematic loop.

        Examples
        ========

        Setup variables for the examples:

        >>> from sympy import symbols, Matrix, sin, cos
        >>> from sympy.physics.vector import ReferenceFrame
        >>> q1 = symbols('q1')
        >>> A = ReferenceFrame('A')
        >>> B = ReferenceFrame('B')
        >>> N = ReferenceFrame('N')

        A simple rotation of ``A`` relative to ``N`` about ``N.x`` is defined
        by the following direction cosine matrix:

        >>> dcm = Matrix([[1, 0, 0],
        ...               [0, cos(q1), sin(q1)],
        ...               [0, -sin(q1), cos(q1)]])
        >>> A.orient_dcm(N, dcm)
        >>> A.dcm(N)
        Matrix([
        [1, 0, 0],
        [0, cos(q1), sin(q1)],
        [0, -sin(q1), cos(q1)]])

        This is equivalent to using ``orient_axis()``:

        >>> B.orient_axis(N, N.x, q1)
        >>> B.dcm(N)
        Matrix([
        [1, 0, 0],
        [0, cos(q1), sin(q1)],
        [0, -sin(q1), cos(q1)]])

        """
        _check_frame(parent)
        # amounts must be a Matrix type object
        # (e.g. sympy.matrices.dense.MutableDenseMatrix).
        if not isinstance(dcm, MatrixBase):
            raise TypeError("Amounts must be a SymPy Matrix type object.")
        # _dcm() receives the transpose of the child-to-parent matrix
        # accepted here (cf. orient_explicit, which passes dcm.T to us).
        self._dcm(parent, dcm.T)
        # Angular velocity derived from the time derivative of the DCM
        # (via _w_diff_dcm, defined elsewhere); stored symmetrically.
        wvec = self._w_diff_dcm(parent)
        self._ang_vel_dict.update({parent: wvec})
        parent._ang_vel_dict.update({self: -wvec})
        # Reset the cached variable map after the orientation change.
        self._var_dict = {}
def _rot(self, axis, angle):
"""DCM for simple axis 1,2,or 3 rotations."""
if axis == 1:
return Matrix([[1, 0, 0],
[0, cos(angle), -sin(angle)],
[0, sin(angle), cos(angle)]])
elif axis == 2:
return Matrix([[cos(angle), 0, sin(angle)],
[0, 1, 0],
[-sin(angle), 0, cos(angle)]])
elif axis == 3:
return Matrix([[cos(angle), -sin(angle), 0],
[sin(angle), cos(angle), 0],
[0, 0, 1]])
def _parse_consecutive_rotations(self, angles, rotation_order):
"""Helper for orient_body_fixed and orient_space_fixed.
Parameters
==========
angles : 3-tuple of sympifiable
Three angles in radians used for the successive rotations.
rotation_order : 3 character string or 3 digit integer
Order of the rotations. The order can be specified by the strings
``'XZX'``, ``'131'``, or the integer ``131``. There are 12 unique
valid rotation orders.
Returns
=======
amounts : list
List of sympifiables corresponding to the rotation angles.
rot_order : list
List of integers corresponding to the axis of rotation.
rot_matrices : list
List of DCM around the given axis with corresponding magnitude.
"""
amounts = list(angles)
for i, v in enumerate(amounts):
if not isinstance(v, Vector):
amounts[i] = sympify(v)
approved_orders = ('123', '231', '312', '132', '213', '321', '121',
'131', '212', '232', '313', '323', '')
# make sure XYZ => 123
rot_order = translate(str(rotation_order), 'XYZxyz', '123123')
if rot_order not in approved_orders:
raise TypeError('The rotation order is not a valid order.')
rot_order = [int(r) for r in rot_order]
if not (len(amounts) == 3 & len(rot_order) == 3):
raise TypeError('Body orientation takes 3 values & 3 orders')
rot_matrices = [self._rot(order, amount)
for (order, amount) in zip(rot_order, amounts)]
return amounts, rot_order, rot_matrices
    def orient_body_fixed(self, parent, angles, rotation_order):
        """Rotates this reference frame relative to the parent reference frame
        by right hand rotating through three successive body fixed simple axis
        rotations. Each subsequent axis of rotation is about the "body fixed"
        unit vectors of a new intermediate reference frame. This type of
        rotation is also referred to rotating through the `Euler and Tait-Bryan
        Angles`_.

        .. _Euler and Tait-Bryan Angles: https://en.wikipedia.org/wiki/Euler_angles

        The computed angular velocity in this method is by default expressed in
        the child's frame, so it is most preferable to use ``u1 * child.x + u2 *
        child.y + u3 * child.z`` as generalized speeds.

        Parameters
        ==========

        parent : ReferenceFrame
            Reference frame that this reference frame will be rotated relative
            to.
        angles : 3-tuple of sympifiable
            Three angles in radians used for the successive rotations.
        rotation_order : 3 character string or 3 digit integer
            Order of the rotations about each intermediate reference frames'
            unit vectors. The Euler rotation about the X, Z', X'' axes can be
            specified by the strings ``'XZX'``, ``'131'``, or the integer
            ``131``. There are 12 unique valid rotation orders (6 Euler and 6
            Tait-Bryan): zxz, xyx, yzy, zyz, xzx, yxy, xyz, yzx, zxy, xzy, zyx,
            and yxz.

        Warns
        ======

        UserWarning
            If the orientation creates a kinematic loop.

        Examples
        ========

        Setup variables for the examples:

        >>> from sympy import symbols
        >>> from sympy.physics.vector import ReferenceFrame
        >>> q1, q2, q3 = symbols('q1, q2, q3')
        >>> N = ReferenceFrame('N')
        >>> B = ReferenceFrame('B')
        >>> B1 = ReferenceFrame('B1')
        >>> B2 = ReferenceFrame('B2')
        >>> B3 = ReferenceFrame('B3')

        For example, a classic Euler Angle rotation can be done by:

        >>> B.orient_body_fixed(N, (q1, q2, q3), 'XYX')
        >>> B.dcm(N)
        Matrix([
        [ cos(q2), sin(q1)*sin(q2), -sin(q2)*cos(q1)],
        [sin(q2)*sin(q3), -sin(q1)*sin(q3)*cos(q2) + cos(q1)*cos(q3), sin(q1)*cos(q3) + sin(q3)*cos(q1)*cos(q2)],
        [sin(q2)*cos(q3), -sin(q1)*cos(q2)*cos(q3) - sin(q3)*cos(q1), -sin(q1)*sin(q3) + cos(q1)*cos(q2)*cos(q3)]])

        This rotates reference frame B relative to reference frame N through
        ``q1`` about ``N.x``, then rotates B again through ``q2`` about
        ``B.y``, and finally through ``q3`` about ``B.x``. It is equivalent to
        three successive ``orient_axis()`` calls:

        >>> B1.orient_axis(N, N.x, q1)
        >>> B2.orient_axis(B1, B1.y, q2)
        >>> B3.orient_axis(B2, B2.x, q3)
        >>> B3.dcm(N)
        Matrix([
        [ cos(q2), sin(q1)*sin(q2), -sin(q2)*cos(q1)],
        [sin(q2)*sin(q3), -sin(q1)*sin(q3)*cos(q2) + cos(q1)*cos(q3), sin(q1)*cos(q3) + sin(q3)*cos(q1)*cos(q2)],
        [sin(q2)*cos(q3), -sin(q1)*cos(q2)*cos(q3) - sin(q3)*cos(q1), -sin(q1)*sin(q3) + cos(q1)*cos(q2)*cos(q3)]])

        Acceptable rotation orders are of length 3, expressed in as a string
        ``'XYZ'`` or ``'123'`` or integer ``123``. Rotations about an axis
        twice in a row are prohibited.

        >>> B.orient_body_fixed(N, (q1, q2, 0), 'ZXZ')
        >>> B.orient_body_fixed(N, (q1, q2, 0), '121')
        >>> B.orient_body_fixed(N, (q1, q2, q3), 123)

        """
        from sympy.physics.vector.functions import dynamicsymbols
        _check_frame(parent)
        amounts, rot_order, rot_matrices = self._parse_consecutive_rotations(
            angles, rotation_order)
        # Compose the three elementary DCMs in rotation order.
        self._dcm(parent, rot_matrices[0] * rot_matrices[1] * rot_matrices[2])
        # Each rotation contributes its angle rate in the slot of its own
        # rotation axis; rot_vecs[i] holds that rate as a 3x1 column.
        rot_vecs = [zeros(3, 1) for _ in range(3)]
        for i, order in enumerate(rot_order):
            rot_vecs[i][order - 1] = amounts[i].diff(dynamicsymbols._t)
        # Map the earlier rates through the later elementary rotations so the
        # total angular velocity is expressed in the child (self) frame.
        u1, u2, u3 = rot_vecs[2] + rot_matrices[2].T * (
            rot_vecs[1] + rot_matrices[1].T * rot_vecs[0])
        wvec = u1 * self.x + u2 * self.y + u3 * self.z # There is a double -
        self._ang_vel_dict.update({parent: wvec})
        parent._ang_vel_dict.update({self: -wvec})
        # Reset the cached variable map after the orientation change.
        self._var_dict = {}
    def orient_space_fixed(self, parent, angles, rotation_order):
        """Rotates this reference frame relative to the parent reference frame
        by right hand rotating through three successive space fixed simple axis
        rotations. Each subsequent axis of rotation is about the "space fixed"
        unit vectors of the parent reference frame.

        The computed angular velocity in this method is by default expressed in
        the child's frame, so it is most preferable to use ``u1 * child.x + u2 *
        child.y + u3 * child.z`` as generalized speeds.

        Parameters
        ==========

        parent : ReferenceFrame
            Reference frame that this reference frame will be rotated relative
            to.
        angles : 3-tuple of sympifiable
            Three angles in radians used for the successive rotations.
        rotation_order : 3 character string or 3 digit integer
            Order of the rotations about the parent reference frame's unit
            vectors. The order can be specified by the strings ``'XZX'``,
            ``'131'``, or the integer ``131``. There are 12 unique valid
            rotation orders.

        Warns
        ======

        UserWarning
            If the orientation creates a kinematic loop.

        Examples
        ========

        Setup variables for the examples:

        >>> from sympy import symbols
        >>> from sympy.physics.vector import ReferenceFrame
        >>> q1, q2, q3 = symbols('q1, q2, q3')
        >>> N = ReferenceFrame('N')
        >>> B = ReferenceFrame('B')
        >>> B1 = ReferenceFrame('B1')
        >>> B2 = ReferenceFrame('B2')
        >>> B3 = ReferenceFrame('B3')

        >>> B.orient_space_fixed(N, (q1, q2, q3), '312')
        >>> B.dcm(N)
        Matrix([
        [ sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3), sin(q1)*cos(q2), sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1)],
        [-sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1), cos(q1)*cos(q2), sin(q1)*sin(q3) + sin(q2)*cos(q1)*cos(q3)],
        [ sin(q3)*cos(q2), -sin(q2), cos(q2)*cos(q3)]])

        is equivalent to:

        >>> B1.orient_axis(N, N.z, q1)
        >>> B2.orient_axis(B1, N.x, q2)
        >>> B3.orient_axis(B2, N.y, q3)
        >>> B3.dcm(N).simplify()
        Matrix([
        [ sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3), sin(q1)*cos(q2), sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1)],
        [-sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1), cos(q1)*cos(q2), sin(q1)*sin(q3) + sin(q2)*cos(q1)*cos(q3)],
        [ sin(q3)*cos(q2), -sin(q2), cos(q2)*cos(q3)]])

        It is worth noting that space-fixed and body-fixed rotations are
        related by the order of the rotations, i.e. the reverse order of body
        fixed will give space fixed and vice versa.

        >>> B.orient_space_fixed(N, (q1, q2, q3), '231')
        >>> B.dcm(N)
        Matrix([
        [cos(q1)*cos(q2), sin(q1)*sin(q3) + sin(q2)*cos(q1)*cos(q3), -sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1)],
        [ -sin(q2), cos(q2)*cos(q3), sin(q3)*cos(q2)],
        [sin(q1)*cos(q2), sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1), sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3)]])

        >>> B.orient_body_fixed(N, (q3, q2, q1), '132')
        >>> B.dcm(N)
        Matrix([
        [cos(q1)*cos(q2), sin(q1)*sin(q3) + sin(q2)*cos(q1)*cos(q3), -sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1)],
        [ -sin(q2), cos(q2)*cos(q3), sin(q3)*cos(q2)],
        [sin(q1)*cos(q2), sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1), sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3)]])

        """
        from sympy.physics.vector.functions import dynamicsymbols
        _check_frame(parent)
        amounts, rot_order, rot_matrices = self._parse_consecutive_rotations(
            angles, rotation_order)
        # Space-fixed composes the elementary DCMs in the reverse order
        # relative to orient_body_fixed.
        self._dcm(parent, rot_matrices[2] * rot_matrices[1] * rot_matrices[0])
        # Each rotation contributes its angle rate in the slot of its own
        # rotation axis; rot_vecs[i] holds that rate as a 3x1 column.
        rot_vecs = [zeros(3, 1) for _ in range(3)]
        for i, order in enumerate(rot_order):
            rot_vecs[i][order - 1] = amounts[i].diff(dynamicsymbols._t)
        # Mirror of the body-fixed mapping (indices reversed) so the total
        # angular velocity is expressed in the child (self) frame.
        u1, u2, u3 = rot_vecs[0] + rot_matrices[0].T * (
            rot_vecs[1] + rot_matrices[1].T * rot_vecs[2])
        wvec = u1 * self.x + u2 * self.y + u3 * self.z # There is a double -
        self._ang_vel_dict.update({parent: wvec})
        parent._ang_vel_dict.update({self: -wvec})
        # Reset the cached variable map after the orientation change.
        self._var_dict = {}
def orient_quaternion(self, parent, numbers):
"""Sets the orientation of this reference frame relative to a parent
reference frame via an orientation quaternion. An orientation
quaternion is defined as a finite rotation a unit vector, ``(lambda_x,
lambda_y, lambda_z)``, by an angle ``theta``. The orientation
quaternion is described by four parameters:
- ``q0 = cos(theta/2)``
- ``q1 = lambda_x*sin(theta/2)``
- ``q2 = lambda_y*sin(theta/2)``
- ``q3 = lambda_z*sin(theta/2)``
See `Quaternions and Spatial Rotation
<https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation>`_ on
Wikipedia for more information.
Parameters
==========
parent : ReferenceFrame
Reference frame that this reference frame will be rotated relative
to.
numbers : 4-tuple of sympifiable
The four quaternion scalar numbers as defined above: ``q0``,
``q1``, ``q2``, ``q3``.
Warns
======
UserWarning
If the orientation creates a kinematic loop.
Examples
========
Setup variables for the examples:
>>> from sympy import symbols
>>> from sympy.physics.vector import ReferenceFrame
>>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
>>> N = ReferenceFrame('N')
>>> B = ReferenceFrame('B')
Set the orientation:
>>> B.orient_quaternion(N, (q0, q1, q2, q3))
>>> B.dcm(N)
Matrix([
[q0**2 + q1**2 - q2**2 - q3**2, 2*q0*q3 + 2*q1*q2, -2*q0*q2 + 2*q1*q3],
[ -2*q0*q3 + 2*q1*q2, q0**2 - q1**2 + q2**2 - q3**2, 2*q0*q1 + 2*q2*q3],
[ 2*q0*q2 + 2*q1*q3, -2*q0*q1 + 2*q2*q3, q0**2 - q1**2 - q2**2 + q3**2]])
"""
from sympy.physics.vector.functions import dynamicsymbols
_check_frame(parent)
numbers = list(numbers)
for i, v in enumerate(numbers):
if not isinstance(v, Vector):
numbers[i] = sympify(v)
if not (isinstance(numbers, (list, tuple)) & (len(numbers) == 4)):
raise TypeError('Amounts are a list or tuple of length 4')
q0, q1, q2, q3 = numbers
parent_orient_quaternion = (
Matrix([[q0**2 + q1**2 - q2**2 - q3**2,
2 * (q1 * q2 - q0 * q3),
2 * (q0 * q2 + q1 * q3)],
[2 * (q1 * q2 + q0 * q3),
q0**2 - q1**2 + q2**2 - q3**2,
2 * (q2 * q3 - q0 * q1)],
[2 * (q1 * q3 - q0 * q2),
2 * (q0 * q1 + q2 * q3),
q0**2 - q1**2 - q2**2 + q3**2]]))
self._dcm(parent, parent_orient_quaternion)
t = dynamicsymbols._t
q0, q1, q2, q3 = numbers
q0d = diff(q0, t)
q1d = diff(q1, t)
q2d = diff(q2, t)
q3d = diff(q3, t)
w1 = 2 * (q1d * q0 + q2d * q3 - q3d * q2 - q0d * q1)
w2 = 2 * (q2d * q0 + q3d * q1 - q1d * q3 - q0d * q2)
w3 = 2 * (q3d * q0 + q1d * q2 - q2d * q1 - q0d * q3)
wvec = Vector([(Matrix([w1, w2, w3]), self)])
self._ang_vel_dict.update({parent: wvec})
parent._ang_vel_dict.update({self: -wvec})
self._var_dict = {}
def orient(self, parent, rot_type, amounts, rot_order=''):
"""Sets the orientation of this reference frame relative to another
(parent) reference frame.
.. note:: It is now recommended to use the ``.orient_axis,
.orient_body_fixed, .orient_space_fixed, .orient_quaternion``
methods for the different rotation types.
Parameters
==========
parent : ReferenceFrame
Reference frame that this reference frame will be rotated relative
to.
rot_type : str
The method used to generate the direction cosine matrix. Supported
methods are:
- ``'Axis'``: simple rotations about a single common axis
- ``'DCM'``: for setting the direction cosine matrix directly
- ``'Body'``: three successive rotations about new intermediate
axes, also called "Euler and Tait-Bryan angles"
- ``'Space'``: three successive rotations about the parent
frames' unit vectors
- ``'Quaternion'``: rotations defined by four parameters which
result in a singularity free direction cosine matrix
amounts :
Expressions defining the rotation angles or direction cosine
matrix. These must match the ``rot_type``. See examples below for
details. The input types are:
- ``'Axis'``: 2-tuple (expr/sym/func, Vector)
- ``'DCM'``: Matrix, shape(3,3)
- ``'Body'``: 3-tuple of expressions, symbols, or functions
- ``'Space'``: 3-tuple of expressions, symbols, or functions
- ``'Quaternion'``: 4-tuple of expressions, symbols, or
functions
rot_order : str or int, optional
If applicable, the order of the successive of rotations. The string
``'123'`` and integer ``123`` are equivalent, for example. Required
for ``'Body'`` and ``'Space'``.
Warns
======
UserWarning
If the orientation creates a kinematic loop.
"""
_check_frame(parent)
approved_orders = ('123', '231', '312', '132', '213', '321', '121',
'131', '212', '232', '313', '323', '')
rot_order = translate(str(rot_order), 'XYZxyz', '123123')
rot_type = rot_type.upper()
if rot_order not in approved_orders:
raise TypeError('The supplied order is not an approved type')
if rot_type == 'AXIS':
self.orient_axis(parent, amounts[1], amounts[0])
elif rot_type == 'DCM':
self.orient_explicit(parent, amounts)
elif rot_type == 'BODY':
self.orient_body_fixed(parent, amounts, rot_order)
elif rot_type == 'SPACE':
self.orient_space_fixed(parent, amounts, rot_order)
elif rot_type == 'QUATERNION':
self.orient_quaternion(parent, amounts)
else:
raise NotImplementedError('That is not an implemented rotation')
def orientnew(self, newname, rot_type, amounts, rot_order='',
variables=None, indices=None, latexs=None):
r"""Returns a new reference frame oriented with respect to this
reference frame.
See ``ReferenceFrame.orient()`` for detailed examples of how to orient
reference frames.
Parameters
==========
newname : str
Name for the new reference frame.
rot_type : str
The method used to generate the direction cosine matrix. Supported
methods are:
- ``'Axis'``: simple rotations about a single common axis
- ``'DCM'``: for setting the direction cosine matrix directly
- ``'Body'``: three successive rotations about new intermediate
axes, also called "Euler and Tait-Bryan angles"
- ``'Space'``: three successive rotations about the parent
frames' unit vectors
- ``'Quaternion'``: rotations defined by four parameters which
result in a singularity free direction cosine matrix
amounts :
Expressions defining the rotation angles or direction cosine
matrix. These must match the ``rot_type``. See examples below for
details. The input types are:
- ``'Axis'``: 2-tuple (expr/sym/func, Vector)
- ``'DCM'``: Matrix, shape(3,3)
- ``'Body'``: 3-tuple of expressions, symbols, or functions
- ``'Space'``: 3-tuple of expressions, symbols, or functions
- ``'Quaternion'``: 4-tuple of expressions, symbols, or
functions
rot_order : str or int, optional
If applicable, the order of the successive of rotations. The string
``'123'`` and integer ``123`` are equivalent, for example. Required
for ``'Body'`` and ``'Space'``.
indices : tuple of str
Enables the reference frame's basis unit vectors to be accessed by
Python's square bracket indexing notation using the provided three
indice strings and alters the printing of the unit vectors to
reflect this choice.
latexs : tuple of str
Alters the LaTeX printing of the reference frame's basis unit
vectors to the provided three valid LaTeX strings.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.vector import ReferenceFrame, vlatex
>>> q0, q1, q2, q3 = symbols('q0 q1 q2 q3')
>>> N = ReferenceFrame('N')
Create a new reference frame A rotated relative to N through a simple
rotation.
>>> A = N.orientnew('A', 'Axis', (q0, N.x))
Create a new reference frame B rotated relative to N through body-fixed
rotations.
>>> B = N.orientnew('B', 'Body', (q1, q2, q3), '123')
Create a new reference frame C rotated relative to N through a simple
rotation with unique indices and LaTeX printing.
>>> C = N.orientnew('C', 'Axis', (q0, N.x), indices=('1', '2', '3'),
... latexs=(r'\hat{\mathbf{c}}_1',r'\hat{\mathbf{c}}_2',
... r'\hat{\mathbf{c}}_3'))
>>> C['1']
C['1']
>>> print(vlatex(C['1']))
\hat{\mathbf{c}}_1
"""
newframe = self.__class__(newname, variables=variables,
indices=indices, latexs=latexs)
approved_orders = ('123', '231', '312', '132', '213', '321', '121',
'131', '212', '232', '313', '323', '')
rot_order = translate(str(rot_order), 'XYZxyz', '123123')
rot_type = rot_type.upper()
if rot_order not in approved_orders:
raise TypeError('The supplied order is not an approved type')
if rot_type == 'AXIS':
newframe.orient_axis(self, amounts[1], amounts[0])
elif rot_type == 'DCM':
newframe.orient_explicit(self, amounts)
elif rot_type == 'BODY':
newframe.orient_body_fixed(self, amounts, rot_order)
elif rot_type == 'SPACE':
newframe.orient_space_fixed(self, amounts, rot_order)
elif rot_type == 'QUATERNION':
newframe.orient_quaternion(self, amounts)
else:
raise NotImplementedError('That is not an implemented rotation')
return newframe
def set_ang_acc(self, otherframe, value):
"""Define the angular acceleration Vector in a ReferenceFrame.
Defines the angular acceleration of this ReferenceFrame, in another.
Angular acceleration can be defined with respect to multiple different
ReferenceFrames. Care must be taken to not create loops which are
inconsistent.
Parameters
==========
otherframe : ReferenceFrame
A ReferenceFrame to define the angular acceleration in
value : Vector
The Vector representing angular acceleration
Examples
========
>>> from sympy.physics.vector import ReferenceFrame
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> V = 10 * N.x
>>> A.set_ang_acc(N, V)
>>> A.ang_acc_in(N)
10*N.x
"""
if value == 0:
value = Vector(0)
value = _check_vector(value)
_check_frame(otherframe)
self._ang_acc_dict.update({otherframe: value})
otherframe._ang_acc_dict.update({self: -value})
def set_ang_vel(self, otherframe, value):
"""Define the angular velocity vector in a ReferenceFrame.
Defines the angular velocity of this ReferenceFrame, in another.
Angular velocity can be defined with respect to multiple different
ReferenceFrames. Care must be taken to not create loops which are
inconsistent.
Parameters
==========
otherframe : ReferenceFrame
A ReferenceFrame to define the angular velocity in
value : Vector
The Vector representing angular velocity
Examples
========
>>> from sympy.physics.vector import ReferenceFrame
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> V = 10 * N.x
>>> A.set_ang_vel(N, V)
>>> A.ang_vel_in(N)
10*N.x
"""
if value == 0:
value = Vector(0)
value = _check_vector(value)
_check_frame(otherframe)
self._ang_vel_dict.update({otherframe: value})
otherframe._ang_vel_dict.update({self: -value})
    @property
    def x(self):
        """The basis Vector for the ReferenceFrame, in the x direction. """
        return self._x

    @property
    def y(self):
        """The basis Vector for the ReferenceFrame, in the y direction. """
        return self._y

    @property
    def z(self):
        """The basis Vector for the ReferenceFrame, in the z direction. """
        return self._z

    # Unit dyads: outer products of pairs of this frame's basis vectors.
    # These are the building blocks for dyadic (second-order tensor)
    # quantities and are computed on demand rather than cached.
    @property
    def xx(self):
        """Unit dyad of basis Vectors x and x for the ReferenceFrame."""
        return Vector.outer(self.x, self.x)

    @property
    def xy(self):
        """Unit dyad of basis Vectors x and y for the ReferenceFrame."""
        return Vector.outer(self.x, self.y)

    @property
    def xz(self):
        """Unit dyad of basis Vectors x and z for the ReferenceFrame."""
        return Vector.outer(self.x, self.z)

    @property
    def yx(self):
        """Unit dyad of basis Vectors y and x for the ReferenceFrame."""
        return Vector.outer(self.y, self.x)

    @property
    def yy(self):
        """Unit dyad of basis Vectors y and y for the ReferenceFrame."""
        return Vector.outer(self.y, self.y)

    @property
    def yz(self):
        """Unit dyad of basis Vectors y and z for the ReferenceFrame."""
        return Vector.outer(self.y, self.z)

    @property
    def zx(self):
        """Unit dyad of basis Vectors z and x for the ReferenceFrame."""
        return Vector.outer(self.z, self.x)

    @property
    def zy(self):
        """Unit dyad of basis Vectors z and y for the ReferenceFrame."""
        return Vector.outer(self.z, self.y)

    @property
    def zz(self):
        """Unit dyad of basis Vectors z and z for the ReferenceFrame."""
        return Vector.outer(self.z, self.z)

    @property
    def u(self):
        """Unit dyadic (``xx + yy + zz``) for the ReferenceFrame."""
        return self.xx + self.yy + self.zz
def partial_velocity(self, frame, *gen_speeds):
"""Returns the partial angular velocities of this frame in the given
frame with respect to one or more provided generalized speeds.
Parameters
==========
frame : ReferenceFrame
The frame with which the angular velocity is defined in.
gen_speeds : functions of time
The generalized speeds.
Returns
=======
partial_velocities : tuple of Vector
The partial angular velocity vectors corresponding to the provided
generalized speeds.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, dynamicsymbols
>>> N = ReferenceFrame('N')
>>> A = ReferenceFrame('A')
>>> u1, u2 = dynamicsymbols('u1, u2')
>>> A.set_ang_vel(N, u1 * A.x + u2 * N.y)
>>> A.partial_velocity(N, u1)
A.x
>>> A.partial_velocity(N, u1, u2)
(A.x, N.y)
"""
from sympy.physics.vector.functions import partial_velocity
vel = self.ang_vel_in(frame)
partials = partial_velocity([vel], gen_speeds, frame)[0]
if len(partials) == 1:
return partials[0]
else:
return tuple(partials)
def _check_frame(other):
    """Raise ``VectorTypeError`` unless *other* is a ReferenceFrame."""
    from .vector import VectorTypeError
    if isinstance(other, ReferenceFrame):
        return
    raise VectorTypeError(other, ReferenceFrame('A'))
| ReferenceFrame |
python | TheAlgorithms__Python | searches/jump_search.py | {
"start": 440,
"end": 1839
} | class ____(Protocol):
def __lt__(self, other: Any, /) -> bool: ...
# Generic element type for jump_search; the bound requires that elements
# support the ``<`` operator (a Protocol declaring ``__lt__``).
T = TypeVar("T", bound=Comparable)
def jump_search(arr: Sequence[T], item: T) -> int:
    """
    Python implementation of the jump search algorithm.
    Return the index if the `item` is found, otherwise return -1.

    The input sequence is assumed to be sorted in ascending order; the
    block-skipping comparisons below rely on that ordering.

    Examples:
    >>> jump_search([0, 1, 2, 3, 4, 5], 3)
    3
    >>> jump_search([-5, -2, -1], -1)
    2
    >>> jump_search([0, 5, 10, 20], 8)
    -1
    >>> jump_search([0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610], 55)
    10
    >>> jump_search(["aa", "bb", "cc", "dd", "ee", "ff"], "ee")
    4
    >>> jump_search([], 42)
    -1
    """
    arr_size = len(arr)
    # Bug fix: an empty sequence previously crashed with an IndexError on
    # arr[min(0, 0) - 1] == arr[-1]; there is nothing to find, so answer -1.
    if arr_size == 0:
        return -1
    block_size = int(math.sqrt(arr_size))
    prev = 0
    step = block_size
    # Jump ahead block by block until the current block's last element is
    # no longer smaller than the item (or the array is exhausted).
    while arr[min(step, arr_size) - 1] < item:
        prev = step
        step += block_size
        if prev >= arr_size:
            return -1
    # Linear scan within the identified block.
    while arr[prev] < item:
        prev += 1
        if prev == min(step, arr_size):
            return -1
    if arr[prev] == item:
        return prev
    return -1
if __name__ == "__main__":
    # Simple CLI driver: read a comma-separated list of integers (expected
    # to be sorted ascending, as jump search requires) and a target value,
    # then report where — if anywhere — the target occurs.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    array = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(array, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
| Comparable |
python | huggingface__transformers | src/transformers/image_processing_utils.py | {
"start": 1170,
"end": 13931
} | class ____(ImageProcessingMixin):
    # Declares the keyword-argument schema accepted by preprocess()
    # (see ImagesKwargs); subclasses may narrow this.
    valid_kwargs = ImagesKwargs

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @property
    def is_fast(self) -> bool:
        """
        `bool`: Whether or not this image processor is a fast processor (backed by PyTorch and TorchVision).
        """
        # This base class is the slow (non torch/torchvision-backed) path;
        # fast processor subclasses override this to return True.
        return False

    def __call__(self, images: ImageInput, *args, **kwargs: Unpack[ImagesKwargs]) -> BatchFeature:
        """Preprocess an image or a batch of images."""
        # Calling the processor is sugar for preprocess().
        return self.preprocess(images, *args, **kwargs)

    def preprocess(self, images, **kwargs) -> BatchFeature:
        # Abstract hook: each concrete processor implements its own pipeline.
        raise NotImplementedError("Each image processor must implement its own preprocess method")
    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Rescale an image by a scale factor. image = image * scale.
        Args:
            image (`np.ndarray`):
                Image to rescale.
            scale (`float`):
                The scaling factor to rescale pixel values by.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the input
                image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
        Returns:
            `np.ndarray`: The rescaled image.
        """
        # Thin wrapper: delegates to the module-level rescale() function,
        # forwarding any extra keyword arguments unchanged.
        return rescale(image, scale=scale, data_format=data_format, input_data_format=input_data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Normalize an image. image = (image - image_mean) / image_std.
        Args:
            image (`np.ndarray`):
                Image to normalize.
            mean (`float` or `Iterable[float]`):
                Image mean to use for normalization.
            std (`float` or `Iterable[float]`):
                Image standard deviation to use for normalization.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the input
                image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
        Returns:
            `np.ndarray`: The normalized image.
        """
        # Thin wrapper: delegates to the module-level normalize() function,
        # forwarding any extra keyword arguments unchanged.
        return normalize(
            image, mean=mean, std=std, data_format=data_format, input_data_format=input_data_format, **kwargs
        )
def center_crop(
self,
image: np.ndarray,
size: dict[str, int],
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Center crop an image to `(size["height"], size["width"])`. If the input size is smaller than `crop_size` along
any edge, the image is padded with 0's and then center cropped.
Args:
image (`np.ndarray`):
Image to center crop.
size (`dict[str, int]`):
Size of the output image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
"""
size = get_size_dict(size)
if "height" not in size or "width" not in size:
raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
return center_crop(
image,
size=(size["height"], size["width"]),
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
def to_dict(self):
encoder_dict = super().to_dict()
encoder_dict.pop("_valid_processor_keys", None)
return encoder_dict
VALID_SIZE_DICT_KEYS = (
{"height", "width"},
{"shortest_edge"},
{"shortest_edge", "longest_edge"},
{"longest_edge"},
{"max_height", "max_width"},
)
def is_valid_size_dict(size_dict):
if not isinstance(size_dict, dict):
return False
size_dict_keys = set(size_dict.keys())
for allowed_keys in VALID_SIZE_DICT_KEYS:
if size_dict_keys == allowed_keys:
return True
return False
def convert_to_size_dict(
size, max_size: Optional[int] = None, default_to_square: bool = True, height_width_order: bool = True
):
# By default, if size is an int we assume it represents a tuple of (size, size).
if isinstance(size, int) and default_to_square:
if max_size is not None:
raise ValueError("Cannot specify both size as an int, with default_to_square=True and max_size")
return {"height": size, "width": size}
# In other configs, if size is an int and default_to_square is False, size represents the length of
# the shortest edge after resizing.
elif isinstance(size, int) and not default_to_square:
size_dict = {"shortest_edge": size}
if max_size is not None:
size_dict["longest_edge"] = max_size
return size_dict
# Otherwise, if size is a tuple it's either (height, width) or (width, height)
elif isinstance(size, (tuple, list)) and height_width_order:
return {"height": size[0], "width": size[1]}
elif isinstance(size, (tuple, list)) and not height_width_order:
return {"height": size[1], "width": size[0]}
elif size is None and max_size is not None:
if default_to_square:
raise ValueError("Cannot specify both default_to_square=True and max_size")
return {"longest_edge": max_size}
raise ValueError(f"Could not convert size input to size dict: {size}")
def get_size_dict(
size: Optional[Union[int, Iterable[int], dict[str, int]]] = None,
max_size: Optional[int] = None,
height_width_order: bool = True,
default_to_square: bool = True,
param_name="size",
) -> dict:
"""
Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards
compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,
width) or (width, height) format.
- If `size` is tuple, it is converted to `{"height": size[0], "width": size[1]}` or `{"height": size[1], "width":
size[0]}` if `height_width_order` is `False`.
- If `size` is an int, and `default_to_square` is `True`, it is converted to `{"height": size, "width": size}`.
- If `size` is an int and `default_to_square` is False, it is converted to `{"shortest_edge": size}`. If `max_size`
is set, it is added to the dict as `{"longest_edge": max_size}`.
Args:
size (`Union[int, Iterable[int], dict[str, int]]`, *optional*):
The `size` parameter to be cast into a size dictionary.
max_size (`Optional[int]`, *optional*):
The `max_size` parameter to be cast into a size dictionary.
height_width_order (`bool`, *optional*, defaults to `True`):
If `size` is a tuple, whether it's in (height, width) or (width, height) order.
default_to_square (`bool`, *optional*, defaults to `True`):
If `size` is an int, whether to default to a square image or not.
"""
if not isinstance(size, dict):
size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)
logger.info(
f"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}."
f" Converted to {size_dict}.",
)
else:
size_dict = size
if not is_valid_size_dict(size_dict):
raise ValueError(
f"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}"
)
return size_dict
def select_best_resolution(original_size: tuple, possible_resolutions: list) -> tuple:
"""
Selects the best resolution from a list of possible resolutions based on the original size.
This is done by calculating the effective and wasted resolution for each possible resolution.
The best fit resolution is the one that maximizes the effective resolution and minimizes the wasted resolution.
Args:
original_size (tuple):
The original size of the image in the format (height, width).
possible_resolutions (list):
A list of possible resolutions in the format [(height1, width1), (height2, width2), ...].
Returns:
tuple: The best fit resolution in the format (height, width).
"""
original_height, original_width = original_size
best_fit = None
max_effective_resolution = 0
min_wasted_resolution = float("inf")
for height, width in possible_resolutions:
scale = min(width / original_width, height / original_height)
downscaled_width, downscaled_height = int(original_width * scale), int(original_height * scale)
effective_resolution = min(downscaled_width * downscaled_height, original_width * original_height)
wasted_resolution = (width * height) - effective_resolution
if effective_resolution > max_effective_resolution or (
effective_resolution == max_effective_resolution and wasted_resolution < min_wasted_resolution
):
max_effective_resolution = effective_resolution
min_wasted_resolution = wasted_resolution
best_fit = (height, width)
return best_fit
def get_patch_output_size(image, target_resolution, input_data_format):
"""
Given an image and a target resolution, calculate the output size of the image after cropping to the target
"""
original_height, original_width = get_image_size(image, channel_dim=input_data_format)
target_height, target_width = target_resolution
scale_w = target_width / original_width
scale_h = target_height / original_height
if scale_w < scale_h:
new_width = target_width
new_height = min(math.ceil(original_height * scale_w), target_height)
else:
new_height = target_height
new_width = min(math.ceil(original_width * scale_h), target_width)
return new_height, new_width
| BaseImageProcessor |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constrainedTypeVar4.py | {
"start": 535,
"end": 802
} | class ____(Generic[T]):
value: T | None
def __init__(self, source: T | None) -> None:
self.value = source
def read(self) -> T | None:
if self.value is None:
raise RuntimeError(f"Item is required!")
return self.value
| Item |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_complex.py | {
"start": 3545,
"end": 3587
} | class ____(complex):
pass
| ComplexSubclass |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/unitofwork.py | {
"start": 24121,
"end": 25344
} | class ____(_PostSortRec):
__slots__ = "dependency_processor", "isdelete", "state", "sort_key"
def __init__(self, uow, dependency_processor, isdelete, state):
self.dependency_processor = dependency_processor
self.sort_key = ("ProcessState", dependency_processor.sort_key)
self.isdelete = isdelete
self.state = state
def execute_aggregate(self, uow, recs):
cls_ = self.__class__
dependency_processor = self.dependency_processor
isdelete = self.isdelete
our_recs = [
r
for r in recs
if r.__class__ is cls_
and r.dependency_processor is dependency_processor
and r.isdelete is isdelete
]
recs.difference_update(our_recs)
states = [self.state] + [r.state for r in our_recs]
if isdelete:
dependency_processor.process_deletes(uow, states)
else:
dependency_processor.process_saves(uow, states)
def __repr__(self):
return "%s(%s, %s, delete=%s)" % (
self.__class__.__name__,
self.dependency_processor,
orm_util.state_str(self.state),
self.isdelete,
)
| _ProcessState |
python | kubernetes-client__python | kubernetes/client/models/v1_stateful_set_list.py | {
"start": 383,
"end": 6928
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1StatefulSet]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1StatefulSetList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1StatefulSetList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1StatefulSetList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1StatefulSetList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1StatefulSetList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1StatefulSetList. # noqa: E501
Items is the list of stateful sets. # noqa: E501
:return: The items of this V1StatefulSetList. # noqa: E501
:rtype: list[V1StatefulSet]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1StatefulSetList.
Items is the list of stateful sets. # noqa: E501
:param items: The items of this V1StatefulSetList. # noqa: E501
:type: list[V1StatefulSet]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1StatefulSetList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1StatefulSetList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1StatefulSetList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1StatefulSetList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1StatefulSetList. # noqa: E501
:return: The metadata of this V1StatefulSetList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1StatefulSetList.
:param metadata: The metadata of this V1StatefulSetList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1StatefulSetList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1StatefulSetList):
return True
return self.to_dict() != other.to_dict()
| V1StatefulSetList |
python | numba__numba | numba/core/typing/context.py | {
"start": 6332,
"end": 27970
} | class ____(object):
"""A typing context for storing function typing constrain template.
"""
def __init__(self):
# A list of installed registries
self._registries = {}
# Typing declarations extracted from the registries or other sources
self._functions = defaultdict(list)
self._attributes = defaultdict(list)
self._globals = utils.UniqueDict()
self.tm = rules.default_type_manager
self.callstack = CallStack()
# Initialize
self.init()
def init(self):
"""
Initialize the typing context. Can be overridden by subclasses.
"""
def refresh(self):
"""
Refresh context with new declarations from known registries.
Useful for third-party extensions.
"""
self.load_additional_registries()
# Some extensions may have augmented the builtin registry
self._load_builtins()
def explain_function_type(self, func):
"""
Returns a string description of the type of a function
"""
desc = []
defns = []
param = False
if isinstance(func, types.Callable):
sigs, param = func.get_call_signatures()
defns.extend(sigs)
elif func in self._functions:
for tpl in self._functions[func]:
param = param or hasattr(tpl, 'generic')
defns.extend(getattr(tpl, 'cases', []))
else:
msg = "No type info available for {func!r} as a callable."
desc.append(msg.format(func=func))
if defns:
desc = ['Known signatures:']
for sig in defns:
desc.append(' * {0}'.format(sig))
return '\n'.join(desc)
def resolve_function_type(self, func, args, kws):
"""
Resolve function type *func* for argument types *args* and *kws*.
A signature is returned.
"""
cache = self.callstack.lookup_resolve_cache(func, args, kws)
if cache.has_failed_previously():
return cache.replay_failure()
# Prefer user definition first
try:
res = self._resolve_user_function_type(func, args, kws)
except errors.TypingError as e:
# Capture any typing error
last_exception = e
res = None
else:
last_exception = None
# Return early we know there's a working user function
if res is not None:
return res
# Check builtin functions
res = self._resolve_builtin_function_type(func, args, kws)
# Re-raise last_exception if no function type has been found
if res is None and last_exception is not None:
cache.mark_error(last_exception)
raise last_exception
if res is None:
cache.mark_failed()
return res
def _resolve_builtin_function_type(self, func, args, kws):
# NOTE: we should reduce usage of this
if func in self._functions:
# Note: Duplicating code with types.Function.get_call_type().
# *defns* are CallTemplates.
defns = self._functions[func]
for defn in defns:
for support_literals in [True, False]:
if support_literals:
res = defn.apply(args, kws)
else:
fixedargs = [types.unliteral(a) for a in args]
res = defn.apply(fixedargs, kws)
if res is not None:
return res
def _resolve_user_function_type(self, func, args, kws, literals=None):
# It's not a known function type, perhaps it's a global?
functy = self._lookup_global(func)
if functy is not None:
func = functy
if isinstance(func, types.Type):
# If it's a type, it may support a __call__ method
func_type = self.resolve_getattr(func, "__call__")
if func_type is not None:
# The function has a __call__ method, type its call.
return self.resolve_function_type(func_type, args, kws)
if isinstance(func, types.Callable):
# XXX fold this into the __call__ attribute logic?
return func.get_call_type(self, args, kws)
def _get_attribute_templates(self, typ):
"""
Get matching AttributeTemplates for the Numba type.
"""
if typ in self._attributes:
for attrinfo in self._attributes[typ]:
yield attrinfo
else:
for cls in type(typ).__mro__:
if cls in self._attributes:
for attrinfo in self._attributes[cls]:
yield attrinfo
def resolve_getattr(self, typ, attr):
"""
Resolve getting the attribute *attr* (a string) on the Numba type.
The attribute's type is returned, or None if resolution failed.
"""
def core(typ):
out = self.find_matching_getattr_template(typ, attr)
if out:
return out['return_type']
out = core(typ)
if out is not None:
return out
# Try again without literals
out = core(types.unliteral(typ))
if out is not None:
return out
if isinstance(typ, types.Module):
attrty = self.resolve_module_constants(typ, attr)
if attrty is not None:
return attrty
def find_matching_getattr_template(self, typ, attr):
templates = list(self._get_attribute_templates(typ))
# get the order in which to try templates
from numba.core.target_extension import get_local_target # circular
target_hw = get_local_target(self)
order = order_by_target_specificity(target_hw, templates, fnkey=attr)
for template in order:
return_type = template.resolve(typ, attr)
if return_type is not None:
return {
'template': template,
'return_type': return_type,
}
def resolve_setattr(self, target, attr, value):
"""
Resolve setting the attribute *attr* (a string) on the *target* type
to the given *value* type.
A function signature is returned, or None if resolution failed.
"""
for attrinfo in self._get_attribute_templates(target):
expectedty = attrinfo.resolve(target, attr)
# NOTE: convertibility from *value* to *expectedty* is left to
# the caller.
if expectedty is not None:
return templates.signature(types.void, target, expectedty)
def resolve_static_getitem(self, value, index):
assert not isinstance(index, types.Type), index
args = value, index
kws = ()
return self.resolve_function_type("static_getitem", args, kws)
def resolve_static_setitem(self, target, index, value):
assert not isinstance(index, types.Type), index
args = target, index, value
kws = {}
return self.resolve_function_type("static_setitem", args, kws)
def resolve_setitem(self, target, index, value):
assert isinstance(index, types.Type), index
fnty = self.resolve_value_type(operator.setitem)
sig = fnty.get_call_type(self, (target, index, value), {})
return sig
def resolve_delitem(self, target, index):
args = target, index
kws = {}
fnty = self.resolve_value_type(operator.delitem)
sig = fnty.get_call_type(self, args, kws)
return sig
def resolve_module_constants(self, typ, attr):
"""
Resolve module-level global constants.
Return None or the attribute type
"""
assert isinstance(typ, types.Module)
attrval = getattr(typ.pymod, attr)
try:
return self.resolve_value_type(attrval)
except ValueError:
pass
def resolve_value_type(self, val):
"""
Return the numba type of a Python value that is being used
as a runtime constant.
ValueError is raised for unsupported types.
"""
try:
ty = typeof(val, Purpose.constant)
except ValueError as e:
# Make sure the exception doesn't hold a reference to the user
# value.
typeof_exc = utils.erase_traceback(e)
else:
return ty
if isinstance(val, types.ExternalFunction):
return val
# Try to look up target specific typing information
ty = self._get_global_type(val)
if ty is not None:
return ty
raise typeof_exc
def resolve_value_type_prefer_literal(self, value):
"""Resolve value type and prefer Literal types whenever possible.
"""
lit = types.maybe_literal(value)
if lit is None:
return self.resolve_value_type(value)
else:
return lit
def _get_global_type(self, gv):
ty = self._lookup_global(gv)
if ty is not None:
return ty
if isinstance(gv, pytypes.ModuleType):
return types.Module(gv)
def _load_builtins(self):
# Initialize declarations
from numba.core.typing import builtins, arraydecl, npdatetime # noqa: F401, E501
from numba.core.typing import ctypes_utils, bufproto # noqa: F401, E501
from numba.core.unsafe import eh # noqa: F401
self.install_registry(templates.builtin_registry)
def load_additional_registries(self):
"""
Load target-specific registries. Can be overridden by subclasses.
"""
def install_registry(self, registry):
"""
Install a *registry* (a templates.Registry instance) of function,
attribute and global declarations.
"""
try:
loader = self._registries[registry]
except KeyError:
loader = templates.RegistryLoader(registry)
self._registries[registry] = loader
from numba.core.target_extension import (get_local_target,
resolve_target_str)
current_target = get_local_target(self)
def is_for_this_target(ftcls):
metadata = getattr(ftcls, 'metadata', None)
if metadata is None:
return True
target_str = metadata.get('target')
if target_str is None:
return True
# There may be pending registrations for nonexistent targets.
# Ideally it would be impossible to leave a registration pending
# for an invalid target, but in practice this is exceedingly
# difficult to guard against - many things are registered at import
# time, and eagerly reporting an error when registering for invalid
# targets would require that all target registration code is
# executed prior to all typing registrations during the import
# process; attempting to enforce this would impose constraints on
# execution order during import that would be very difficult to
# resolve and maintain in the presence of typical code maintenance.
# Furthermore, these constraints would be imposed not only on
# Numba internals, but also on its dependents.
#
# Instead of that enforcement, we simply catch any occurrences of
# registrations for targets that don't exist, and report that
# they're not for this target. They will then not be encountered
# again during future typing context refreshes (because the
# loader's new registrations are a stream_list that doesn't yield
# previously-yielded items).
try:
ft_target = resolve_target_str(target_str)
except errors.NonexistentTargetError:
return False
return current_target.inherits_from(ft_target)
for ftcls in loader.new_registrations('functions'):
if not is_for_this_target(ftcls):
continue
self.insert_function(ftcls(self))
for ftcls in loader.new_registrations('attributes'):
if not is_for_this_target(ftcls):
continue
self.insert_attributes(ftcls(self))
for gv, gty in loader.new_registrations('globals'):
existing = self._lookup_global(gv)
if existing is None:
self.insert_global(gv, gty)
else:
# A type was already inserted, see if we can add to it
newty = existing.augment(gty)
if newty is None:
raise TypeError("cannot augment %s with %s"
% (existing, gty))
self._remove_global(gv)
self._insert_global(gv, newty)
def _lookup_global(self, gv):
"""
Look up the registered type for global value *gv*.
"""
try:
gv = weakref.ref(gv)
except TypeError:
pass
try:
return self._globals.get(gv, None)
except TypeError:
# Unhashable type
return None
def _insert_global(self, gv, gty):
"""
Register type *gty* for value *gv*. Only a weak reference
to *gv* is kept, if possible.
"""
def on_disposal(wr, pop=self._globals.pop):
# pop() is pre-looked up to avoid a crash late at shutdown on 3.5
# (https://bugs.python.org/issue25217)
pop(wr)
try:
gv = weakref.ref(gv, on_disposal)
except TypeError:
pass
self._globals[gv] = gty
def _remove_global(self, gv):
"""
Remove the registered type for global value *gv*.
"""
try:
gv = weakref.ref(gv)
except TypeError:
pass
del self._globals[gv]
def insert_global(self, gv, gty):
self._insert_global(gv, gty)
def insert_attributes(self, at):
key = at.key
self._attributes[key].append(at)
def insert_function(self, ft):
key = ft.key
self._functions[key].append(ft)
def insert_user_function(self, fn, ft):
"""Insert a user function.
Args
----
- fn:
object used as callee
- ft:
function template
"""
self._insert_global(fn, types.Function(ft))
def can_convert(self, fromty, toty):
"""
Check whether conversion is possible from *fromty* to *toty*.
If successful, return a numba.typeconv.Conversion instance;
otherwise None is returned.
"""
if fromty == toty:
return Conversion.exact
else:
# First check with the type manager (some rules are registered
# at startup there, see numba.typeconv.rules)
conv = self.tm.check_compatible(fromty, toty)
if conv is not None:
return conv
# Fall back on type-specific rules
forward = fromty.can_convert_to(self, toty)
backward = toty.can_convert_from(self, fromty)
if backward is None:
return forward
elif forward is None:
return backward
else:
return min(forward, backward)
def _rate_arguments(self, actualargs, formalargs, unsafe_casting=True,
exact_match_required=False):
"""
Rate the actual arguments for compatibility against the formal
arguments. A Rating instance is returned, or None if incompatible.
"""
if len(actualargs) != len(formalargs):
return None
rate = Rating()
for actual, formal in zip(actualargs, formalargs):
conv = self.can_convert(actual, formal)
if conv is None:
return None
elif not unsafe_casting and conv >= Conversion.unsafe:
return None
elif exact_match_required and conv != Conversion.exact:
return None
if conv == Conversion.promote:
rate.promote += 1
elif conv == Conversion.safe:
rate.safe_convert += 1
elif conv == Conversion.unsafe:
rate.unsafe_convert += 1
elif conv == Conversion.exact:
pass
else:
raise AssertionError("unreachable", conv)
return rate
def install_possible_conversions(self, actualargs, formalargs):
"""
Install possible conversions from the actual argument types to
the formal argument types in the C++ type manager.
Return True if all arguments can be converted.
"""
if len(actualargs) != len(formalargs):
return False
for actual, formal in zip(actualargs, formalargs):
if self.tm.check_compatible(actual, formal) is not None:
# This conversion is already known
continue
conv = self.can_convert(actual, formal)
if conv is None:
return False
assert conv is not Conversion.exact
self.tm.set_compatible(actual, formal, conv)
return True
def resolve_overload(self, key, cases, args, kws,
allow_ambiguous=True, unsafe_casting=True,
exact_match_required=False):
"""
Given actual *args* and *kws*, find the best matching
signature in *cases*, or None if none matches.
*key* is used for error reporting purposes.
If *allow_ambiguous* is False, a tie in the best matches
will raise an error.
If *unsafe_casting* is False, unsafe casting is forbidden.
"""
assert not kws, "Keyword arguments are not supported, yet"
options = {
'unsafe_casting': unsafe_casting,
'exact_match_required': exact_match_required,
}
# Rate each case
candidates = []
for case in cases:
if len(args) == len(case.args):
rating = self._rate_arguments(args, case.args, **options)
if rating is not None:
candidates.append((rating.astuple(), case))
# Find the best case
candidates.sort(key=lambda i: i[0])
if candidates:
best_rate, best = candidates[0]
if not allow_ambiguous:
# Find whether there is a tie and if so, raise an error
tied = []
for rate, case in candidates:
if rate != best_rate:
break
tied.append(case)
if len(tied) > 1:
args = (key, args, '\n'.join(map(str, tied)))
msg = "Ambiguous overloading for %s %s:\n%s" % args
raise TypeError(msg)
# Simply return the best matching candidate in order.
# If there is a tie, since list.sort() is stable, the first case
# in the original order is returned.
# (this can happen if e.g. a function template exposes
# (int32, int32) -> int32 and (int64, int64) -> int64,
# and you call it with (int16, int16) arguments)
return best
def unify_types(self, *typelist):
# Sort the type list according to bit width before doing
# pairwise unification (with thanks to aterrel).
def keyfunc(obj):
"""Uses bitwidth to order numeric-types.
Fallback to stable, deterministic sort.
"""
return getattr(obj, 'bitwidth', 0)
typelist = sorted(typelist, key=keyfunc)
unified = typelist[0]
for tp in typelist[1:]:
unified = self.unify_pairs(unified, tp)
if unified is None:
break
return unified
def unify_pairs(self, first, second):
"""
Try to unify the two given types. A third type is returned,
or None in case of failure.
"""
if first == second:
return first
if first is types.undefined:
return second
elif second is types.undefined:
return first
# Types with special unification rules
unified = first.unify(self, second)
if unified is not None:
return unified
unified = second.unify(self, first)
if unified is not None:
return unified
# Other types with simple conversion rules
conv = self.can_convert(fromty=first, toty=second)
if conv is not None and conv <= Conversion.safe:
# Can convert from first to second
return second
conv = self.can_convert(fromty=second, toty=first)
if conv is not None and conv <= Conversion.safe:
# Can convert from second to first
return first
if isinstance(first, types.Literal) or \
isinstance(second, types.Literal):
first = types.unliteral(first)
second = types.unliteral(second)
return self.unify_pairs(first, second)
# Cannot unify
return None
| BaseContext |
python | gevent__gevent | src/greentest/3.11/test_ftplib.py | {
"start": 16612,
"end": 32263
} | class ____(TestCase):
def setUp(self, encoding=DEFAULT_ENCODING):
self.server = DummyFTPServer((HOST, 0), encoding=encoding)
self.server.start()
self.client = ftplib.FTP(timeout=TIMEOUT, encoding=encoding)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
asyncore.close_all(ignore_all=True)
def check_data(self, received, expected):
self.assertEqual(len(received), len(expected))
self.assertEqual(received, expected)
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\r\n0')
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\n0')
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\r0')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, OSError,
EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.assertEqual(self.client.voidcmd('echo 200'), '200')
self.assertEqual(self.client.voidcmd('echo 299'), '299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler_instance.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler_instance.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
    def test_size(self):
        # SIZE command; only check that the call completes without raising.
        self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
    def test_rmd(self):
        # RMD command; should complete without raising.
        self.client.rmd('foo')
def test_cwd(self):
dir = self.client.cwd('/foo')
self.assertEqual(dir, '250 cwd ok')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
    def test_abort(self):
        # ABOR command; should complete without raising.
        self.client.abort()
def test_retrbinary(self):
received = []
self.client.retrbinary('retr', received.append)
self.check_data(b''.join(received),
RETR_DATA.encode(self.client.encoding))
def test_retrbinary_rest(self):
for rest in (0, 10, 20):
received = []
self.client.retrbinary('retr', received.append, rest=rest)
self.check_data(b''.join(received),
RETR_DATA[rest:].encode(self.client.encoding))
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.check_data(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = io.BytesIO(RETR_DATA.encode(self.client.encoding))
self.client.storbinary('stor', f)
self.check_data(self.server.handler_instance.last_received_data,
RETR_DATA.encode(self.server.encoding))
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storbinary_rest(self):
data = RETR_DATA.replace('\r\n', '\n').encode(self.client.encoding)
f = io.BytesIO(data)
for r in (30, '30'):
f.seek(0)
self.client.storbinary('stor', f, rest=r)
self.assertEqual(self.server.handler_instance.rest, str(r))
    def test_storlines(self):
        """Upload line-oriented data with storlines(): verify the server
        receives it, the progress callback fires, and text-mode files are
        rejected."""
        data = RETR_DATA.replace('\r\n', '\n').encode(self.client.encoding)
        f = io.BytesIO(data)
        self.client.storlines('stor', f)
        self.check_data(self.server.handler_instance.last_received_data,
                        RETR_DATA.encode(self.server.encoding))
        # test new callback arg
        flag = []
        f.seek(0)
        self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
        self.assertTrue(flag)
        f = io.StringIO(RETR_DATA.replace('\r\n', '\n'))
        # storlines() expects a binary file, not a text file
        # (BytesWarning may be emitted along the way; tolerate it quietly)
        with warnings_helper.check_warnings(('', BytesWarning), quiet=True):
            self.assertRaises(TypeError, self.client.storlines, 'stor foo', f)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(l.append)
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
    def test_mlsd(self):
        """Exercise mlsd(): fact-dict parsing, '=' and ';' and spaces in
        names, case-insensitive fact keys, and the empty-directory case."""
        list(self.client.mlsd())
        list(self.client.mlsd(path='/'))
        list(self.client.mlsd(path='/', facts=['size', 'type']))
        # Every entry is a (name, facts-dict) pair with the facts the dummy
        # server includes in its canned listing.
        ls = list(self.client.mlsd())
        for name, facts in ls:
            self.assertIsInstance(name, str)
            self.assertIsInstance(facts, dict)
            self.assertTrue(name)
            self.assertIn('type', facts)
            self.assertIn('perm', facts)
            self.assertIn('unique', facts)
        # Helper: make the dummy server send *data* on the next transfer.
        def set_data(data):
            self.server.handler_instance.next_data = data
        # Helper: feed one raw MLSD line and check the parsed name/facts.
        def test_entry(line, type=None, perm=None, unique=None, name=None):
            type = 'type' if type is None else type
            perm = 'perm' if perm is None else perm
            unique = 'unique' if unique is None else unique
            name = 'name' if name is None else name
            set_data(line)
            _name, facts = next(self.client.mlsd())
            self.assertEqual(_name, name)
            self.assertEqual(facts['type'], type)
            self.assertEqual(facts['perm'], perm)
            self.assertEqual(facts['unique'], unique)
        # plain
        test_entry('type=type;perm=perm;unique=unique; name\r\n')
        # "=" in fact value
        test_entry('type=ty=pe;perm=perm;unique=unique; name\r\n', type="ty=pe")
        test_entry('type==type;perm=perm;unique=unique; name\r\n', type="=type")
        test_entry('type=t=y=pe;perm=perm;unique=unique; name\r\n', type="t=y=pe")
        test_entry('type=====;perm=perm;unique=unique; name\r\n', type="====")
        # spaces in name
        test_entry('type=type;perm=perm;unique=unique; na me\r\n', name="na me")
        test_entry('type=type;perm=perm;unique=unique; name \r\n', name="name ")
        test_entry('type=type;perm=perm;unique=unique;  name\r\n', name=" name")
        test_entry('type=type;perm=perm;unique=unique; n am  e\r\n', name="n am  e")
        # ";" in name
        test_entry('type=type;perm=perm;unique=unique; na;me\r\n', name="na;me")
        test_entry('type=type;perm=perm;unique=unique; ;name\r\n', name=";name")
        test_entry('type=type;perm=perm;unique=unique; ;name;\r\n', name=";name;")
        test_entry('type=type;perm=perm;unique=unique; ;;;;\r\n', name=";;;;")
        # case sensitiveness: fact keys are lower-cased by the parser
        set_data('Type=type;TyPe=perm;UNIQUE=unique; name\r\n')
        _name, facts = next(self.client.mlsd())
        for x in facts:
            self.assertTrue(x.islower())
        # no data (directory empty)
        set_data('')
        self.assertRaises(StopIteration, next, self.client.mlsd())
        set_data('')
        for x in self.client.mlsd():
            self.fail("unexpected data %s" % x)
def test_makeport(self):
with self.client.makeport():
# IPv4 is in use, just make sure send_eprt has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd,
'port')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), timeout=TIMEOUT)
conn.close()
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd, 'pasv')
    def test_makepasv_issue43285_security_disabled(self):
        """Test the opt-in to the old vulnerable behavior."""
        # With the trust flag set, makepasv() returns the IPv4 address the
        # server advertised in its PASV reply (the pre-bpo-43285 behavior).
        self.client.trust_server_pasv_ipv4_address = True
        bad_host, port = self.client.makepasv()
        self.assertEqual(
            bad_host, self.server.handler_instance.fake_pasv_server_ip)
        # Opening and closing a connection keeps the dummy server happy
        # instead of timing out on accept.
        socket.create_connection((self.client.sock.getpeername()[0], port),
                                 timeout=TIMEOUT).close()
    def test_makepasv_issue43285_security_enabled_default(self):
        """By default the PASV-advertised IP must be ignored (bpo-43285)."""
        self.assertFalse(self.client.trust_server_pasv_ipv4_address)
        trusted_host, port = self.client.makepasv()
        self.assertNotEqual(
            trusted_host, self.server.handler_instance.fake_pasv_server_ip)
        # Opening and closing a connection keeps the dummy server happy
        # instead of timing out on accept.
        socket.create_connection((trusted_host, port), timeout=TIMEOUT).close()
    def test_with_statement(self):
        """FTP as a context manager must send QUIT and close the socket on
        exit, and must still close even when QUIT gets an error reply."""
        self.client.quit()
        # Helper: probe whether the control connection is still usable.
        def is_client_connected():
            if self.client.sock is None:
                return False
            try:
                self.client.sendcmd('noop')
            except (OSError, EOFError):
                return False
            return True
        # base test
        with ftplib.FTP(timeout=TIMEOUT) as self.client:
            self.client.connect(self.server.host, self.server.port)
            self.client.sendcmd('noop')
            self.assertTrue(is_client_connected())
        self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
        self.assertFalse(is_client_connected())
        # QUIT sent inside the with block
        with ftplib.FTP(timeout=TIMEOUT) as self.client:
            self.client.connect(self.server.host, self.server.port)
            self.client.sendcmd('noop')
            self.client.quit()
        self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
        self.assertFalse(is_client_connected())
        # force a wrong response code to be sent on QUIT: error_perm
        # is expected and the connection is supposed to be closed
        try:
            with ftplib.FTP(timeout=TIMEOUT) as self.client:
                self.client.connect(self.server.host, self.server.port)
                self.client.sendcmd('noop')
                self.server.handler_instance.next_response = '550 error on quit'
        except ftplib.error_perm as err:
            self.assertEqual(str(err), '550 error on quit')
        else:
            self.fail('Exception not raised')
        # needed to give the threaded server some time to set the attribute
        # which otherwise would still be == 'noop'
        time.sleep(0.1)
        self.assertEqual(self.server.handler_instance.last_received_cmd, 'quit')
        self.assertFalse(is_client_connected())
    def test_source_address(self):
        """connect() must bind the control socket to the given
        source_address."""
        self.client.quit()
        port = socket_helper.find_unused_port()
        try:
            self.client.connect(self.server.host, self.server.port,
                                source_address=(HOST, port))
            self.assertEqual(self.client.sock.getsockname()[1], port)
            self.client.quit()
        except OSError as e:
            # Another process may have grabbed the port in the meantime.
            if e.errno == errno.EADDRINUSE:
                self.skipTest("couldn't bind to port %d" % port)
            raise
    def test_source_address_passive_connection(self):
        """The passive data connection must also bind to source_address."""
        port = socket_helper.find_unused_port()
        self.client.source_address = (HOST, port)
        try:
            with self.client.transfercmd('list') as sock:
                self.assertEqual(sock.getsockname()[1], port)
        except OSError as e:
            # Another process may have grabbed the port in the meantime.
            if e.errno == errno.EADDRINUSE:
                self.skipTest("couldn't bind to port %d" % port)
            raise
def test_parse257(self):
self.assertEqual(ftplib.parse257('257 "/foo/bar"'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 "/foo/bar" created'), '/foo/bar')
self.assertEqual(ftplib.parse257('257 ""'), '')
self.assertEqual(ftplib.parse257('257 "" created'), '')
self.assertRaises(ftplib.error_reply, ftplib.parse257, '250 "/foo/bar"')
# The 257 response is supposed to include the directory
# name and in case it contains embedded double-quotes
# they must be doubled (see RFC-959, chapter 7, appendix 2).
self.assertEqual(ftplib.parse257('257 "/foo/b""ar"'), '/foo/b"ar')
self.assertEqual(ftplib.parse257('257 "/foo/b""ar" created'), '/foo/b"ar')
def test_line_too_long(self):
self.assertRaises(ftplib.Error, self.client.sendcmd,
'x' * self.client.maxline * 2)
def test_retrlines_too_long(self):
self.client.sendcmd('SETLONGRETR %d' % (self.client.maxline * 2))
received = []
self.assertRaises(ftplib.Error,
self.client.retrlines, 'retr', received.append)
def test_storlines_too_long(self):
f = io.BytesIO(b'x' * self.client.maxline * 2)
self.assertRaises(ftplib.Error, self.client.storlines, 'stor', f)
    def test_encoding_param(self):
        """Re-run a few transfer tests under explicit encodings, then check
        that a fresh FTP() defaults to DEFAULT_ENCODING."""
        encodings = ['latin-1', 'utf-8']
        for encoding in encodings:
            with self.subTest(encoding=encoding):
                # Rebuild server + client with the encoding under test.
                self.tearDown()
                self.setUp(encoding=encoding)
                self.assertEqual(encoding, self.client.encoding)
                self.test_retrbinary()
                self.test_storbinary()
                self.test_retrlines()
                new_dir = self.client.mkd('/non-ascii dir \xAE')
                self.check_data(new_dir, '/non-ascii dir \xAE')
        # Check default encoding
        client = ftplib.FTP(timeout=TIMEOUT)
        self.assertEqual(DEFAULT_ENCODING, client.encoding)
@skipUnless(socket_helper.IPV6_ENABLED, "IPv6 not enabled")
| TestFTPClass |
python | apache__airflow | providers/cloudant/src/airflow/providers/cloudant/cloudant_fake.py | {
"start": 1026,
"end": 1197
} | class ____:
"""Phony class to pass mypy when real class is not imported."""
def __init__(self, username: str, password: str):
pass
| CouchDbSessionAuthenticator |
python | plotly__plotly.py | plotly/graph_objs/contour/colorbar/title/_font.py | {
"start": 233,
"end": 9913
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "contour.colorbar.title"
_path_str = "contour.colorbar.title.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this color bar's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.contour.colorbar.title.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.contour.colorbar.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.contour.colorbar.title.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | huggingface__transformers | src/transformers/models/modernbert/modeling_modernbert.py | {
"start": 5107,
"end": 6937
} | class ____(RotaryEmbedding):
"""
The rotary position embeddings applied directly to unpadded sequences.
"""
def __init__(
self,
dim: int,
base: float = 10000.0,
max_seqlen: Optional[int] = None,
device: Optional[torch.device] = None,
dtype: Optional[torch.dtype] = None,
):
"""
max_seqlen: if max_seqlen, device, and dtype are provided, we precompute the cos_sin_cache
up to max_seqlen. If the max_seqlen, device, or dtype during training/inference differ,
the cos_sin_cache will be recomputed during the forward pass.
"""
super().__init__(dim=dim, base=base, device=device, interleaved=False)
self.max_seqlen = max_seqlen
if max_seqlen is not None and device is not None and dtype is not None:
self._update_cos_sin_cache(max_seqlen, device=device, dtype=dtype)
def forward(
self,
qkv: torch.Tensor,
cu_seqlens: torch.Tensor,
max_seqlen: Optional[int] = None,
) -> Union[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]:
"""
Apply rotary embedding *inplace* to qkv.
qkv: (total_nnz, 3, nheads, headdim)
cu_seqlens: (batch + 1,) cumulative sequence lengths
max_seqlen: int max seq length in the batch
"""
if max_seqlen is not None:
self._update_cos_sin_cache(max_seqlen, device=qkv.device, dtype=qkv.dtype)
qkv = apply_rotary_unpadded(
qkv,
self._cos_cached,
self._sin_cached,
cu_seqlens=cu_seqlens,
max_seqlen=max_seqlen,
)
return qkv
def extra_repr(self) -> str:
return f"dim={self.dim}, base={self.base}, scale_base={self.scale_base}"
| ModernBertUnpaddedRotaryEmbedding |
python | pytorch__pytorch | torch/_export/serde/serialize.py | {
"start": 157650,
"end": 159629
} | class ____:
"""
Base class for handling extension operators.
"""
@classmethod
def namespace(cls) -> str:
raise NotImplementedError(f"{cls.__class__} namespace() must be implemented")
@classmethod
def to_op_name(cls, op) -> str:
raise NotImplementedError(f"{cls.__class__} op_name() must be implemented")
@classmethod
def from_op_name(cls, name: str):
raise NotImplementedError(f"{cls.__class__} op_name() must be implemented")
@classmethod
def op_schema(cls, op) -> torch.FunctionSchema:
raise NotImplementedError(f"{cls.__class__} op_schema() must be implemented")
def register_extension(
op_type: type[Any],
extension_handler: type[ExtensionHandler],
):
"""Register custom de/serialization method for a node with non-standard type."""
assert issubclass(extension_handler, ExtensionHandler), (
f"Expected ExtensionHandler, got {extension_handler}."
)
assert op_type not in _serialization_registry, f"{op_type} is already registered."
assert isinstance(op_type, type) # Maybe a good idea to enforce this first.
assert not (
op_type.__module__.startswith("torch")
or op_type.__module__.startswith("builtins")
)
assert extension_handler.namespace() not in _deserialization_registry
_serialization_registry[op_type] = extension_handler
_deserialization_registry[extension_handler.namespace()] = extension_handler
def _registered_extension_types():
return tuple(_serialization_registry.keys())
# Registry to store all custom serialization implementations.
# The registry maps a operation to its serialization function (a callable), in their own
# namespace to avoid conflicts.
# Serialization: Op type --> custom handler.
# De-serialization: Namespace --> custom handler.
_serialization_registry: dict[type[Any], type[ExtensionHandler]] = {}
_deserialization_registry: dict[str, type[ExtensionHandler]] = {}
| ExtensionHandler |
python | pytorch__pytorch | torch/_export/serde/schema.py | {
"start": 7532,
"end": 7664
} | class ____:
# Actually, only tensors and SymInts are allowed here
arg: Annotated[Argument, 10]
@_union_dataclass
| UserInputSpec |
python | Lightning-AI__lightning | tests/tests_pytorch/test_cli.py | {
"start": 26153,
"end": 26311
} | class ____(torch.optim.Adam):
def __init__(self, params, num_classes: Optional[int] = None, **kwargs):
super().__init__(params, **kwargs)
| CustomAdam |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 37480,
"end": 38070
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
package: str = Field(
default=...,
description="The name of the CRAN package to install. This field is required.",
examples=["geojson"],
)
repo: Optional[str] = Field(
default=None,
description=(
"The repository where the package can be found. If not specified, the"
" default CRAN repo is used."
),
examples=["https://my-repo.com"],
)
| RCranLibrary |
python | airbytehq__airbyte | airbyte-ci/connectors/erd/src/erd/relationships.py | {
"start": 364,
"end": 2783
} | class ____:
def merge(
self,
estimated_relationships: Relationships,
confirmed_relationships: Relationships,
) -> Relationships:
streams = []
for estimated_stream in estimated_relationships["streams"]:
confirmed_relationships_for_stream = self._get_stream(
confirmed_relationships, estimated_stream["name"]
)
if confirmed_relationships_for_stream:
streams.append(
self._merge_for_stream(
estimated_stream, confirmed_relationships_for_stream
)
) # type: ignore # at this point, we know confirmed_relationships_for_stream is not None
else:
streams.append(estimated_stream)
already_processed_streams = set(
map(lambda relationship: relationship["name"], streams)
)
for confirmed_stream in confirmed_relationships["streams"]:
if confirmed_stream["name"] not in already_processed_streams:
streams.append(
{
"name": confirmed_stream["name"],
"relations": confirmed_stream["relations"],
}
)
return {"streams": streams}
def _merge_for_stream(
self, estimated: Relationship, confirmed: Relationship
) -> Relationship:
relations = copy.deepcopy(confirmed.get("relations", {}))
# get estimated but filter out false positives
for field, target in estimated.get("relations", {}).items():
false_positives = (
confirmed["false_positives"] if "false_positives" in confirmed else {}
)
if field not in relations and (
field not in false_positives
or false_positives.get(field, None) != target
): # type: ignore # at this point, false_positives should not be None
relations[field] = target
return {
"name": estimated["name"],
"relations": relations,
}
def _get_stream(
self, relationships: Relationships, stream_name: str
) -> Optional[Relationship]:
for stream in relationships["streams"]:
if stream.get("name", None) == stream_name:
return stream
return None
| RelationshipsMerger |
python | django__django | django/core/serializers/jsonl.py | {
"start": 331,
"end": 1247
} | class ____(PythonSerializer):
"""Convert a queryset to JSON Lines."""
internal_use_only = False
def _init_options(self):
self._current = None
self.json_kwargs = self.options.copy()
self.json_kwargs.pop("stream", None)
self.json_kwargs.pop("fields", None)
self.json_kwargs.pop("indent", None)
self.json_kwargs["separators"] = (",", ": ")
self.json_kwargs.setdefault("cls", DjangoJSONEncoder)
self.json_kwargs.setdefault("ensure_ascii", False)
def start_serialization(self):
self._init_options()
def end_object(self, obj):
# self._current has the field data
json.dump(self.get_dump_object(obj), self.stream, **self.json_kwargs)
self.stream.write("\n")
self._current = None
def getvalue(self):
# Grandparent super
return super(PythonSerializer, self).getvalue()
| Serializer |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-amplitude/components.py | {
"start": 2198,
"end": 3576
} | class ____(RecordTransformation):
def __init__(self):
self.name = "events"
self.date_time_fields = [
"event_time",
"server_upload_time",
"processed_time",
"server_received_time",
"user_creation_time",
"client_upload_time",
"client_event_time",
]
def transform(
self,
record: Dict[str, Any],
config: Optional[Config] = None,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
) -> None:
"""
Transform 'date-time' items to RFC3339 format
"""
for item in record:
if item in self.date_time_fields and record[item]:
try:
record[item] = ab_datetime_parse(record[item]).to_datetime().isoformat()
except Exception as e:
logger.error(f"Error converting {item} to RFC3339 format: {e}")
raise AirbyteTracedException(
message=f"Error converting {item} to RFC3339 format. See logs for more infromation",
internal_message=f"Error converting {item} to RFC3339 format: {e}",
failure_type=FailureType.system_error,
) from e
return record
| TransformDatetimesToRFC3339 |
python | modin-project__modin | asv_bench/benchmarks/benchmarks.py | {
"start": 33602,
"end": 34454
} | class ____:
params = [get_benchmark_shapes("TimeDropDuplicatesDataframe")]
param_names = ["shape"]
def setup(self, shape):
rows, cols = shape
N = rows // 10
K = 10
data = {}
# dataframe would have cols-1 keys(strings) and one value(int) column
for col in range(cols - 1):
data["key" + str(col + 1)] = IMPL.Index(
[f"i-{i}" for i in range(N)], dtype=object
).values.repeat(K)
data["value"] = np.random.randn(N * K)
self.df = IMPL.DataFrame(data)
execute(self.df)
def time_drop_dups(self, shape):
execute(self.df.drop_duplicates(self.df.columns[:-1]))
def time_drop_dups_inplace(self, shape):
self.df.drop_duplicates(self.df.columns[:-1], inplace=True)
execute(self.df)
| TimeDropDuplicatesDataframe |
python | dask__dask | dask/dataframe/dask_expr/_rolling.py | {
"start": 5106,
"end": 5160
} | class ____(RollingReduction):
how = "var"
| RollingVar |
python | huggingface__transformers | src/transformers/models/olmo2/modular_olmo2.py | {
"start": 8851,
"end": 9233
} | class ____(LlamaRMSNorm):
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return (self.weight * hidden_states).to(input_dtype)
| Olmo2RMSNorm |
python | mkdocs__mkdocs | mkdocs/config/config_options.py | {
"start": 10858,
"end": 11894
} | class ____(Generic[T], OptionallyRequired[T]):
"""
Type Config Option.
Validate the type of a config option against a given Python type.
"""
@overload
def __init__(self, type_: type[T], /, length: int | None = None, **kwargs):
...
@overload
def __init__(self, type_: tuple[type[T], ...], /, length: int | None = None, **kwargs):
...
def __init__(self, type_, /, length=None, **kwargs) -> None:
super().__init__(**kwargs)
self._type = type_
self.length = length
def run_validation(self, value: object) -> T:
if not isinstance(value, self._type):
msg = f"Expected type: {self._type} but received: {type(value)}"
elif self.length is not None and len(value) != self.length:
msg = (
f"Expected type: {self._type} with length {self.length}"
f" but received: {value!r} with length {len(value)}"
)
else:
return value
raise ValidationError(msg)
| Type |
python | huggingface__transformers | src/transformers/models/poolformer/modeling_poolformer.py | {
"start": 12003,
"end": 14150
} | class ____(PoolFormerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.poolformer = PoolFormerModel(config)
# Final norm
self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
# Classifier head
self.classifier = (
nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.poolformer(
pixel_values,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
__all__ = ["PoolFormerForImageClassification", "PoolFormerModel", "PoolFormerPreTrainedModel"]
| PoolFormerForImageClassification |
python | pyca__cryptography | tests/test_utils.py | {
"start": 187720,
"end": 188321
} | class ____:
def test_getattr(self):
with pytest.warns(DeprecationWarning):
assert deprecated_module.DEPRECATED == 3
assert deprecated_module.NOT_DEPRECATED == 12
def test_inspect_deprecated_module(self):
# Check if inspection is supported by _ModuleWithDeprecations.
assert isinstance(
deprecated_module, cryptography.utils._ModuleWithDeprecations
)
source_file = inspect.getsourcefile(deprecated_module)
assert isinstance(source_file, str)
assert source_file.endswith("deprecated_module.py")
| TestDeprecated |
python | plotly__plotly.py | plotly/figure_factory/_quiver.py | {
"start": 3853,
"end": 9181
} | class ____(object):
"""
Refer to FigureFactory.create_quiver() for docstring
"""
def __init__(self, x, y, u, v, scale, arrow_scale, angle, scaleratio=1, **kwargs):
try:
x = utils.flatten(x)
except exceptions.PlotlyError:
pass
try:
y = utils.flatten(y)
except exceptions.PlotlyError:
pass
try:
u = utils.flatten(u)
except exceptions.PlotlyError:
pass
try:
v = utils.flatten(v)
except exceptions.PlotlyError:
pass
self.x = x
self.y = y
self.u = u
self.v = v
self.scale = scale
self.scaleratio = scaleratio
self.arrow_scale = arrow_scale
self.angle = angle
self.end_x = []
self.end_y = []
self.scale_uv()
barb_x, barb_y = self.get_barbs()
arrow_x, arrow_y = self.get_quiver_arrows()
def scale_uv(self):
"""
Scales u and v to avoid overlap of the arrows.
u and v are added to x and y to get the
endpoints of the arrows so a smaller scale value will
result in less overlap of arrows.
"""
self.u = [i * self.scale * self.scaleratio for i in self.u]
self.v = [i * self.scale for i in self.v]
def get_barbs(self):
"""
Creates x and y startpoint and endpoint pairs
After finding the endpoint of each barb this zips startpoint and
endpoint pairs to create 2 lists: x_values for barbs and y values
for barbs
:rtype: (list, list) barb_x, barb_y: list of startpoint and endpoint
x_value pairs separated by a None to create the barb of the arrow,
and list of startpoint and endpoint y_value pairs separated by a
None to create the barb of the arrow.
"""
self.end_x = [i + j for i, j in zip(self.x, self.u)]
self.end_y = [i + j for i, j in zip(self.y, self.v)]
empty = [None] * len(self.x)
barb_x = utils.flatten(zip(self.x, self.end_x, empty))
barb_y = utils.flatten(zip(self.y, self.end_y, empty))
return barb_x, barb_y
def get_quiver_arrows(self):
"""
Creates lists of x and y values to plot the arrows
Gets length of each barb then calculates the length of each side of
the arrow. Gets angle of barb and applies angle to each side of the
arrowhead. Next uses arrow_scale to scale the length of arrowhead and
creates x and y values for arrowhead point1 and point2. Finally x and y
values for point1, endpoint and point2s for each arrowhead are
separated by a None and zipped to create lists of x and y values for
the arrows.
:rtype: (list, list) arrow_x, arrow_y: list of point1, endpoint, point2
x_values separated by a None to create the arrowhead and list of
point1, endpoint, point2 y_values separated by a None to create
the barb of the arrow.
"""
dif_x = [i - j for i, j in zip(self.end_x, self.x)]
dif_y = [i - j for i, j in zip(self.end_y, self.y)]
# Get barb lengths(default arrow length = 30% barb length)
barb_len = [None] * len(self.x)
for index in range(len(barb_len)):
barb_len[index] = math.hypot(dif_x[index] / self.scaleratio, dif_y[index])
# Make arrow lengths
arrow_len = [None] * len(self.x)
arrow_len = [i * self.arrow_scale for i in barb_len]
# Get barb angles
barb_ang = [None] * len(self.x)
for index in range(len(barb_ang)):
barb_ang[index] = math.atan2(dif_y[index], dif_x[index] / self.scaleratio)
# Set angles to create arrow
ang1 = [i + self.angle for i in barb_ang]
ang2 = [i - self.angle for i in barb_ang]
cos_ang1 = [None] * len(ang1)
for index in range(len(ang1)):
cos_ang1[index] = math.cos(ang1[index])
seg1_x = [i * j for i, j in zip(arrow_len, cos_ang1)]
sin_ang1 = [None] * len(ang1)
for index in range(len(ang1)):
sin_ang1[index] = math.sin(ang1[index])
seg1_y = [i * j for i, j in zip(arrow_len, sin_ang1)]
cos_ang2 = [None] * len(ang2)
for index in range(len(ang2)):
cos_ang2[index] = math.cos(ang2[index])
seg2_x = [i * j for i, j in zip(arrow_len, cos_ang2)]
sin_ang2 = [None] * len(ang2)
for index in range(len(ang2)):
sin_ang2[index] = math.sin(ang2[index])
seg2_y = [i * j for i, j in zip(arrow_len, sin_ang2)]
# Set coordinates to create arrow
for index in range(len(self.end_x)):
point1_x = [i - j * self.scaleratio for i, j in zip(self.end_x, seg1_x)]
point1_y = [i - j for i, j in zip(self.end_y, seg1_y)]
point2_x = [i - j * self.scaleratio for i, j in zip(self.end_x, seg2_x)]
point2_y = [i - j for i, j in zip(self.end_y, seg2_y)]
# Combine lists to create arrow
empty = [None] * len(self.end_x)
arrow_x = utils.flatten(zip(point1_x, self.end_x, point2_x, empty))
arrow_y = utils.flatten(zip(point1_y, self.end_y, point2_y, empty))
return arrow_x, arrow_y
| _Quiver |
python | xlwings__xlwings | xlwings/base_classes.py | {
"start": 7002,
"end": 12108
} | class ____:
def adjust_indent(self, amount):
raise NotImplementedError()
def group(self, by):
raise NotImplementedError()
def ungroup(self, by):
raise NotImplementedError()
@property
def coords(self):
raise NotImplementedError()
@property
def api(self):
raise NotImplementedError()
def __len__(self):
raise NotImplementedError()
@property
def row(self):
raise NotImplementedError()
@property
def column(self):
raise NotImplementedError()
@property
def shape(self):
raise NotImplementedError()
@property
def raw_value(self):
raise NotImplementedError()
@raw_value.setter
def raw_value(self, value):
raise NotImplementedError()
def clear_contents(self):
raise NotImplementedError()
def clear_formats(self):
raise NotImplementedError()
def clear(self):
raise NotImplementedError()
def end(self, direction):
raise NotImplementedError()
@property
def formula(self):
raise NotImplementedError()
@formula.setter
def formula(self, value):
raise NotImplementedError()
@property
def formula2(self):
raise NotImplementedError()
@formula2.setter
def formula2(self, value):
raise NotImplementedError()
@property
def formula_array(self):
raise NotImplementedError()
@formula_array.setter
def formula_array(self, value):
raise NotImplementedError()
@property
def font(self):
raise NotImplementedError()
@property
def column_width(self):
raise NotImplementedError()
@column_width.setter
def column_width(self, value):
raise NotImplementedError()
@property
def row_height(self):
raise NotImplementedError()
@row_height.setter
def row_height(self, value):
raise NotImplementedError()
@property
def width(self):
raise NotImplementedError()
@property
def height(self):
raise NotImplementedError()
@property
def left(self):
raise NotImplementedError()
@property
def top(self):
raise NotImplementedError()
@property
def has_array(self):
raise NotImplementedError()
@property
def number_format(self):
raise NotImplementedError()
@number_format.setter
def number_format(self, value):
raise NotImplementedError()
def get_address(self, row_absolute, col_absolute, external):
raise NotImplementedError()
@property
def address(self):
raise NotImplementedError()
@property
def current_region(self):
raise NotImplementedError()
def autofit(self, axis=None):
raise NotImplementedError()
def insert(self, shift=None, copy_origin=None):
raise NotImplementedError()
def delete(self, shift=None):
raise NotImplementedError()
def copy(self, destination=None):
raise NotImplementedError()
def copy_from(
self, source_range, copy_type="all", skip_blanks=False, transpose=False
):
raise NotImplementedError()
def paste(self, paste=None, operation=None, skip_blanks=False, transpose=False):
raise NotImplementedError()
@property
def hyperlink(self):
raise NotImplementedError()
def add_hyperlink(self, address, text_to_display=None, screen_tip=None):
raise NotImplementedError()
@property
def color(self):
raise NotImplementedError()
@color.setter
def color(self, color_or_rgb):
raise NotImplementedError()
@property
def name(self):
raise NotImplementedError()
@name.setter
def name(self, value):
raise NotImplementedError()
def __call__(self, arg1, arg2=None):
raise NotImplementedError()
@property
def rows(self):
raise NotImplementedError()
@property
def columns(self):
raise NotImplementedError()
def select(self):
raise NotImplementedError()
@property
def merge_area(self):
raise NotImplementedError()
@property
def merge_cells(self):
raise NotImplementedError()
def merge(self, across):
raise NotImplementedError()
def unmerge(self):
raise NotImplementedError()
@property
def table(self):
raise NotImplementedError()
@property
def characters(self):
raise NotImplementedError()
@property
def wrap_text(self):
raise NotImplementedError()
@wrap_text.setter
def wrap_text(self, value):
raise NotImplementedError()
@property
def note(self):
raise NotImplementedError()
def copy_picture(self, appearance, format):
raise NotImplementedError()
def to_png(self, path):
raise NotImplementedError()
def to_pdf(self, path, quality):
raise NotImplementedError()
def autofill(self, destination, type_):
raise NotImplementedError()
| Range |
python | scipy__scipy | scipy/stats/tests/test_generation/reference_distributions.py | {
"start": 14095,
"end": 14537
} | class ____(ReferenceDistribution):
def __init__(self, *, alpha, beta):
super().__init__(alpha=alpha, beta=beta)
def _pdf(self, x, alpha, beta):
# Implemented as described in https://www.jstor.org/stable/4616433
# Equations 2.1 - 2.3
q = mp.sqrt(1 + x**2)
a = mp.pi**-1 * alpha * mp.exp(mp.sqrt(alpha**2 - beta**2))
return a * q**-1 * mp.besselk(1, alpha*q) * mp.exp(beta*x)
| NormInvGauss |
python | tensorflow__tensorflow | tensorflow/python/eager/record.py | {
"start": 785,
"end": 4436
} | class ____(object):
"""A scope that tracks all trainable variable accesses within it.
This explicitly ignores variables that are not marked as trainable.
Sample usage:
var = tf.Variable(0.0)
with VariableWatcher() as variable_watcher:
var.assign_add(1.0)
assert variable_watcher.watched_variables == [var]
"""
__slots__ = ["_variable_watcher"]
def __init__(self):
self._variable_watcher = None
def __enter__(self):
self._variable_watcher = pywrap_tfe.TFE_Py_VariableWatcherNew()
return self
def __exit__(self, typ, value, traceback):
pywrap_tfe.TFE_Py_VariableWatcherRemove(self._variable_watcher)
def watched_variables(self):
"""Returns a tuple of variables accessed under this scope."""
return pywrap_tfe.TFE_Py_VariableWatcherWatchedVariables(
self._variable_watcher)
@contextlib.contextmanager
def stop_recording():
"""Stop all gradient recording (backprop and forwardprop)."""
is_stopped = pywrap_tfe.TFE_Py_TapeSetIsStopped()
try:
if not is_stopped:
pywrap_tfe.TFE_Py_TapeSetStopOnThread()
yield
finally:
if not is_stopped:
pywrap_tfe.TFE_Py_TapeSetRestartOnThread()
def should_record_backprop(tensors):
"""Returns true if any tape in the stack watches any of these tensors.
Only takes GradientTapes into account, not forward accumulators.
Args:
tensors: Tensors to check, typically inputs to an operation.
Returns:
Boolean, whether any tape watches any of `tensors`.
"""
return pywrap_tfe.TFE_Py_TapeSetShouldRecordBackprop(tensors)
def record_operation(op_type, output_tensors, input_tensors, backward_function,
forward_function=None):
"""Records the operation on all tapes in the stack."""
pywrap_tfe.TFE_Py_TapeSetRecordOperation(op_type, output_tensors,
input_tensors, backward_function,
forward_function)
def record_operation_backprop_only(op_type, output_tensors, input_tensors,
backward_function):
"""Records the operation on all backward tapes in the stack."""
pywrap_tfe.TFE_Py_TapeSetRecordOperationBackprop(op_type, output_tensors,
input_tensors,
backward_function)
def record_operation_forwardprop_only(op_type, output_tensors, input_tensors,
backward_function,
forwardprop_output_indices):
"""Records the operation on all forward accumulators in the stack.
Args:
op_type: a string for the operation type, used in the backprop code
output_tensors: a list of Python Tensor objects output by the operation
input_tensors: a list of input Tensors to the recorded operation
backward_function: the function to be called to, given the gradients of the
output tensors, produce the gradients of the input tensors. This function
is automatically transposed to produce output gradients given input
gradients.
forwardprop_output_indices: indicates any output_tensors which contain JVPs.
Typically these will have come from TFE_Py_PackForwardGradients. May be
None or an empty sequence if there are no JVP outputs from the operation.
"""
pywrap_tfe.TFE_Py_TapeSetRecordOperationForwardprop(
op_type, output_tensors, input_tensors, backward_function,
forwardprop_output_indices)
def could_possibly_record():
"""Returns True if any tape is active."""
return not pywrap_tfe.TFE_Py_TapeSetIsEmpty()
| VariableWatcher |
python | django__django | tests/middleware/test_csp.py | {
"start": 7036,
"end": 7659
} | class ____(SeleniumTestCase, StaticLiveServerTestCase):
available_apps = ["middleware"]
def setUp(self):
self.addCleanup(csp_reports.clear)
super().setUp()
def test_reports_are_generated(self):
url = self.live_server_url + "/csp-failure/"
self.selenium.get(url)
time.sleep(1) # Allow time for the CSP report to be sent.
reports = sorted(
(r["csp-report"]["document-uri"], r["csp-report"]["violated-directive"])
for r in csp_reports
)
self.assertEqual(reports, [(url, "img-src"), (url, "style-src-elem")])
| CSPSeleniumTestCase |
python | bokeh__bokeh | src/bokeh/util/warnings.py | {
"start": 1630,
"end": 3108
} | class ____(UserWarning):
''' A Bokeh-specific ``UserWarning`` subclass.
Used to selectively filter Bokeh warnings for unconditional display.
'''
def warn(message: str, category: type[Warning] | None = None, stacklevel: int | None = None) -> None:
if stacklevel is None:
stacklevel = find_stack_level()
import warnings
warnings.warn(message, category, stacklevel=stacklevel)
def find_stack_level() -> int:
"""Find the first place in the stack that is not inside Bokeh.
Inspired by: pandas.util._exceptions.find_stack_level
"""
import bokeh
pkg_dir = os.path.dirname(bokeh.__file__)
# https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow
frame = inspect.currentframe()
n = 0
while frame:
fname = inspect.getfile(frame)
if fname.startswith(pkg_dir):
frame = frame.f_back
n += 1
else:
break
return n
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| BokehUserWarning |
python | ansible__ansible | lib/ansible/_internal/_templating/_jinja_bits.py | {
"start": 8266,
"end": 10838
} | class ____(Context):
"""
A custom context which intercepts resolve_or_missing() calls and
runs them through AnsibleAccessContext. This allows usage of variables
to be tracked. If needed, values can also be modified before being returned.
"""
environment: AnsibleEnvironment # narrow the type specified by the base
def __init__(self, *args, **kwargs):
super(AnsibleContext, self).__init__(*args, **kwargs)
__repr__ = object.__repr__ # prevent Jinja from dumping vars in case this gets repr'd
def get_all(self):
"""
Override Jinja's default get_all to return all vars in the context as a ChainMap with a mutable layer at the bottom.
This provides some isolation against accidental changes to inherited variable contexts without requiring copies.
"""
layers = []
if self.vars:
layers.append(self.vars)
if self.parent:
layers.append(self.parent)
# HACK: always include a sacrificial plain-dict on the bottom layer, since Jinja's debug and stacktrace rewrite code invokes
# `__setitem__` outside a call context; this will ensure that it always occurs on a plain dict instead of a lazy one.
return ChainMap({}, *layers)
# noinspection PyShadowingBuiltins
def derived(self, locals: t.Optional[t.Dict[str, t.Any]] = None) -> Context:
# this is a clone of Jinja's impl of derived, but using our lazy-aware _new_context
context = _new_context(
environment=self.environment,
template_name=self.name,
blocks={},
shared=True,
jinja_locals=locals,
jinja_vars=self.get_all(),
)
context.eval_ctx = self.eval_ctx
context.blocks.update((k, list(v)) for k, v in self.blocks.items())
return context
def keys(self, *args, **kwargs):
"""Base Context delegates to `dict.keys` against `get_all`, which would fail since we return a ChainMap. No known usage."""
raise NotImplementedError()
def values(self, *args, **kwargs):
"""Base Context delegates to `dict.values` against `get_all`, which would fail since we return a ChainMap. No known usage."""
raise NotImplementedError()
def items(self, *args, **kwargs):
"""Base Context delegates to built-in `dict.items` against `get_all`, which would fail since we return a ChainMap. No known usage."""
raise NotImplementedError()
@dataclasses.dataclass(frozen=True, kw_only=True, slots=True)
| AnsibleContext |
python | huggingface__transformers | tests/utils/test_hf_argparser.py | {
"start": 2436,
"end": 2711
} | class ____:
foo_int: list[int] = list_field(default=[])
bar_int: list[int] = list_field(default=[1, 2, 3])
foo_str: list[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
foo_float: list[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
| ListExample |
python | pytest-dev__pytest | testing/test_capture.py | {
"start": 1687,
"end": 4526
} | class ____:
@pytest.mark.parametrize("method", ["no", "sys", "fd"])
def test_capturing_basic_api(self, method) -> None:
capouter = StdCaptureFD()
old = sys.stdout, sys.stderr, sys.stdin
try:
capman = CaptureManager(method)
capman.start_global_capturing()
capman.suspend_global_capture()
outerr = capman.read_global_capture()
assert outerr == ("", "")
capman.suspend_global_capture()
outerr = capman.read_global_capture()
assert outerr == ("", "")
print("hello")
capman.suspend_global_capture()
out, _err = capman.read_global_capture()
if method == "no":
assert old == (sys.stdout, sys.stderr, sys.stdin)
else:
assert not out
capman.resume_global_capture()
print("hello")
capman.suspend_global_capture()
out, _err = capman.read_global_capture()
if method != "no":
assert out == "hello\n"
capman.stop_global_capturing()
finally:
capouter.stop_capturing()
def test_init_capturing(self):
capouter = StdCaptureFD()
try:
capman = CaptureManager("fd")
capman.start_global_capturing()
pytest.raises(AssertionError, capman.start_global_capturing)
capman.stop_global_capturing()
finally:
capouter.stop_capturing()
@pytest.mark.parametrize("method", ["fd", "sys"])
def test_capturing_unicode(pytester: Pytester, method: str) -> None:
obj = "'b\u00f6y'"
pytester.makepyfile(
f"""\
# taken from issue 227 from nosetests
def test_unicode():
import sys
print(sys.stdout)
print({obj})
"""
)
result = pytester.runpytest(f"--capture={method}")
result.stdout.fnmatch_lines(["*1 passed*"])
@pytest.mark.parametrize("method", ["fd", "sys"])
def test_capturing_bytes_in_utf8_encoding(pytester: Pytester, method: str) -> None:
pytester.makepyfile(
"""\
def test_unicode():
print('b\\u00f6y')
"""
)
result = pytester.runpytest(f"--capture={method}")
result.stdout.fnmatch_lines(["*1 passed*"])
def test_collect_capturing(pytester: Pytester) -> None:
p = pytester.makepyfile(
"""
import sys
print("collect %s failure" % 13)
sys.stderr.write("collect %s_stderr failure" % 13)
import xyz42123
"""
)
result = pytester.runpytest(p)
result.stdout.fnmatch_lines(
[
"*Captured stdout*",
"collect 13 failure",
"*Captured stderr*",
"collect 13_stderr failure",
]
)
| TestCaptureManager |
python | PyCQA__pylint | tests/functional/m/method_hidden.py | {
"start": 2131,
"end": 2204
} | class ____:
def __init__(self):
self.__private = None
| ParentTwo |
python | django__django | tests/multiple_database/models.py | {
"start": 1994,
"end": 2187
} | class ____(models.Model):
user = models.OneToOneField(User, models.SET_NULL, null=True)
flavor = models.CharField(max_length=100)
class Meta:
ordering = ("flavor",)
| UserProfile |
python | matplotlib__matplotlib | lib/matplotlib/colors.py | {
"start": 115244,
"end": 120683
} | class ____(Normalize):
"""
Generate a colormap index based on discrete intervals.
Unlike `Normalize` or `LogNorm`, `BoundaryNorm` maps values to integers
instead of to the interval 0-1.
"""
# Mapping to the 0-1 interval could have been done via piece-wise linear
# interpolation, but using integers seems simpler, and reduces the number
# of conversions back and forth between int and float.
def __init__(self, boundaries, ncolors, clip=False, *, extend='neither'):
"""
Parameters
----------
boundaries : array-like
Monotonically increasing sequence of at least 2 bin edges: data
falling in the n-th bin will be mapped to the n-th color.
ncolors : int
Number of colors in the colormap to be used.
clip : bool, optional
If clip is ``True``, out of range values are mapped to 0 if they
are below ``boundaries[0]`` or mapped to ``ncolors - 1`` if they
are above ``boundaries[-1]``.
If clip is ``False``, out of range values are mapped to -1 if
they are below ``boundaries[0]`` or mapped to *ncolors* if they are
above ``boundaries[-1]``. These are then converted to valid indices
by `Colormap.__call__`.
extend : {'neither', 'both', 'min', 'max'}, default: 'neither'
Extend the number of bins to include one or both of the
regions beyond the boundaries. For example, if ``extend``
is 'min', then the color to which the region between the first
pair of boundaries is mapped will be distinct from the first
color in the colormap, and by default a
`~matplotlib.colorbar.Colorbar` will be drawn with
the triangle extension on the left or lower end.
Notes
-----
If there are fewer bins (including extensions) than colors, then the
color index is chosen by linearly interpolating the ``[0, nbins - 1]``
range onto the ``[0, ncolors - 1]`` range, effectively skipping some
colors in the middle of the colormap.
"""
if clip and extend != 'neither':
raise ValueError("'clip=True' is not compatible with 'extend'")
super().__init__(vmin=boundaries[0], vmax=boundaries[-1], clip=clip)
self.boundaries = np.asarray(boundaries)
self.N = len(self.boundaries)
if self.N < 2:
raise ValueError("You must provide at least 2 boundaries "
f"(1 region) but you passed in {boundaries!r}")
self.Ncmap = ncolors
self.extend = extend
self._scale = None # don't use the default scale.
self._n_regions = self.N - 1 # number of colors needed
self._offset = 0
if extend in ('min', 'both'):
self._n_regions += 1
self._offset = 1
if extend in ('max', 'both'):
self._n_regions += 1
if self._n_regions > self.Ncmap:
raise ValueError(f"There are {self._n_regions} color bins "
"including extensions, but ncolors = "
f"{ncolors}; ncolors must equal or exceed the "
"number of bins")
def __call__(self, value, clip=None):
"""
This method behaves similarly to `.Normalize.__call__`, except that it
returns integers or arrays of int16.
"""
if clip is None:
clip = self.clip
xx, is_scalar = self.process_value(value)
mask = np.ma.getmaskarray(xx)
# Fill masked values a value above the upper boundary
xx = np.atleast_1d(xx.filled(self.vmax + 1))
if clip:
np.clip(xx, self.vmin, self.vmax, out=xx)
max_col = self.Ncmap - 1
else:
max_col = self.Ncmap
# this gives us the bins in the lookup table in the range
# [0, _n_regions - 1] (the offset is set in the init)
iret = np.digitize(xx, self.boundaries) - 1 + self._offset
# if we have more colors than regions, stretch the region
# index computed above to full range of the color bins. This
# will make use of the full range (but skip some of the colors
# in the middle) such that the first region is mapped to the
# first color and the last region is mapped to the last color.
if self.Ncmap > self._n_regions:
if self._n_regions == 1:
# special case the 1 region case, pick the middle color
iret[iret == 0] = (self.Ncmap - 1) // 2
else:
# otherwise linearly remap the values from the region index
# to the color index spaces
iret = (self.Ncmap - 1) / (self._n_regions - 1) * iret
# cast to 16bit integers in all cases
iret = iret.astype(np.int16)
iret[xx < self.vmin] = -1
iret[xx >= self.vmax] = max_col
ret = np.ma.array(iret, mask=mask)
if is_scalar:
ret = int(ret[0]) # assume python scalar
return ret
def inverse(self, value):
"""
Raises
------
ValueError
BoundaryNorm is not invertible, so calling this method will always
raise an error
"""
raise ValueError("BoundaryNorm is not invertible")
| BoundaryNorm |
python | doocs__leetcode | solution/3500-3599/3536.Maximum Product of Two Digits/Solution.py | {
"start": 0,
"end": 242
} | class ____:
def maxProduct(self, n: int) -> int:
a = b = 0
while n:
n, x = divmod(n, 10)
if a < x:
a, b = x, a
elif b < x:
b = x
return a * b
| Solution |
python | crytic__slither | slither/tools/upgradeability/checks/constant.py | {
"start": 3058,
"end": 5950
} | class ____(AbstractCheck):
ARGUMENT = "became-constant"
IMPACT = CheckClassification.HIGH
HELP = "Variables that should not be constant"
WIKI = "https://github.com/crytic/slither/wiki/Upgradeability-Checks#variables-that-should-not-be-constant"
WIKI_TITLE = "Variables that should not be constant"
# region wiki_description
WIKI_DESCRIPTION = """
Detect state variables that should not be `constant̀`.
"""
# endregion wiki_description
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
contract Contract{
uint variable1;
uint variable2;
uint variable3;
}
contract ContractV2{
uint variable1;
uint constant variable2;
uint variable3;
}
```
Because `variable2` is now a `constant`, the storage location of `variable3` will be different.
As a result, `ContractV2` will have a corrupted storage layout.
"""
# endregion wiki_exploit_scenario
# region wiki_recommendation
WIKI_RECOMMENDATION = """
Do not make an existing state variable `constant`.
"""
# endregion wiki_recommendation
REQUIRE_CONTRACT = True
REQUIRE_CONTRACT_V2 = True
def _check(self) -> List[Output]:
contract_v1 = self.contract
contract_v2 = self.contract_v2
if contract_v2 is None:
raise Exception("became-constant requires a V2 contract")
state_variables_v1 = contract_v1.state_variables
state_variables_v2 = contract_v2.state_variables
v2_additional_variables = len(state_variables_v2) - len(state_variables_v1)
v2_additional_variables = max(v2_additional_variables, 0)
# We keep two index, because we need to have them out of sync if v2
# has additional non constant variables
idx_v1 = 0
idx_v2 = 0
results = []
while idx_v1 < len(state_variables_v1):
state_v1 = contract_v1.state_variables[idx_v1]
if len(state_variables_v2) <= idx_v2:
break
state_v2 = contract_v2.state_variables[idx_v2]
if state_v2:
if state_v1.is_constant:
if not state_v2.is_constant:
# If v2 has additional non constant variables, we need to skip them
if (
state_v1.name != state_v2.name or state_v1.type != state_v2.type
) and v2_additional_variables > 0:
v2_additional_variables -= 1
idx_v2 += 1
continue
elif state_v2.is_constant:
info: CHECK_INFO = [state_v1, " was not constant but ", state_v2, " is.\n"]
json = self.generate_result(info)
results.append(json)
idx_v1 += 1
idx_v2 += 1
return results
| BecameConstant |
python | scipy__scipy | scipy/stats/tests/test_rank.py | {
"start": 328,
"end": 2424
} | class ____:
def test_empty(self):
"""An empty array requires no correction, should return 1.0."""
ranks = np.array([], dtype=np.float64)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
def test_one(self):
"""A single element requires no correction, should return 1.0."""
ranks = np.array([1.0], dtype=np.float64)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
def test_no_correction(self):
"""Arrays with no ties require no correction."""
ranks = np.arange(2.0)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
ranks = np.arange(3.0)
c = tiecorrect(ranks)
assert_equal(c, 1.0)
def test_basic(self):
"""Check a few basic examples of the tie correction factor."""
# One tie of two elements
ranks = np.array([1.0, 2.5, 2.5])
c = tiecorrect(ranks)
T = 2.0
N = ranks.size
expected = 1.0 - (T**3 - T) / (N**3 - N)
assert_equal(c, expected)
# One tie of two elements (same as above, but tie is not at the end)
ranks = np.array([1.5, 1.5, 3.0])
c = tiecorrect(ranks)
T = 2.0
N = ranks.size
expected = 1.0 - (T**3 - T) / (N**3 - N)
assert_equal(c, expected)
# One tie of three elements
ranks = np.array([1.0, 3.0, 3.0, 3.0])
c = tiecorrect(ranks)
T = 3.0
N = ranks.size
expected = 1.0 - (T**3 - T) / (N**3 - N)
assert_equal(c, expected)
# Two ties, lengths 2 and 3.
ranks = np.array([1.5, 1.5, 4.0, 4.0, 4.0])
c = tiecorrect(ranks)
T1 = 2.0
T2 = 3.0
N = ranks.size
expected = 1.0 - ((T1**3 - T1) + (T2**3 - T2)) / (N**3 - N)
assert_equal(c, expected)
def test_overflow(self):
ntie, k = 2000, 5
a = np.repeat(np.arange(k), ntie)
n = a.size # ntie * k
out = tiecorrect(rankdata(a))
assert_equal(out, 1.0 - k * (ntie**3 - ntie) / float(n**3 - n))
@make_xp_test_case(stats.rankdata)
| TestTieCorrect |
python | tensorflow__tensorflow | tensorflow/python/feature_column/feature_column_v2.py | {
"start": 92191,
"end": 96437
} | class ____(
DenseColumn,
fc_old._DenseColumn, # pylint: disable=protected-access
collections.namedtuple(
'NumericColumn',
('key', 'shape', 'default_value', 'dtype', 'normalizer_fn'))):
"""see `numeric_column`."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {
self.key:
parsing_ops.FixedLenFeature(self.shape, self.dtype,
self.default_value)
}
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _parse_example_spec(self):
return self.parse_example_spec
def _transform_input_tensor(self, input_tensor):
if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
raise ValueError(
'The corresponding Tensor of numerical column must be a Tensor. '
'SparseTensor is not supported. key: {}'.format(self.key))
if self.normalizer_fn is not None:
input_tensor = self.normalizer_fn(input_tensor)
return math_ops.cast(input_tensor, dtypes.float32)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _transform_feature(self, inputs):
input_tensor = inputs.get(self.key)
return self._transform_input_tensor(input_tensor)
def transform_feature(self, transformation_cache, state_manager):
"""See `FeatureColumn` base class.
In this case, we apply the `normalizer_fn` to the input tensor.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Normalized input tensor.
Raises:
ValueError: If a SparseTensor is passed in.
"""
input_tensor = transformation_cache.get(self.key, state_manager)
return self._transform_input_tensor(input_tensor)
@property
def variable_shape(self):
"""See `DenseColumn` base class."""
return tensor_shape.TensorShape(self.shape)
@property
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _variable_shape(self):
return self.variable_shape
def get_dense_tensor(self, transformation_cache, state_manager):
"""Returns dense `Tensor` representing numeric feature.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Dense `Tensor` created within `transform_feature`.
"""
# Feature has been already transformed. Return the intermediate
# representation created by _transform_feature.
return transformation_cache.get(self, state_manager)
@deprecation.deprecated(_FEATURE_COLUMN_DEPRECATION_DATE,
_FEATURE_COLUMN_DEPRECATION)
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
del weight_collections
del trainable
return inputs.get(self)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def get_config(self):
"""See 'FeatureColumn` base class."""
config = dict(zip(self._fields, self))
config['normalizer_fn'] = serialization._serialize_keras_object( # pylint: disable=protected-access
self.normalizer_fn)
config['dtype'] = self.dtype.name
return config
@classmethod
def from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
_check_config_keys(config, cls._fields)
kwargs = _standardize_and_copy_config(config)
kwargs['normalizer_fn'] = serialization._deserialize_keras_object( # pylint: disable=protected-access
config['normalizer_fn'],
custom_objects=custom_objects)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
@serialization.register_feature_column
| NumericColumn |
python | joke2k__faker | faker/providers/currency/de_CH/__init__.py | {
"start": 48,
"end": 388
} | class ____(CurrencyProvider):
# source: https://de.wikipedia.org/wiki/Schreibweise_von_Zahlen#Dezimaltrennzeichen_2
price_formats = ["\N{FIGURE DASH}.##", "%.##", "%#.##", "%##.##", "% ###.##", "%# ###.##"]
def pricetag(self):
return "Fr.\N{NO-BREAK SPACE}" + self.numerify(self.random_element(self.price_formats))
| Provider |
python | xlwings__xlwings | xlwings/base_classes.py | {
"start": 17686,
"end": 17944
} | class ____:
@property
def api(self):
raise NotImplementedError()
@property
def print_area(self):
raise NotImplementedError()
@print_area.setter
def print_area(self, value):
raise NotImplementedError()
| PageSetup |
python | pydantic__pydantic | tests/mypy/outputs/mypy-plugin-strict_ini/plugin_fail_baseConfig.py | {
"start": 1370,
"end": 1556
} | class ____(BaseModel):
class Config:
extra = 'forbid'
ForbidExtraModel(x=1)
# MYPY: error: Unexpected keyword argument "x" for "ForbidExtraModel" [call-arg]
| ForbidExtraModel |
python | google__pytype | pytype/overlays/attr_overlay.py | {
"start": 9899,
"end": 11284
} | class ____(AttrsBase):
"""Implements the @attr.define decorator.
See https://www.attrs.org/en/stable/api.html#next-generation-apis
"""
# Override the default arguments.
DEFAULT_ARGS: ClassVar[dict[str, Any]] = {
# Entries from Decorator.DEFAULT_ARGS
"init": True,
"kw_only": False,
# Deviations from @attr.s
"auto_attribs": None,
# The overlay doesn't pay attention to these yet, so declaring these
# deviations doesn't do much. Here in case in the future the overlay does.
"slots": True,
"weakref_slots": True,
"auto_exc": True,
"auto_detect": True,
}
@classmethod
def make(cls, ctx, module):
return super().make("define", ctx, module)
def _handle_auto_attribs(self, auto_attribs, local_ops, cls_name):
if auto_attribs is not None:
return super()._handle_auto_attribs(auto_attribs, local_ops, cls_name)
is_annotated = {}
for op in local_ops:
local = self.ctx.vm.annotated_locals[cls_name][op.name]
if not classgen.is_relevant_class_local(local, op.name, False):
continue
if op.name not in is_annotated:
is_annotated[op.name] = op.is_annotate()
elif op.is_annotate():
is_annotated[op.name] = True
all_annotated = all(is_annotated.values())
return all_annotated, _ordering_for_auto_attrib(all_annotated)
| AttrsNextGenDefine |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 81586,
"end": 84299
} | class ____(TestCase):
def assertAllGroupsEqual(self, groupby1, groupby2):
for a, b in zip(groupby1, groupby2):
key1, group1 = a
key2, group2 = b
self.assertEqual(key1, key2)
self.assertListEqual(list(group1), list(group2))
self.assertRaises(StopIteration, lambda: next(groupby1))
self.assertRaises(StopIteration, lambda: next(groupby2))
def test_default_funcs(self):
iterable = [(x // 5, x) for x in range(1000)]
actual = mi.groupby_transform(iterable)
expected = groupby(iterable)
self.assertAllGroupsEqual(actual, expected)
def test_valuefunc(self):
iterable = [(int(x / 5), int(x / 3), x) for x in range(10)]
# Test the standard usage of grouping one iterable using another's keys
grouper = mi.groupby_transform(
iterable, keyfunc=itemgetter(0), valuefunc=itemgetter(-1)
)
actual = [(k, list(g)) for k, g in grouper]
expected = [(0, [0, 1, 2, 3, 4]), (1, [5, 6, 7, 8, 9])]
self.assertEqual(actual, expected)
grouper = mi.groupby_transform(
iterable, keyfunc=itemgetter(1), valuefunc=itemgetter(-1)
)
actual = [(k, list(g)) for k, g in grouper]
expected = [(0, [0, 1, 2]), (1, [3, 4, 5]), (2, [6, 7, 8]), (3, [9])]
self.assertEqual(actual, expected)
# and now for something a little different
d = dict(zip(range(10), 'abcdefghij'))
grouper = mi.groupby_transform(
range(10), keyfunc=lambda x: x // 5, valuefunc=d.get
)
actual = [(k, ''.join(g)) for k, g in grouper]
expected = [(0, 'abcde'), (1, 'fghij')]
self.assertEqual(actual, expected)
def test_no_valuefunc(self):
iterable = range(1000)
def key(x):
return x // 5
actual = mi.groupby_transform(iterable, key, valuefunc=None)
expected = groupby(iterable, key)
self.assertAllGroupsEqual(actual, expected)
actual = mi.groupby_transform(iterable, key) # default valuefunc
expected = groupby(iterable, key)
self.assertAllGroupsEqual(actual, expected)
def test_reducefunc(self):
iterable = range(50)
keyfunc = lambda k: 10 * (k // 10)
valuefunc = lambda v: v + 1
reducefunc = sum
actual = list(
mi.groupby_transform(
iterable,
keyfunc=keyfunc,
valuefunc=valuefunc,
reducefunc=reducefunc,
)
)
expected = [(0, 55), (10, 155), (20, 255), (30, 355), (40, 455)]
self.assertEqual(actual, expected)
| GroupByTransformTests |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_bigquery.py | {
"start": 22034,
"end": 31608
} | class ____:
@pytest.mark.parametrize("as_dict", [True, False])
@mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryHook")
def test_execute__table(self, mock_hook, as_dict):
max_results = 100
selected_fields = "DATE"
operator = BigQueryGetDataOperator(
gcp_conn_id=GCP_CONN_ID,
task_id=TASK_ID,
dataset_id=TEST_DATASET,
table_id=TEST_TABLE_ID,
table_project_id=TEST_GCP_PROJECT_ID,
max_results=max_results,
selected_fields=selected_fields,
location=TEST_DATASET_LOCATION,
as_dict=as_dict,
use_legacy_sql=False,
)
operator.execute(None)
mock_hook.assert_called_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None, use_legacy_sql=False)
mock_hook.return_value.list_rows.assert_called_once_with(
dataset_id=TEST_DATASET,
table_id=TEST_TABLE_ID,
project_id=TEST_GCP_PROJECT_ID,
max_results=max_results,
selected_fields=selected_fields,
location=TEST_DATASET_LOCATION,
)
@pytest.mark.parametrize("as_dict", [True, False])
@mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryHook")
def test_execute__job_id(self, mock_hook, as_dict):
max_results = 100
selected_fields = "DATE"
operator = BigQueryGetDataOperator(
job_project_id=TEST_JOB_PROJECT_ID,
gcp_conn_id=GCP_CONN_ID,
task_id=TASK_ID,
job_id=TEST_JOB_ID,
max_results=max_results,
selected_fields=selected_fields,
location=TEST_DATASET_LOCATION,
as_dict=as_dict,
)
operator.execute(None)
mock_hook.return_value.get_query_results.assert_called_once_with(
job_id=TEST_JOB_ID,
location=TEST_DATASET_LOCATION,
max_results=max_results,
project_id=TEST_JOB_PROJECT_ID,
selected_fields=selected_fields,
)
@mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryHook")
def test_execute__job_id_table_id_mutual_exclusive_exception(self, _):
max_results = 100
selected_fields = "DATE"
operator = BigQueryGetDataOperator(
gcp_conn_id=GCP_CONN_ID,
task_id=TASK_ID,
dataset_id=TEST_DATASET,
table_id=TEST_TABLE_ID,
table_project_id=TEST_GCP_PROJECT_ID,
job_id=TEST_JOB_ID,
max_results=max_results,
selected_fields=selected_fields,
location=TEST_DATASET_LOCATION,
)
with pytest.raises(AirflowException, match="mutually exclusive"):
operator.execute(None)
@mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryHook")
def test_generate_query__with_table_project_id(self, mock_hook):
operator = BigQueryGetDataOperator(
gcp_conn_id=GCP_CONN_ID,
task_id=TASK_ID,
dataset_id=TEST_DATASET,
table_id=TEST_TABLE_ID,
table_project_id=TEST_GCP_PROJECT_ID,
max_results=100,
use_legacy_sql=False,
)
assert (
operator.generate_query(hook=mock_hook) == f"select * from `{TEST_GCP_PROJECT_ID}."
f"{TEST_DATASET}.{TEST_TABLE_ID}` limit 100"
)
@mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryHook")
def test_generate_query__without_table_project_id(self, mock_hook):
hook_project_id = mock_hook.project_id
operator = BigQueryGetDataOperator(
gcp_conn_id=GCP_CONN_ID,
task_id=TASK_ID,
dataset_id=TEST_DATASET,
table_id=TEST_TABLE_ID,
max_results=100,
use_legacy_sql=False,
)
assert (
operator.generate_query(hook=mock_hook) == f"select * from `{hook_project_id}."
f"{TEST_DATASET}.{TEST_TABLE_ID}` limit 100"
)
@pytest.mark.db_test
@mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryHook")
def test_bigquery_get_data_operator_async_with_selected_fields(
self, mock_hook, create_task_instance_of_operator
):
"""
Asserts that a task is deferred and a BigQuerygetDataTrigger will be fired
when the BigQueryGetDataOperator is executed with deferrable=True.
"""
ti = create_task_instance_of_operator(
BigQueryGetDataOperator,
dag_id="dag_id",
task_id="get_data_from_bq",
dataset_id=TEST_DATASET,
table_id=TEST_TABLE_ID,
job_project_id=TEST_JOB_PROJECT_ID,
max_results=100,
selected_fields="value,name",
deferrable=True,
use_legacy_sql=False,
)
with pytest.raises(TaskDeferred) as exc:
ti.task.execute(MagicMock())
assert isinstance(exc.value.trigger, BigQueryGetDataTrigger), (
"Trigger is not a BigQueryGetDataTrigger"
)
@pytest.mark.db_test
@pytest.mark.parametrize("as_dict", [True, False])
@mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryHook")
def test_bigquery_get_data_operator_async_without_selected_fields(
self, mock_hook, create_task_instance_of_operator, as_dict
):
"""
Asserts that a task is deferred and a BigQueryGetDataTrigger will be fired
when the BigQueryGetDataOperator is executed with deferrable=True.
"""
ti = create_task_instance_of_operator(
BigQueryGetDataOperator,
dag_id="dag_id",
task_id="get_data_from_bq",
dataset_id=TEST_DATASET,
table_id=TEST_TABLE_ID,
job_project_id=TEST_JOB_PROJECT_ID,
max_results=100,
deferrable=True,
as_dict=as_dict,
use_legacy_sql=False,
)
with pytest.raises(TaskDeferred) as exc:
ti.task.execute(MagicMock())
assert isinstance(exc.value.trigger, BigQueryGetDataTrigger), (
"Trigger is not a BigQueryGetDataTrigger"
)
@pytest.mark.parametrize("as_dict", [True, False])
def test_bigquery_get_data_operator_execute_failure(self, as_dict):
"""Tests that an AirflowException is raised in case of error event"""
operator = BigQueryGetDataOperator(
task_id="get_data_from_bq",
dataset_id=TEST_DATASET,
table_id="any",
job_project_id=TEST_JOB_PROJECT_ID,
max_results=100,
deferrable=True,
as_dict=as_dict,
use_legacy_sql=False,
)
with pytest.raises(AirflowException):
operator.execute_complete(
context=None, event={"status": "error", "message": "test failure message"}
)
@pytest.mark.parametrize("as_dict", [True, False])
def test_bigquery_get_data_op_execute_complete_with_records(self, as_dict):
"""Asserts that exception is raised with correct expected exception message"""
operator = BigQueryGetDataOperator(
task_id="get_data_from_bq",
dataset_id=TEST_DATASET,
table_id="any",
job_project_id=TEST_JOB_PROJECT_ID,
max_results=100,
deferrable=True,
as_dict=as_dict,
use_legacy_sql=False,
)
with mock.patch.object(operator.log, "info") as mock_log_info:
operator.execute_complete(context=None, event={"status": "success", "records": [20]})
mock_log_info.assert_called_with("Total extracted rows: %s", 1)
@pytest.mark.parametrize("as_dict", [True, False])
@mock.patch("airflow.providers.google.cloud.operators.bigquery.BigQueryHook")
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryJob")
def test_encryption_configuration(self, mock_job, mock_hook, as_dict):
encryption_configuration = {
"kmsKeyName": "projects/PROJECT/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY",
}
mock_hook.return_value.insert_job.return_value = mock_job
mock_hook.return_value.project_id = TEST_GCP_PROJECT_ID
max_results = 1
selected_fields = "DATE"
operator = BigQueryGetDataOperator(
job_project_id=TEST_GCP_PROJECT_ID,
gcp_conn_id=GCP_CONN_ID,
task_id=TASK_ID,
job_id="",
max_results=max_results,
dataset_id=TEST_DATASET,
table_id=TEST_TABLE_ID,
selected_fields=selected_fields,
location=TEST_DATASET_LOCATION,
as_dict=as_dict,
encryption_configuration=encryption_configuration,
deferrable=True,
)
with pytest.raises(TaskDeferred):
operator.execute(MagicMock())
mock_hook.return_value.insert_job.assert_called_with(
configuration={
"query": {
"query": f"""select DATE from `{TEST_GCP_PROJECT_ID}.{TEST_DATASET}.{TEST_TABLE_ID}` limit 1""",
"useLegacySql": True,
"destinationEncryptionConfiguration": encryption_configuration,
}
},
project_id=TEST_GCP_PROJECT_ID,
location=TEST_DATASET_LOCATION,
job_id="",
nowait=True,
)
| TestBigQueryGetDataOperator |
python | keras-team__keras | keras/src/ops/core.py | {
"start": 19887,
"end": 22350
} | class ____(Operation):
def __init__(self, cond, body, maximum_iterations=None, *, name=None):
super().__init__(name=name)
self.cond = cond
self.body = body
self.maximum_iterations = maximum_iterations
def call(self, loop_vars):
return backend.core.while_loop(
self.cond,
self.body,
loop_vars,
maximum_iterations=self.maximum_iterations,
)
def compute_output_spec(self, loop_vars):
return tree.map_structure(
lambda v: KerasTensor(v.shape, dtype=v.dtype), loop_vars
)
@keras_export("keras.ops.while_loop")
def while_loop(
cond,
body,
loop_vars,
maximum_iterations=None,
):
"""While loop implementation.
Args:
cond: A callable that represents the termination condition of the loop.
Must accept a `loop_vars` like structure as an argument. If
`loop_vars` is a tuple or list, each element of `loop_vars` will be
passed positionally to the callable.
body: A callable that represents the loop body. Must accept a
`loop_vars` like structure as an argument, and return update value
with the same structure. If `loop_vars` is a tuple or list, each
element of `loop_vars` will be passed positionally to the callable.
loop_vars: An arbitrary nested structure of tensor state to persist
across loop iterations.
maximum_iterations: Optional maximum number of iterations of the while
loop to run. If provided, the `cond` output is AND-ed with an
additional condition ensuring the number of iterations executed is
no greater than `maximum_iterations`.
Returns:
A list/tuple of tensors, has the same shape and dtype as `inputs`.
Examples:
>>> i = 0
>>> cond = lambda i: i < 10
>>> body = lambda i: i + 1
>>> keras.ops.while_loop(cond, body, i)
10
>>> x, y = 0, 1
>>> cond = lambda x, y: x < 10
>>> body = lambda x, y: (x + 1, y + 1)
>>> keras.ops.while_loop(cond, body, (x, y))
10, 11
"""
if any_symbolic_tensors((loop_vars,)):
return WhileLoop(
cond, body, maximum_iterations=maximum_iterations
).symbolic_call(loop_vars)
return backend.core.while_loop(
cond,
body,
loop_vars,
maximum_iterations=maximum_iterations,
)
| WhileLoop |
python | sqlalchemy__sqlalchemy | test/orm/test_core_compilation.py | {
"start": 90279,
"end": 95897
} | class ____(
_poly_fixtures._PolymorphicAliasedJoins, RelationshipNaturalInheritedTest
):
# this is the label style for the polymorphic selectable, not the
# outside query
label_style = LABEL_STYLE_DISAMBIGUATE_ONLY
straight_company_to_person_expected = (
"SELECT companies.company_id, companies.name FROM companies JOIN "
"(SELECT people.person_id AS person_id, "
"people.company_id AS company_id, people.name AS name, "
"people.type AS type, engineers.person_id AS person_id_1, "
"engineers.status AS status, "
"engineers.engineer_name AS engineer_name, "
"engineers.primary_language AS primary_language, "
"managers.person_id AS person_id_2, managers.status AS status_1, "
"managers.manager_name AS manager_name FROM people "
"LEFT OUTER JOIN engineers ON people.person_id = engineers.person_id "
"LEFT OUTER JOIN managers ON people.person_id = managers.person_id) "
"AS pjoin ON companies.company_id = pjoin.company_id"
)
person_paperwork_expected = (
"SELECT companies.company_id, companies.name FROM companies "
"JOIN (SELECT people.person_id AS person_id, people.company_id "
"AS company_id, people.name AS name, people.type AS type, "
"engineers.person_id AS person_id_1, engineers.status AS status, "
"engineers.engineer_name AS engineer_name, "
"engineers.primary_language AS primary_language, managers.person_id "
"AS person_id_2, managers.status AS status_1, managers.manager_name "
"AS manager_name FROM people LEFT OUTER JOIN engineers "
"ON people.person_id = engineers.person_id "
"LEFT OUTER JOIN managers ON people.person_id = managers.person_id) "
"AS pjoin ON companies.company_id = pjoin.company_id "
"JOIN paperwork ON pjoin.person_id = paperwork.person_id"
)
default_pjoin = (
"(SELECT people.person_id AS person_id, people.company_id AS "
"company_id, people.name AS name, people.type AS type, "
"engineers.person_id AS person_id_1, engineers.status AS status, "
"engineers.engineer_name AS engineer_name, engineers.primary_language "
"AS primary_language, managers.person_id AS person_id_2, "
"managers.status AS status_1, managers.manager_name AS manager_name "
"FROM people LEFT OUTER JOIN engineers ON people.person_id = "
"engineers.person_id LEFT OUTER JOIN managers ON people.person_id = "
"managers.person_id) AS pjoin "
"ON companies.company_id = pjoin.company_id"
)
flat_aliased_pjoin = (
"(SELECT people.person_id AS person_id, people.company_id AS "
"company_id, people.name AS name, people.type AS type, "
"engineers.person_id AS person_id_1, engineers.status AS status, "
"engineers.engineer_name AS engineer_name, "
"engineers.primary_language AS primary_language, "
"managers.person_id AS person_id_2, managers.status AS status_1, "
"managers.manager_name AS manager_name FROM people "
"LEFT OUTER JOIN engineers ON people.person_id = engineers.person_id "
"LEFT OUTER JOIN managers ON people.person_id = managers.person_id) "
"AS pjoin_1 ON companies.company_id = pjoin_1.company_id"
)
aliased_pjoin = (
"(SELECT people.person_id AS person_id, people.company_id AS "
"company_id, people.name AS name, people.type AS type, "
"engineers.person_id AS person_id_1, engineers.status AS status, "
"engineers.engineer_name AS engineer_name, engineers.primary_language "
"AS primary_language, managers.person_id AS person_id_2, "
"managers.status AS status_1, managers.manager_name AS manager_name "
"FROM people LEFT OUTER JOIN engineers ON people.person_id = "
"engineers.person_id LEFT OUTER JOIN managers ON people.person_id = "
"managers.person_id) AS pjoin_1 "
"ON companies.company_id = pjoin_1.company_id"
)
c_to_p_whereclause = (
"SELECT companies.company_id, companies.name FROM companies JOIN "
"(SELECT people.person_id AS person_id, "
"people.company_id AS company_id, people.name AS name, "
"people.type AS type, engineers.person_id AS person_id_1, "
"engineers.status AS status, "
"engineers.engineer_name AS engineer_name, "
"engineers.primary_language AS primary_language, "
"managers.person_id AS person_id_2, managers.status AS status_1, "
"managers.manager_name AS manager_name FROM people "
"LEFT OUTER JOIN engineers ON people.person_id = engineers.person_id "
"LEFT OUTER JOIN managers ON people.person_id = managers.person_id) "
"AS pjoin ON companies.company_id = pjoin.company_id "
"WHERE pjoin.name = :name_1"
)
poly_columns = (
"SELECT pjoin.person_id FROM (SELECT people.person_id AS "
"person_id, people.company_id AS company_id, people.name AS name, "
"people.type AS type, engineers.person_id AS person_id_1, "
"engineers.status AS status, "
"engineers.engineer_name AS engineer_name, "
"engineers.primary_language AS primary_language, "
"managers.person_id AS person_id_2, "
"managers.status AS status_1, managers.manager_name AS manager_name "
"FROM people LEFT OUTER JOIN engineers "
"ON people.person_id = engineers.person_id "
"LEFT OUTER JOIN managers "
"ON people.person_id = managers.person_id) AS pjoin"
)
| RelNaturalAliasedJoinsDisamTest |
python | PrefectHQ__prefect | src/prefect/server/schemas/core.py | {
"start": 41182,
"end": 43288
} | class ____(ORMBaseModel):
key: Optional[str] = Field(
default=None, description="An optional unique reference key for this artifact."
)
type: Optional[str] = Field(
default=None,
description=(
"An identifier that describes the shape of the data field. e.g. 'result',"
" 'table', 'markdown'"
),
)
description: Optional[str] = Field(
default=None, description="A markdown-enabled description of the artifact."
)
# data will eventually be typed as `Optional[Union[Result, Any]]`
data: Optional[Union[Dict[str, Any], Any]] = Field(
default=None,
description=(
"Data associated with the artifact, e.g. a result.; structure depends on"
" the artifact type."
),
)
metadata_: Optional[dict[str, str]] = Field(
default=None,
description=(
"User-defined artifact metadata. Content must be string key and value"
" pairs."
),
)
flow_run_id: Optional[UUID] = Field(
default=None, description="The flow run associated with the artifact."
)
task_run_id: Optional[UUID] = Field(
default=None, description="The task run associated with the artifact."
)
@classmethod
def from_result(cls, data: Any | dict[str, Any]) -> "Artifact":
artifact_info: dict[str, Any] = dict()
if isinstance(data, dict):
artifact_key = data.pop("artifact_key", None)
if artifact_key:
artifact_info["key"] = artifact_key
artifact_type = data.pop("artifact_type", None)
if artifact_type:
artifact_info["type"] = artifact_type
description = data.pop("artifact_description", None)
if description:
artifact_info["description"] = description
return cls(data=data, **artifact_info)
@field_validator("metadata_")
@classmethod
def validate_metadata_length(cls, v: dict[str, str]) -> dict[str, str]:
return validate_max_metadata_length(v)
| Artifact |
python | pytorch__pytorch | scripts/release_notes/test_release_notes.py | {
"start": 69,
"end": 2235
} | class ____(unittest.TestCase):
def test_create_new(self):
with tempfile.TemporaryDirectory() as tempdir:
commit_list_path = f"{tempdir}/commitlist.csv"
commit_list = CommitList.create_new(
commit_list_path, "v1.5.0", "6000dca5df"
)
self.assertEqual(len(commit_list.commits), 33)
self.assertEqual(commit_list.commits[0].commit_hash, "7335f079abb")
self.assertTrue(
commit_list.commits[0].title.startswith("[pt][quant] qmul and qadd")
)
self.assertEqual(commit_list.commits[-1].commit_hash, "6000dca5df6")
self.assertTrue(
commit_list.commits[-1].title.startswith(
"[nomnigraph] Copy device option when customize "
)
)
def test_read_write(self):
with tempfile.TemporaryDirectory() as tempdir:
commit_list_path = f"{tempdir}/commitlist.csv"
initial = CommitList.create_new(commit_list_path, "v1.5.0", "7543e7e558")
initial.write_to_disk()
expected = CommitList.from_existing(commit_list_path)
expected.commits[-2].category = "foobar"
expected.write_to_disk()
commit_list = CommitList.from_existing(commit_list_path)
for commit, expected_commit in zip(commit_list.commits, expected.commits):
self.assertEqual(commit, expected_commit)
def test_update_to(self):
with tempfile.TemporaryDirectory() as tempdir:
commit_list_path = f"{tempdir}/commitlist.csv"
initial = CommitList.create_new(commit_list_path, "v1.5.0", "7543e7e558")
initial.commits[-2].category = "foobar"
self.assertEqual(len(initial.commits), 2143)
initial.write_to_disk()
commit_list = CommitList.from_existing(commit_list_path)
commit_list.update_to("5702a28b26")
self.assertEqual(len(commit_list.commits), 2143 + 4)
self.assertEqual(commit_list.commits[-5], initial.commits[-1])
if __name__ == "__main__":
unittest.main()
| TestCommitList |
python | doocs__leetcode | lcci/05.08.Draw Line/Solution.py | {
"start": 0,
"end": 380
} | class ____:
def drawLine(self, length: int, w: int, x1: int, x2: int, y: int) -> List[int]:
ans = [0] * length
i = (y * w + x1) // 32
j = (y * w + x2) // 32
for k in range(i, j + 1):
ans[k] = -1
ans[i] = (ans[i] & 0xFFFFFFFF) >> (x1 % 32) if x1 % 32 else -1
ans[j] &= -0x80000000 >> (x2 % 32)
return ans
| Solution |
python | numba__numba | numba/core/datamodel/models.py | {
"start": 28450,
"end": 28905
} | class ____(ArrayModel):
def __init__(self, dmm, fe_type):
self._be_type = dmm.lookup(fe_type.dtype).get_data_type()
super(NestedArrayModel, self).__init__(dmm, fe_type)
def as_storage_type(self):
"""Return the LLVM type representation for the storage of
the nestedarray.
"""
ret = ir.ArrayType(self._be_type, self._fe_type.nitems)
return ret
@register_default(types.Optional)
| NestedArrayModel |
python | huggingface__transformers | src/transformers/models/oneformer/modeling_oneformer.py | {
"start": 80710,
"end": 83210
} | class ____(nn.Module):
def __init__(
self, embed_dim, num_heads, dropout=0.0, activation="relu", normalize_before=False, layer_norm_eps=1e-05
):
super().__init__()
self.multihead_attn = nn.MultiheadAttention(embed_dim, num_heads, dropout=dropout)
self.norm = nn.LayerNorm(embed_dim, eps=layer_norm_eps)
self.dropout = nn.Dropout(dropout)
self.activation = ACT2FN[activation]
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(
self,
output,
memory,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
output2, attention_weights = self.multihead_attn(
query=self.with_pos_embed(output, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory,
attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask,
)
output = output + self.dropout(output2)
output = self.norm(output)
return output, attention_weights
def forward_pre(
self,
output,
memory,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
output2 = self.norm(output)
output2, attention_weights = self.multihead_attn(
query=self.with_pos_embed(output2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory,
attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask,
)
output = output + self.dropout(output2)
return output, attention_weights
def forward(
self,
output,
memory,
memory_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
):
if self.normalize_before:
return self.forward_pre(output, memory, memory_mask, memory_key_padding_mask, pos, query_pos)
return self.forward_post(output, memory, memory_mask, memory_key_padding_mask, pos, query_pos)
| OneFormerTransformerDecoderCrossAttentionLayer |
python | patrick-kidger__equinox | equinox/nn/_batch_norm.py | {
"start": 393,
"end": 11415
} | class ____(StatefulLayer):
r"""Computes a mean and standard deviation over the batch and spatial
dimensions of an array, and uses these to normalise the whole array. Optionally
applies a channelwise affine transformation afterwards.
Given an input array $x = [x_1, ... x_C]$ with $C$ channels, this layer computes
$$\frac{x_i - \mathbb{E}[x_i]}{\sqrt{\text{Var}[x_i] + \varepsilon}} * \gamma_i + \beta_i$$
for all $i$. Here $*$ denotes elementwise multiplication and $\gamma$, $\beta$ have
shape $(C,)$ if `channelwise_affine=True` and $\gamma = 1$, $\beta = 0$ if
`channelwise_affine=False`. Expectations are computed over all spatial dimensions
*and* over the batch dimension, and updated batch-by-batch according to `momentum`.
!!! example
See [this example](../../examples/stateful.ipynb) for example usage.
!!! warning
This layer must be used inside of a `vmap` or `pmap` with a matching
`axis_name`. (Not doing so will raise a `NameError`.)
Note that this layer behaves differently during training and inference. During
training then statistics are computed using the input data, and the running
statistics updated. During inference then just the running statistics are used.
Whether the model is in training or inference mode should be toggled using
[`equinox.nn.inference_mode`][].
With `mode = "batch"` during training the batch mean and variance are used
for normalization. For inference the exponential running mean and unbiased
variance are used for normalization. This is in line with how other machine
learning packages (e.g. PyTorch, flax, haiku) implement batch norm.
With `mode = "ema"` exponential running means and variances are kept. During
training the batch statistics are used to fill in the running statistics until
they are populated. During inference the running statistics are used for
normalization.
??? cite
[Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift](https://arxiv.org/abs/1502.03167)
```bibtex
@article{DBLP:journals/corr/IoffeS15,
author = {Sergey Ioffe and Christian Szegedy},
title = {Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift},
journal = {CoRR},
volume = {abs/1502.03167},
year = {2015},
url = {http://arxiv.org/abs/1502.03167},
eprinttype = {arXiv},
eprint = {1502.03167},
timestamp = {Mon, 13 Aug 2018 16:47:06 +0200},
biburl = {https://dblp.org/rec/journals/corr/IoffeS15.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
""" # noqa: E501
weight: Float[Array, "input_size"] | None
bias: Float[Array, "input_size"] | None
ema_first_time_index: None | StateIndex[Bool[Array, ""]]
ema_state_index: (
None | StateIndex[tuple[Float[Array, "input_size"], Float[Array, "input_size"]]]
)
batch_counter: None | StateIndex[Int[Array, ""]]
batch_state_index: (
None | StateIndex[tuple[Float[Array, "input_size"], Float[Array, "input_size"]]]
)
axis_name: Hashable | Sequence[Hashable]
inference: bool
input_size: int = field(static=True)
eps: float = field(static=True)
channelwise_affine: bool = field(static=True)
momentum: float = field(static=True)
mode: Literal["ema", "batch"] = field(static=True)
def __init__(
self,
input_size: int,
axis_name: Hashable | Sequence[Hashable],
eps: float = 1e-5,
channelwise_affine: bool = True,
momentum: float = 0.99,
inference: bool = False,
dtype=None,
mode: Literal["ema", "batch", "legacy"] = "legacy",
):
"""**Arguments:**
- `input_size`: The number of channels in the input array.
- `axis_name`: The name of the batch axis to compute statistics over, as passed
to `axis_name` in `jax.vmap` or `jax.pmap`. Can also be a sequence (e.g. a
tuple or a list) of names, to compute statistics over multiple named axes.
- `eps`: Value added to the denominator for numerical stability.
- `channelwise_affine`: Whether the module has learnable channel-wise affine
parameters.
- `momentum`: The rate at which to update the running statistics. Should be a
value between 0 and 1 exclusive.
- `inference`: If `False` then the batch means and variances will be calculated
and used to update the running statistics. If `True` then the running
statistics are directly used for normalisation. This may be toggled with
[`equinox.nn.inference_mode`][] or overridden during
[`equinox.nn.BatchNorm.__call__`][].
- `dtype`: The dtype to use for the running statistics and the weight and bias
if `channelwise_affine` is `True`. Defaults to either
`jax.numpy.float32` or `jax.numpy.float64` depending on whether JAX is in
64-bit mode.
- `mode`: The variant of batch norm to use, either 'ema' or 'batch'.
"""
if mode == "legacy":
mode = "ema"
warnings.warn(
"When `eqx.nn.BatchNorm(..., mode=...)` is unspecified it defaults to "
"'ema', for backward compatibility. This typically has a performance "
"impact, and for new code the user is encouraged to use 'batch' "
"instead. See `https://github.com/patrick-kidger/equinox/issues/659`."
)
if mode not in {"ema", "batch"}:
raise ValueError("Invalid mode, must be 'ema' or 'batch'.")
self.mode = mode
dtype = default_floating_dtype() if dtype is None else dtype
if channelwise_affine:
self.weight = jnp.ones((input_size,), dtype=dtype)
self.bias = jnp.zeros((input_size,), dtype=dtype)
else:
self.weight = None
self.bias = None
if mode == "ema":
self.ema_first_time_index = StateIndex(jnp.array(True))
init_buffers = (
jnp.empty((input_size,), dtype=dtype),
jnp.empty((input_size,), dtype=dtype),
)
self.ema_state_index = StateIndex(init_buffers)
self.batch_counter = None
self.batch_state_index = None
else:
self.batch_counter = StateIndex(jnp.array(0))
init_hidden = (
jnp.zeros((input_size,), dtype=dtype),
jnp.ones((input_size,), dtype=dtype),
)
self.batch_state_index = StateIndex(init_hidden)
self.ema_first_time_index = None
self.ema_state_index = None
self.inference = inference
self.axis_name = axis_name
self.input_size = input_size
self.eps = eps
self.channelwise_affine = channelwise_affine
self.momentum = momentum
    @named_scope("eqx.nn.BatchNorm")
    def __call__(
        self,
        x: Array,
        state: State,
        *,
        key: PRNGKeyArray | None = None,
        inference: bool | None = None,
    ) -> tuple[Array, State]:
        """**Arguments:**
        - `x`: A JAX array of shape `(input_size, dim_1, ..., dim_N)`.
        - `state`: An [`equinox.nn.State`][] object (which is used to store the
            running statistics).
        - `key`: Ignored; provided for compatibility with the rest of the Equinox API.
            (Keyword only argument.)
        - `inference`: As per [`equinox.nn.BatchNorm.__init__`][]. If
            `True` or `False` then it will take priority over `self.inference`. If
            `None` then the value from `self.inference` will be used.
        **Returns:**
        A 2-tuple of:
        - A JAX array of shape `(input_size, dim_1, ..., dim_N)`.
        - An updated state object (storing the updated running statistics).
        **Raises:**
        A `NameError` if no `vmap`s are placed around this operation, or if this vmap
        does not have a matching `axis_name`.
        """
        del key
        if inference is None:
            inference = self.inference
        # Per-channel mean/variance, averaged across the vmapped batch axis via
        # `pmean` — this is what requires the surrounding vmap with `axis_name`.
        def _stats(y):
            mean = jnp.mean(y)
            mean = lax.pmean(mean, self.axis_name)
            # `conj` keeps the variance real-valued for complex inputs.
            var = jnp.mean((y - mean) * jnp.conj(y - mean))
            var = lax.pmean(var, self.axis_name)
            # Clamp tiny negative values arising from floating-point error.
            var = jnp.maximum(0.0, var)
            return mean, var
        # Normalise one channel; `w`/`b` are only applied with channelwise_affine.
        def _norm(y, m, v, w, b):
            out = (y - m) / jnp.sqrt(v + self.eps)
            if self.channelwise_affine:
                out = out * w + b
            return out
        if self.mode == "ema":
            assert self.ema_first_time_index is not None
            assert self.ema_state_index is not None
            if inference:
                # Inference: use the stored running statistics directly.
                mean, var = state.get(self.ema_state_index)
            else:
                first_time = state.get(self.ema_first_time_index)
                state = state.set(self.ema_first_time_index, jnp.array(False))
                batch_mean, batch_var = jax.vmap(_stats)(x)
                running_mean, running_var = state.get(self.ema_state_index)
                momentum = self.momentum
                mean = (1 - momentum) * batch_mean + momentum * running_mean
                var = (1 - momentum) * batch_var + momentum * running_var
                # since jnp.array(0) == False
                # On the very first call the running buffers are uninitialised
                # (`jnp.empty` in `__init__`), so use the batch stats unblended.
                mean = lax.select(first_time, batch_mean, mean)
                var = lax.select(first_time, batch_var, var)
                state = state.set(self.ema_state_index, (mean, var))
        else:
            assert self.batch_state_index is not None
            assert self.batch_counter is not None
            counter = state.get(self.batch_counter)
            hidden_mean, hidden_var = state.get(self.batch_state_index)
            if inference:
                # Zero-debias approach: mean = hidden_mean / (1 - momentum^counter)
                # For simplicity we do the minimal version here (no warmup).
                scale = jnp.where(counter == 0, 1, 1 - self.momentum**counter)
                mean = hidden_mean / scale
                var = hidden_var / scale
            else:
                # Training: normalise with the current batch statistics, and
                # fold them into the (biased) running averages for later debias.
                mean, var = jax.vmap(_stats)(x)
                new_counter = counter + 1
                new_hidden_mean = hidden_mean * self.momentum + mean * (
                    1 - self.momentum
                )
                new_hidden_var = hidden_var * self.momentum + var * (1 - self.momentum)
                state = state.set(self.batch_counter, new_counter)
                state = state.set(
                    self.batch_state_index, (new_hidden_mean, new_hidden_var)
                )
        out = jax.vmap(_norm)(x, mean, var, self.weight, self.bias)
        return out, state
| BatchNorm |
python | huggingface__transformers | src/transformers/models/ministral/modeling_ministral.py | {
"start": 1617,
"end": 5640
class ____(nn.Module):
    """Gated feed-forward block: ``down_proj(act(gate_proj(x)) * up_proj(x))``.

    All three projections are bias-free linear layers; the activation is
    looked up from ``ACT2FN`` by ``config.hidden_act``.
    """
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
        self.act_fn = ACT2FN[config.hidden_act]
    def forward(self, x):
        # Gate path is activated, then modulates the (linear) up path.
        gated = self.act_fn(self.gate_proj(x))
        return self.down_proj(gated * self.up_proj(x))
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    half = x.shape[-1] // 2
    first_half = x[..., :half]
    second_half = x[..., half:]
    # (a, b) -> (-b, a): the 90-degree rotation used by rotary embeddings.
    return torch.cat((-second_half, first_half), dim=-1)
@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.
    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    # Insert a broadcast dimension for the heads axis.
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    # Complex-rotation identity applied pairwise to the two halves of head_dim.
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    # Insert a repeat axis after the KV-head axis, broadcast it, then fold it
    # into the head axis. `expand` is a view; `reshape` materialises the copy.
    expanded = hidden_states.unsqueeze(2).expand(
        batch, num_key_value_heads, n_rep, slen, head_dim
    )
    return expanded.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    """Reference (non-fused) scaled dot-product attention.

    `module` supplies `num_key_value_groups` (GQA key/value repetition factor)
    and `training` (dropout switch). `attention_mask`, when given, is an
    additive bias sliced to the key length. Returns `(attn_output,
    attn_weights)`, with `attn_output` transposed to put the sequence axis
    before the head axis and made contiguous.
    """
    # Expand grouped KV heads so they line up 1:1 with the query heads.
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)
    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask
    # Softmax in float32 for numerical stability, then cast back to the
    # query dtype.
    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()
    return attn_output, attn_weights
| MinistralMLP |
python | kamyu104__LeetCode-Solutions | Python/number-of-pairs-satisfying-inequality.py | {
"start": 586,
"end": 1102
class ____(object):  # 0-indexed.
    """Fenwick tree (binary indexed tree): point updates, prefix-sum queries."""
    def __init__(self, n):
        # Slot 0 is a dummy root; real positions occupy indices 1..n.
        self.__bit = [0] * (n + 1)
    def add(self, i, val):
        """Add `val` to position `i`."""
        idx = i + 1  # shift past the dummy node
        while idx < len(self.__bit):
            self.__bit[idx] += val
            idx += idx & -idx  # climb to the next covering node
    def query(self, i):
        """Return the sum of positions 0..i inclusive."""
        idx = i + 1  # shift past the dummy node
        total = 0
        while idx:
            total += self.__bit[idx]
            idx -= idx & -idx  # drop the lowest set bit
        return total
# bit, fenwick tree, coordinate compression
| BIT |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-chroma/destination_chroma/config.py | {
"start": 1334,
"end": 1741
class ____(BaseModel):
    # Pydantic (v1-style) settings model for Chroma's "persistent client"
    # mode, i.e. saving/loading the database from the local filesystem.
    # No class docstring on purpose: pydantic would surface it in the JSON
    # schema and `Config.schema_extra` already supplies the description.
    mode: Literal["persistent_client"] = Field("persistent_client", const=True)  # union discriminator
    path: str = Field(..., title="Path", description="Where Chroma will store its database files on disk, and load them on start.")
    class Config:
        title = "Persistent Client Mode"
        schema_extra = {"description": "Configure Chroma to save and load from your local machine"}
| PersistentMode |
python | ray-project__ray | rllib/connectors/common/tensor_to_numpy.py | {
"start": 382,
"end": 826
class ____(ConnectorV2):
    """Converts (framework) tensors across the entire input data into numpy arrays."""
    @override(ConnectorV2)
    def __call__(
        self,
        *,
        rl_module: RLModule,
        batch: Dict[str, Any],
        episodes: List[EpisodeType],
        explore: Optional[bool] = None,
        shared_data: Optional[dict] = None,
        **kwargs,
    ) -> Any:
        # Only `batch` is used; the remaining keyword-only arguments exist to
        # satisfy the ConnectorV2 call signature. `convert_to_numpy` walks the
        # (possibly nested) batch structure and converts tensors in bulk.
        return convert_to_numpy(batch)
| TensorToNumpy |
python | spack__spack | lib/spack/spack/vendor/jinja2/ext.py | {
"start": 21152,
"end": 21539
class ____(Extension):
    """Adds break and continue to the template engine."""
    tags = {"break", "continue"}
    def parse(self, parser: "Parser") -> t.Union[nodes.Break, nodes.Continue]:
        # Consume the tag token; its value ("break" or "continue") selects
        # which AST node to emit at the same line number.
        token = next(parser.stream)
        node_cls = nodes.Break if token.value == "break" else nodes.Continue
        return node_cls(lineno=token.lineno)
| LoopControlExtension |
python | getsentry__sentry | src/sentry/db/models/fields/uuid.py | {
"start": 1993,
"end": 5778
class ____(models.Field):
    """Field for storing UUIDs."""
    description = "Universally unique identifier."
    def __init__(self, auto_add=False, coerce_to=UUID, **kwargs):
        """Instantiate the field.

        `auto_add` may be False, True (generate a uuid4 on first save),
        a zero-argument callable, or a "module:member" string naming one.
        `coerce_to` is the type `to_python` converts raw values into.
        """
        # If the `auto_add` argument is specified as True, substitute an
        # appropriate callable which requires no arguments and will return
        # a UUID.
        if auto_add is True:
            auto_add = uuid4
        # If the `auto_add` argument is specified as a string,
        # parse out and import the callable.
        if isinstance(auto_add, str):
            module_name, member = auto_add.split(":")
            module = importlib.import_module(module_name)
            auto_add = getattr(module, member)
        # Save the `auto_add` and `coerce_to` rules.
        self._auto_add = auto_add
        self._coerce_to = coerce_to
        # If `auto_add` is enabled, it should imply that the field
        # is not editable, and should not show up in ModelForms.
        if auto_add and "editable" not in kwargs:
            kwargs["editable"] = False
        # Blank values shall be nulls.
        if kwargs.get("blank", False) and not kwargs.get("null", False):
            raise AttributeError(
                " ".join(
                    (
                        "Blank UUIDs are stored as NULL. Therefore, setting",
                        "`blank` to True requires `null` to be True.",
                    )
                )
            )
        # Enforce CHAR(32) for unsupported engines
        kwargs["max_length"] = 32
        # Now pass the rest of the work to CharField.
        super().__init__(**kwargs)
    def db_type(self, connection) -> str:
        """Use the backend's native `uuid` column type."""
        return "uuid"
    def get_internal_type(self) -> str:
        # Reported as CharField for Django's internal bookkeeping
        # (validation, serialization); storage still uses `db_type`.
        return "CharField"
    def get_prep_value(self, value):
        """Return a wrapped, valid UUID value."""
        # If the value is None, return None.
        if not value:
            if self.null or self._auto_add or (self.default != NOT_PROVIDED):
                return None
            raise ValueError(
                "Explicit UUID required unless either `null` is " "True or `auto_add` is given."
            )
        # If we already have a UUID, pass it through.
        if isinstance(value, UUID):
            return value
        # Convert our value to a UUID.
        return UUID(value)
    def pre_save(self, instance, add):
        """If auto is set, generate a UUID at random."""
        # If the `auto_add` option was set, and there is no value
        # on the model instance, then generate a UUID using the given
        # callable. Only applies on the initial insert (`add` is True).
        if self._auto_add and add and not getattr(instance, self.attname):
            uuid_value = self._auto_add()
            # Save the UUID to the model instance
            setattr(instance, self.attname, uuid_value)
            return uuid_value
        # This is the standard case; just use the superclass logic.
        return super().pre_save(instance, add)
    def contribute_to_class(
        self, cls: type[models.Model], name: str, private_only: bool = False
    ) -> None:
        # Install a `Creator` descriptor so values assigned to the attribute
        # are coerced through `to_python` on assignment.
        super().contribute_to_class(cls, name, private_only=private_only)
        setattr(cls, name, Creator(self))
    def to_python(self, value):
        """Return a UUID object (or pass through falsy/already-coerced values)."""
        if isinstance(value, self._coerce_to) or not value:
            return value
        return self._coerce_to(value)
    @property
    def _auto_add_str(self):
        """Return a dot path, as a string, of the `_auto_add` callable.
        If `_auto_add` is a boolean, return it unchanged.
        """
        if isinstance(self._auto_add, bool):
            return self._auto_add
        return f"{self._auto_add.__module__}:{self._auto_add.__name__}"
| UUIDField |
python | PrefectHQ__prefect | tests/test_flows.py | {
"start": 55018,
"end": 61381
class ____:
    """Verify that a subflow's tracking task run records its upstream task
    inputs correctly — for futures, states, resolved results, `allow_failure`
    wrappers, and the no-upstream case."""
    async def test_subflow_with_one_upstream_task_future(self, prefect_client):
        @task
        def child_task(x):
            return x
        @flow
        def child_flow(x):
            return x
        @flow
        def parent_flow():
            task_future = child_task.submit(1)
            flow_state = child_flow(x=task_future, return_state=True)
            task_future.wait()
            task_state = task_future.state
            return task_state, flow_state
        task_state, flow_state = parent_flow()
        flow_tracking_task_run = await prefect_client.read_task_run(
            flow_state.state_details.task_run_id
        )
        assert flow_tracking_task_run.task_inputs == dict(
            x=[TaskRunResult(id=task_state.state_details.task_run_id)],
        )
    async def test_subflow_with_one_upstream_task_state(self, prefect_client):
        @task
        def child_task(x):
            return x
        @flow
        def child_flow(x):
            return x
        @flow
        def parent_flow():
            task_state = child_task(257, return_state=True)
            flow_state = child_flow(x=task_state, return_state=True)
            return task_state, flow_state
        task_state, flow_state = parent_flow()
        flow_tracking_task_run = await prefect_client.read_task_run(
            flow_state.state_details.task_run_id
        )
        assert flow_tracking_task_run.task_inputs == dict(
            x=[TaskRunResult(id=task_state.state_details.task_run_id)],
        )
    async def test_subflow_with_one_upstream_task_result(self, prefect_client):
        @task
        def child_task(x):
            return x
        @flow
        def child_flow(x):
            return x
        @flow
        def parent_flow():
            task_state = child_task(257, return_state=True)
            task_result = task_state.result()
            flow_state = child_flow(x=task_result, return_state=True)
            return task_state, flow_state
        task_state, flow_state = parent_flow()
        flow_tracking_task_run = await prefect_client.read_task_run(
            flow_state.state_details.task_run_id
        )
        assert flow_tracking_task_run.task_inputs == dict(
            x=[TaskRunResult(id=task_state.state_details.task_run_id)],
        )
    async def test_subflow_with_one_upstream_task_future_and_allow_failure(
        self, prefect_client
    ):
        @task
        def child_task():
            raise ValueError()
        @flow
        def child_flow(x):
            return x
        @flow
        def parent_flow():
            future = child_task.submit()
            flow_state = child_flow(x=allow_failure(future), return_state=True)
            future.wait()
            return quote((future.state, flow_state))
        task_state, flow_state = parent_flow().unquote()
        assert isinstance(await flow_state.result(), ValueError)
        flow_tracking_task_run = await prefect_client.read_task_run(
            flow_state.state_details.task_run_id
        )
        # The upstream task failed, yet the subflow ran and still links it.
        assert task_state.is_failed()
        assert flow_tracking_task_run.task_inputs == dict(
            x=[TaskRunResult(id=task_state.state_details.task_run_id)],
        )
    async def test_subflow_with_one_upstream_task_state_and_allow_failure(
        self, prefect_client
    ):
        @task
        def child_task():
            raise ValueError()
        @flow
        def child_flow(x):
            return x
        @flow
        def parent_flow():
            task_state = child_task(return_state=True)
            flow_state = child_flow(x=allow_failure(task_state), return_state=True)
            return quote((task_state, flow_state))
        task_state, flow_state = parent_flow().unquote()
        assert isinstance(await flow_state.result(), ValueError)
        flow_tracking_task_run = await prefect_client.read_task_run(
            flow_state.state_details.task_run_id
        )
        # The upstream task failed, yet the subflow ran and still links it.
        assert task_state.is_failed()
        assert flow_tracking_task_run.task_inputs == dict(
            x=[TaskRunResult(id=task_state.state_details.task_run_id)],
        )
    async def test_subflow_with_no_upstream_tasks(self, prefect_client):
        @flow
        def bar(x, y):
            return x + y
        @flow
        def foo():
            return bar(x=2, y=1, return_state=True)
        child_flow_state = await foo(return_state=True).result()
        flow_tracking_task_run = await prefect_client.read_task_run(
            child_flow_state.state_details.task_run_id
        )
        # Literal inputs produce empty upstream lists, not missing keys.
        assert flow_tracking_task_run.task_inputs == dict(
            x=[],
            y=[],
        )
    async def test_subflow_with_upstream_task_passes_validation(self, prefect_client):
        """
        Regression test for https://github.com/PrefectHQ/prefect/issues/14036
        """
        @task
        def child_task(x: int):
            return x
        @flow
        def child_flow(x: int):
            return x
        @flow
        def parent_flow():
            task_state = child_task(257, return_state=True)
            flow_state = child_flow(x=task_state, return_state=True)
            return task_state, flow_state
        task_state, flow_state = parent_flow()
        assert flow_state.is_completed()
        flow_tracking_task_run = await prefect_client.read_task_run(
            flow_state.state_details.task_run_id
        )
        assert flow_tracking_task_run.task_inputs == dict(
            x=[TaskRunResult(id=task_state.state_details.task_run_id)],
        )
# We flush the APILogHandler in a non-blocking manner, so we need to wait for the logs to be
# written before we can read them to avoid flakiness.
async def _wait_for_logs(
    prefect_client: PrefectClient,
    expected_num_logs: Optional[int] = None,
    timeout: int = 10,
):
    """Poll the API until logs are available, then return them.

    The APILogHandler flushes in a non-blocking manner, so tests must wait for
    logs to be written before reading them to avoid flakiness.

    Args:
        prefect_client: Client used to read logs from the API.
        expected_num_logs: If given, wait until at least this many logs exist;
            otherwise return as soon as any logs are present.
        timeout: Maximum number of seconds to poll before giving up.

    Returns:
        The list of logs read from the API.

    Raises:
        TimeoutError: If the condition is not met within `timeout` seconds.
    """
    logs = []
    # Use a monotonic clock for the deadline: time.time() can jump if the
    # system clock is adjusted mid-test, breaking the timeout.
    start_time = time.monotonic()
    while True:
        logs = await prefect_client.read_logs()
        if logs and (expected_num_logs is None or len(logs) >= expected_num_logs):
            break
        if time.monotonic() - start_time > timeout:
            raise TimeoutError("Timed out in _wait_for_logs()")
        await asyncio.sleep(1)
    return logs
@pytest.mark.enable_api_log_handler
| TestSubflowTaskInputs |
python | pytorch__pytorch | test/onnx/exporter/test_tensors.py | {
"start": 229,
"end": 584
class ____(common_utils.TestCase):
    """Tests for the ONNX exporter's SymbolicTensor."""
    def test_it_is_hashable(self):
        # SymbolicTensor must work as a dict key / set member, which requires
        # a stable hash; `self.assertIn(tensor, {tensor})` exercises both
        # __hash__ and __eq__ together.
        tensor = _tensors.SymbolicTensor(
            opset=onnxscript.values.Opset(domain="test", version=1)
        )
        self.assertEqual(hash(tensor), hash(tensor))
        self.assertIn(tensor, {tensor})
if __name__ == "__main__":
common_utils.run_tests()
| SymbolicTensorTest |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF024.py | {
"start": 684,
"end": 876
class ____: ...
dict.fromkeys(pierogi_fillings, MysteryBox)
bar.fromkeys(pierogi_fillings, [])
def bad_dict() -> None:
    dict = MysteryBox()
    dict.fromkeys(pierogi_fillings, [])
# NOTE(review): ruff RUF024 fixture. The calls above are deliberate edge
# cases (class object as the default, non-dict receiver, shadowed `dict`);
# comments are appended after the code so the diagnostic line numbers that
# the lint test snapshots depend on stay stable.
| MysteryBox |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.